Grow.c (thirdparty/mdadm.git, blob 0f9e89bcc5bad5ce97c269dee3b1efad2a8992d7)
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stddef.h>
28 #include <stdint.h>
29 #include <signal.h>
30 #include <sys/wait.h>
31
32 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
33 #error no endian defined
34 #endif
35 #include "md_u.h"
36 #include "md_p.h"
37
38 int restore_backup(struct supertype *st,
39 struct mdinfo *content,
40 int working_disks,
41 int next_spare,
42 char **backup_filep,
43 int verbose)
44 {
45 int i;
46 int *fdlist;
47 struct mdinfo *dev;
48 int err;
49 int disk_count = next_spare + working_disks;
50 char *backup_file = *backup_filep;
51
52 dprintf("Called restore_backup()\n");
53 fdlist = xmalloc(sizeof(int) * disk_count);
54
55 enable_fds(next_spare);
56 for (i = 0; i < next_spare; i++)
57 fdlist[i] = -1;
58 for (dev = content->devs; dev; dev = dev->next) {
59 char buf[22];
60 int fd;
61
62 sprintf(buf, "%d:%d", dev->disk.major, dev->disk.minor);
63 fd = dev_open(buf, O_RDWR);
64
65 if (dev->disk.raid_disk >= 0)
66 fdlist[dev->disk.raid_disk] = fd;
67 else
68 fdlist[next_spare++] = fd;
69 }
70
71 if (!backup_file) {
72 backup_file = locate_backup(content->sys_name);
73 *backup_filep = backup_file;
74 }
75
76 if (st->ss->external && st->ss->recover_backup)
77 err = st->ss->recover_backup(st, content);
78 else
79 err = Grow_restart(st, content, fdlist, next_spare,
80 backup_file, verbose > 0);
81
82 while (next_spare > 0) {
83 next_spare--;
84 if (fdlist[next_spare] >= 0)
85 close(fdlist[next_spare]);
86 }
87 free(fdlist);
88 if (err) {
89 pr_err("Failed to restore critical section for reshape - sorry.\n");
90 if (!backup_file)
91 pr_err("Possibly you need to specify a --backup-file\n");
92 return 1;
93 }
94
95 dprintf("restore_backup() returns status OK.\n");
96 return 0;
97 }
98
99 int Grow_Add_device(char *devname, int fd, char *newdev)
100 {
101 /* Add a device to an active array.
102 * Currently, just extend a linear array.
103 * This requires writing a new superblock on the
104 * new device, calling the kernel to add the device,
105 * and if that succeeds, update the superblock on
106 * all other devices.
107 * This means that we need to *find* all other devices.
108 */
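	/*
	 * Illustrative usage (a hedged example; device names are made up):
	 * growing a linear array by one member is normally requested as
	 *
	 *	mdadm --grow /dev/md0 --add /dev/sdc1
	 *
	 * which reaches this function with 'fd' open on the array and
	 * 'newdev' naming the device to append.
	 */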
109 struct mdinfo info;
110
111 dev_t rdev;
112 int nfd, fd2;
113 int d, nd;
114 struct supertype *st = NULL;
115 char *subarray = NULL;
116
117 if (md_get_array_info(fd, &info.array) < 0) {
118 pr_err("cannot get array info for %s\n", devname);
119 return 1;
120 }
121
122 if (info.array.level != -1) {
123 pr_err("can only add devices to linear arrays\n");
124 return 1;
125 }
126
127 st = super_by_fd(fd, &subarray);
128 if (!st) {
129 pr_err("cannot handle arrays with superblock version %d\n",
130 info.array.major_version);
131 return 1;
132 }
133
134 if (subarray) {
135 pr_err("Cannot grow linear sub-arrays yet\n");
136 free(subarray);
137 free(st);
138 return 1;
139 }
140
141 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
142 if (nfd < 0) {
143 pr_err("cannot open %s\n", newdev);
144 free(st);
145 return 1;
146 }
147 if (!fstat_is_blkdev(nfd, newdev, &rdev)) {
148 close(nfd);
149 free(st);
150 return 1;
151 }
152 /* now check out all the devices and make sure we can read the
153 * superblock */
154 for (d=0 ; d < info.array.raid_disks ; d++) {
155 mdu_disk_info_t disk;
156 char *dv;
157
158 st->ss->free_super(st);
159
160 disk.number = d;
161 if (md_get_disk_info(fd, &disk) < 0) {
162 pr_err("cannot get device detail for device %d\n", d);
163 close(nfd);
164 free(st);
165 return 1;
166 }
167 dv = map_dev(disk.major, disk.minor, 1);
168 if (!dv) {
169 pr_err("cannot find device file for device %d\n", d);
170 close(nfd);
171 free(st);
172 return 1;
173 }
174 fd2 = dev_open(dv, O_RDWR);
175 if (fd2 < 0) {
176 pr_err("cannot open device file %s\n", dv);
177 close(nfd);
178 free(st);
179 return 1;
180 }
181
182 if (st->ss->load_super(st, fd2, NULL)) {
183 pr_err("cannot find super block on %s\n", dv);
184 close(nfd);
185 close(fd2);
186 free(st);
187 return 1;
188 }
189 close(fd2);
190 }
191 /* OK, looks good. Let's update the superblock and write it out to
192 * newdev.
193 */
194
195 info.disk.number = d;
196 info.disk.major = major(rdev);
197 info.disk.minor = minor(rdev);
198 info.disk.raid_disk = d;
199 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
200 st->ss->update_super(st, &info, "linear-grow-new", newdev, 0, 0, NULL);
201
202 if (st->ss->store_super(st, nfd)) {
203 pr_err("Cannot store new superblock on %s\n", newdev);
204 close(nfd);
205 return 1;
206 }
207 close(nfd);
208
209 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
210 pr_err("Cannot add new disk to this array\n");
211 return 1;
212 }
213 /* Well, that seems to have worked.
214 * Now go through and update all superblocks
215 */
216
217 if (md_get_array_info(fd, &info.array) < 0) {
218 pr_err("cannot get array info for %s\n", devname);
219 return 1;
220 }
221
222 nd = d;
223 for (d=0 ; d < info.array.raid_disks ; d++) {
224 mdu_disk_info_t disk;
225 char *dv;
226
227 disk.number = d;
228 if (md_get_disk_info(fd, &disk) < 0) {
229 pr_err("cannot get device detail for device %d\n", d);
230 return 1;
231 }
232 dv = map_dev(disk.major, disk.minor, 1);
233 if (!dv) {
234 pr_err("cannot find device file for device %d\n", d);
235 return 1;
236 }
237 fd2 = dev_open(dv, O_RDWR);
238 if (fd2 < 0) {
239 pr_err("cannot open device file %s\n", dv);
240 return 1;
241 }
242 if (st->ss->load_super(st, fd2, NULL)) {
243 pr_err("cannot find super block on %s\n", dv);
244 close(fd2);
245 return 1;
246 }
247 info.array.raid_disks = nd+1;
248 info.array.nr_disks = nd+1;
249 info.array.active_disks = nd+1;
250 info.array.working_disks = nd+1;
251
252 st->ss->update_super(st, &info, "linear-grow-update", dv,
253 0, 0, NULL);
254
255 if (st->ss->store_super(st, fd2)) {
256 pr_err("Cannot store new superblock on %s\n", dv);
257 close(fd2);
258 return 1;
259 }
260 close(fd2);
261 }
262
263 return 0;
264 }
265
266 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
267 {
268 /*
269 * First check that array doesn't have a bitmap
270 * Then create the bitmap
271 * Then add it
272 *
273 * For internal bitmaps, we need to check the version,
274 * find all the active devices, and write the bitmap block
275 * to all devices
276 */
277 mdu_bitmap_file_t bmf;
278 mdu_array_info_t array;
279 struct supertype *st;
280 char *subarray = NULL;
281 int major = BITMAP_MAJOR_HI;
282 unsigned long long bitmapsize, array_size;
283 struct mdinfo *mdi;
284
285 /*
286 * We only ever get called if s->bitmap_file is != NULL, so this check
287 * is just here to quiet down static code checkers.
288 */
289 if (!s->bitmap_file)
290 return 1;
291
292 if (strcmp(s->bitmap_file, "clustered") == 0)
293 major = BITMAP_MAJOR_CLUSTERED;
294
295 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
296 if (errno == ENOMEM)
297 pr_err("Memory allocation failure.\n");
298 else
299 pr_err("bitmaps not supported by this kernel.\n");
300 return 1;
301 }
302 if (bmf.pathname[0]) {
303 if (strcmp(s->bitmap_file,"none") == 0) {
304 if (ioctl(fd, SET_BITMAP_FILE, -1) != 0) {
305 pr_err("failed to remove bitmap %s\n",
306 bmf.pathname);
307 return 1;
308 }
309 return 0;
310 }
311 pr_err("%s already has a bitmap (%s)\n", devname, bmf.pathname);
312 return 1;
313 }
314 if (md_get_array_info(fd, &array) != 0) {
315 pr_err("cannot get array status for %s\n", devname);
316 return 1;
317 }
318 if (array.state & (1 << MD_SB_BITMAP_PRESENT)) {
319 if (strcmp(s->bitmap_file, "none")==0) {
320 array.state &= ~(1 << MD_SB_BITMAP_PRESENT);
321 if (md_set_array_info(fd, &array) != 0) {
322 if (array.state & (1 << MD_SB_CLUSTERED))
323 pr_err("failed to remove clustered bitmap.\n");
324 else
325 pr_err("failed to remove internal bitmap.\n");
326 return 1;
327 }
328 return 0;
329 }
330 pr_err("bitmap already present on %s\n", devname);
331 return 1;
332 }
333
334 if (strcmp(s->bitmap_file, "none") == 0) {
335 pr_err("no bitmap found on %s\n", devname);
336 return 1;
337 }
338 if (array.level <= 0) {
339 pr_err("Bitmaps not meaningful with level %s\n",
340 map_num(pers, array.level)?:"of this array");
341 return 1;
342 }
343 bitmapsize = array.size;
344 bitmapsize <<= 1;
345 if (get_dev_size(fd, NULL, &array_size) &&
346 array_size > (0x7fffffffULL << 9)) {
347 /* Array is big enough that we cannot trust array.size
348 * try other approaches
349 */
350 bitmapsize = get_component_size(fd);
351 }
352 if (bitmapsize == 0) {
353 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
354 return 1;
355 }
356
357 if (array.level == 10) {
358 int ncopies;
359
360 ncopies = (array.layout & 255) * ((array.layout >> 8) & 255);
361 bitmapsize = bitmapsize * array.raid_disks / ncopies;
362 }
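	/*
	 * Worked example (illustrative numbers): a 4-device RAID10 with the
	 * default near-2 layout has array.layout == 0x102, so
	 * ncopies = 2 * 1 = 2 and the bitmap must cover
	 * bitmapsize * 4 / 2, i.e. the array's usable size rather than a
	 * single component.
	 */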
363
364 st = super_by_fd(fd, &subarray);
365 if (!st) {
366 pr_err("Cannot understand version %d.%d\n",
367 array.major_version, array.minor_version);
368 return 1;
369 }
370 if (subarray) {
371 pr_err("Cannot add bitmaps to sub-arrays yet\n");
372 free(subarray);
373 free(st);
374 return 1;
375 }
376
377 mdi = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY);
378 if (mdi) {
379 if (mdi->consistency_policy == CONSISTENCY_POLICY_PPL) {
380 pr_err("Cannot add bitmap to array with PPL\n");
381 free(mdi);
382 free(st);
383 return 1;
384 }
385 free(mdi);
386 }
387
388 if (strcmp(s->bitmap_file, "internal") == 0 ||
389 strcmp(s->bitmap_file, "clustered") == 0) {
390 int rv;
391 int d;
392 int offset_setable = 0;
393 if (st->ss->add_internal_bitmap == NULL) {
394 pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name);
395 return 1;
396 }
397 st->nodes = c->nodes;
398 st->cluster_name = c->homecluster;
399 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
400 if (mdi)
401 offset_setable = 1;
402 for (d = 0; d < st->max_devs; d++) {
403 mdu_disk_info_t disk;
404 char *dv;
405 int fd2;
406
407 disk.number = d;
408 if (md_get_disk_info(fd, &disk) < 0)
409 continue;
410 if (disk.major == 0 && disk.minor == 0)
411 continue;
412 if ((disk.state & (1 << MD_DISK_SYNC)) == 0)
413 continue;
414 dv = map_dev(disk.major, disk.minor, 1);
415 if (!dv)
416 continue;
417 fd2 = dev_open(dv, O_RDWR);
418 if (fd2 < 0)
419 continue;
420 rv = st->ss->load_super(st, fd2, NULL);
421 if (!rv) {
422 rv = st->ss->add_internal_bitmap(
423 st, &s->bitmap_chunk, c->delay,
424 s->write_behind, bitmapsize,
425 offset_setable, major);
426 if (!rv) {
427 st->ss->write_bitmap(st, fd2,
428 NodeNumUpdate);
429 } else {
430 pr_err("failed to create internal bitmap - chunksize problem.\n");
431 }
432 } else {
433 pr_err("failed to load super-block.\n");
434 }
435 close(fd2);
436 if (rv)
437 return 1;
438 }
439 if (offset_setable) {
440 st->ss->getinfo_super(st, mdi, NULL);
441 if (sysfs_init(mdi, fd, NULL)) {
442 pr_err("failed to intialize sysfs.\n");
443 free(mdi);
444 }
445 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
446 mdi->bitmap_offset);
447 free(mdi);
448 } else {
449 if (strcmp(s->bitmap_file, "clustered") == 0)
450 array.state |= (1 << MD_SB_CLUSTERED);
451 array.state |= (1 << MD_SB_BITMAP_PRESENT);
452 rv = md_set_array_info(fd, &array);
453 }
454 if (rv < 0) {
455 if (errno == EBUSY)
456 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
457 pr_err("failed to set internal bitmap.\n");
458 return 1;
459 }
460 } else {
461 int uuid[4];
462 int bitmap_fd;
463 int d;
464 int max_devs = st->max_devs;
465
466 /* try to load a superblock */
467 for (d = 0; d < max_devs; d++) {
468 mdu_disk_info_t disk;
469 char *dv;
470 int fd2;
471 disk.number = d;
472 if (md_get_disk_info(fd, &disk) < 0)
473 continue;
474 if ((disk.major==0 && disk.minor == 0) ||
475 (disk.state & (1 << MD_DISK_REMOVED)))
476 continue;
477 dv = map_dev(disk.major, disk.minor, 1);
478 if (!dv)
479 continue;
480 fd2 = dev_open(dv, O_RDONLY);
481 if (fd2 >= 0) {
482 if (st->ss->load_super(st, fd2, NULL) == 0) {
483 close(fd2);
484 st->ss->uuid_from_super(st, uuid);
485 break;
486 }
487 close(fd2);
488 }
489 }
490 if (d == max_devs) {
491 pr_err("cannot find UUID for array!\n");
492 return 1;
493 }
494 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid,
495 s->bitmap_chunk, c->delay, s->write_behind,
496 bitmapsize, major)) {
497 return 1;
498 }
499 bitmap_fd = open(s->bitmap_file, O_RDWR);
500 if (bitmap_fd < 0) {
501 pr_err("weird: %s cannot be opened\n", s->bitmap_file);
502 return 1;
503 }
504 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
505 int err = errno;
506 if (errno == EBUSY)
507 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
508 pr_err("Cannot set bitmap file for %s: %s\n",
509 devname, strerror(err));
510 return 1;
511 }
512 }
513
514 return 0;
515 }
516
517 int Grow_consistency_policy(char *devname, int fd, struct context *c, struct shape *s)
518 {
519 struct supertype *st;
520 struct mdinfo *sra;
521 struct mdinfo *sd;
522 char *subarray = NULL;
523 int ret = 0;
524 char container_dev[PATH_MAX];
525 char buf[20];
526
527 if (s->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
528 s->consistency_policy != CONSISTENCY_POLICY_PPL) {
529 pr_err("Operation not supported for consistency policy %s\n",
530 map_num(consistency_policies, s->consistency_policy));
531 return 1;
532 }
533
534 st = super_by_fd(fd, &subarray);
535 if (!st)
536 return 1;
537
538 sra = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY|GET_LEVEL|
539 GET_DEVS|GET_STATE);
540 if (!sra) {
541 ret = 1;
542 goto free_st;
543 }
544
545 if (s->consistency_policy == CONSISTENCY_POLICY_PPL &&
546 !st->ss->write_init_ppl) {
547 pr_err("%s metadata does not support PPL\n", st->ss->name);
548 ret = 1;
549 goto free_info;
550 }
551
552 if (sra->array.level != 5) {
553 pr_err("Operation not supported for array level %d\n",
554 sra->array.level);
555 ret = 1;
556 goto free_info;
557 }
558
559 if (sra->consistency_policy == (unsigned)s->consistency_policy) {
560 pr_err("Consistency policy is already %s\n",
561 map_num(consistency_policies, s->consistency_policy));
562 ret = 1;
563 goto free_info;
564 } else if (sra->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
565 sra->consistency_policy != CONSISTENCY_POLICY_PPL) {
566 pr_err("Current consistency policy is %s, cannot change to %s\n",
567 map_num(consistency_policies, sra->consistency_policy),
568 map_num(consistency_policies, s->consistency_policy));
569 ret = 1;
570 goto free_info;
571 }
572
573 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
574 if (sysfs_get_str(sra, NULL, "sync_action", buf, 20) <= 0) {
575 ret = 1;
576 goto free_info;
577 } else if (strcmp(buf, "reshape\n") == 0) {
578 pr_err("PPL cannot be enabled when reshape is in progress\n");
579 ret = 1;
580 goto free_info;
581 }
582 }
583
584 if (subarray) {
585 char *update;
586
587 if (s->consistency_policy == CONSISTENCY_POLICY_PPL)
588 update = "ppl";
589 else
590 update = "no-ppl";
591
592 sprintf(container_dev, "/dev/%s", st->container_devnm);
593
594 ret = Update_subarray(container_dev, subarray, update, NULL,
595 c->verbose);
596 if (ret)
597 goto free_info;
598 }
599
600 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
601 struct mdinfo info;
602
603 if (subarray) {
604 struct mdinfo *mdi;
605 int cfd;
606
607 cfd = open(container_dev, O_RDWR|O_EXCL);
608 if (cfd < 0) {
609 pr_err("Failed to open %s\n", container_dev);
610 ret = 1;
611 goto free_info;
612 }
613
614 ret = st->ss->load_container(st, cfd, st->container_devnm);
615 close(cfd);
616
617 if (ret) {
618 pr_err("Cannot read superblock for %s\n",
619 container_dev);
620 goto free_info;
621 }
622
623 mdi = st->ss->container_content(st, subarray);
624 info = *mdi;
625 free(mdi);
626 }
627
628 for (sd = sra->devs; sd; sd = sd->next) {
629 int dfd;
630 char *devpath;
631
632 devpath = map_dev(sd->disk.major, sd->disk.minor, 0);
633 dfd = dev_open(devpath, O_RDWR);
634 if (dfd < 0) {
635 pr_err("Failed to open %s\n", devpath);
636 ret = 1;
637 goto free_info;
638 }
639
640 if (!subarray) {
641 ret = st->ss->load_super(st, dfd, NULL);
642 if (ret) {
643 pr_err("Failed to load super-block.\n");
644 close(dfd);
645 goto free_info;
646 }
647
648 ret = st->ss->update_super(st, sra, "ppl",
649 devname,
650 c->verbose, 0, NULL);
651 if (ret) {
652 close(dfd);
653 st->ss->free_super(st);
654 goto free_info;
655 }
656 st->ss->getinfo_super(st, &info, NULL);
657 }
658
659 ret |= sysfs_set_num(sra, sd, "ppl_sector",
660 info.ppl_sector);
661 ret |= sysfs_set_num(sra, sd, "ppl_size",
662 info.ppl_size);
663
664 if (ret) {
665 pr_err("Failed to set PPL attributes for %s\n",
666 sd->sys_name);
667 close(dfd);
668 st->ss->free_super(st);
669 goto free_info;
670 }
671
672 ret = st->ss->write_init_ppl(st, &info, dfd);
673 if (ret)
674 pr_err("Failed to write PPL\n");
675
676 close(dfd);
677
678 if (!subarray)
679 st->ss->free_super(st);
680
681 if (ret)
682 goto free_info;
683 }
684 }
685
686 ret = sysfs_set_str(sra, NULL, "consistency_policy",
687 map_num(consistency_policies,
688 s->consistency_policy));
689 if (ret)
690 pr_err("Failed to change array consistency policy\n");
691
692 free_info:
693 sysfs_free(sra);
694 free_st:
695 free(st);
696 free(subarray);
697
698 return ret;
699 }
700
701 /*
702 * When reshaping an array we might need to back up some data.
703 * This is written to all spares with a 'super_block' describing it.
704 * The superblock goes 4K from the end of the used space on the
705 * device.
706 * It is written after the backup is complete.
707 * It has the following structure.
708 */
709
710 static struct mdp_backup_super {
711 char magic[16]; /* md_backup_data-1 or -2 */
712 __u8 set_uuid[16];
713 __u64 mtime;
714 /* start/sizes in 512byte sectors */
715 __u64 devstart; /* address on backup device/file of data */
716 __u64 arraystart;
717 __u64 length;
718 __u32 sb_csum; /* csum of preceding bytes. */
719 __u32 pad1;
720 __u64 devstart2; /* offset in to data of second section */
721 __u64 arraystart2;
722 __u64 length2;
723 __u32 sb_csum2; /* csum of preceding bytes. */
724 __u8 pad[512-68-32];
725 } __attribute__((aligned(512))) bsb, bsb2;
726
727 static __u32 bsb_csum(char *buf, int len)
728 {
729 int i;
730 int csum = 0;
731 for (i = 0; i < len; i++)
732 csum = (csum<<3) + buf[0];
733 return __cpu_to_le32(csum);
734 }
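/*
 * Sketch of how the checksum is used (hedged; see the call sites later in
 * this file for the authoritative form): sb_csum covers every byte of the
 * superblock that precedes it, roughly
 *
 *	bsb.sb_csum = bsb_csum((char *)&bsb,
 *			       ((char *)&bsb.sb_csum) - ((char *)&bsb));
 *
 * Note that the loop above folds buf[0] on every iteration rather than
 * buf[i]; presumably this is kept as-is for compatibility with backup
 * files written by earlier versions.
 */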
735
736 static int check_idle(struct supertype *st)
737 {
738 /* Check that all member arrays for this container, or the
739 * container of this array, are idle
740 */
741 char *container = (st->container_devnm[0]
742 ? st->container_devnm : st->devnm);
743 struct mdstat_ent *ent, *e;
744 int is_idle = 1;
745
746 ent = mdstat_read(0, 0);
747 for (e = ent ; e; e = e->next) {
748 if (!is_container_member(e, container))
749 continue;
750 if (e->percent >= 0) {
751 is_idle = 0;
752 break;
753 }
754 }
755 free_mdstat(ent);
756 return is_idle;
757 }
758
759 static int freeze_container(struct supertype *st)
760 {
761 char *container = (st->container_devnm[0]
762 ? st->container_devnm : st->devnm);
763
764 if (!check_idle(st))
765 return -1;
766
767 if (block_monitor(container, 1)) {
768 pr_err("failed to freeze container\n");
769 return -2;
770 }
771
772 return 1;
773 }
774
775 static void unfreeze_container(struct supertype *st)
776 {
777 char *container = (st->container_devnm[0]
778 ? st->container_devnm : st->devnm);
779
780 unblock_monitor(container, 1);
781 }
782
783 static int freeze(struct supertype *st)
784 {
785 /* Try to freeze resync/rebuild on this array/container.
786 * Return -1 if the array is busy,
787 * return -2 container cannot be frozen,
788 * return 0 if this kernel doesn't support 'frozen'
789 * return 1 if it worked.
790 */
791 if (st->ss->external)
792 return freeze_container(st);
793 else {
794 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
795 int err;
796 char buf[20];
797
798 if (!sra)
799 return -1;
800 /* Need to clear any 'read-auto' status */
801 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
802 strncmp(buf, "read-auto", 9) == 0)
803 sysfs_set_str(sra, NULL, "array_state", "clean");
804
805 err = sysfs_freeze_array(sra);
806 sysfs_free(sra);
807 return err;
808 }
809 }
810
811 static void unfreeze(struct supertype *st)
812 {
813 if (st->ss->external)
814 return unfreeze_container(st);
815 else {
816 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
817 char buf[20];
818
819 if (sra &&
820 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0 &&
821 strcmp(buf, "frozen\n") == 0)
822 sysfs_set_str(sra, NULL, "sync_action", "idle");
823 sysfs_free(sra);
824 }
825 }
826
827 static void wait_reshape(struct mdinfo *sra)
828 {
829 int fd = sysfs_get_fd(sra, NULL, "sync_action");
830 char action[20];
831
832 if (fd < 0)
833 return;
834
835 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
836 strncmp(action, "reshape", 7) == 0)
837 sysfs_wait(fd, NULL);
838 close(fd);
839 }
840
841 static int reshape_super(struct supertype *st, unsigned long long size,
842 int level, int layout, int chunksize, int raid_disks,
843 int delta_disks, char *backup_file, char *dev,
844 int direction, int verbose)
845 {
846 /* nothing extra to check in the native case */
847 if (!st->ss->external)
848 return 0;
849 if (!st->ss->reshape_super || !st->ss->manage_reshape) {
850 pr_err("%s metadata does not support reshape\n",
851 st->ss->name);
852 return 1;
853 }
854
855 return st->ss->reshape_super(st, size, level, layout, chunksize,
856 raid_disks, delta_disks, backup_file, dev,
857 direction, verbose);
858 }
859
860 static void sync_metadata(struct supertype *st)
861 {
862 if (st->ss->external) {
863 if (st->update_tail) {
864 flush_metadata_updates(st);
865 st->update_tail = &st->updates;
866 } else
867 st->ss->sync_metadata(st);
868 }
869 }
870
871 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
872 {
873 /* when dealing with external metadata subarrays we need to be
874 * prepared to handle EAGAIN. The kernel may need to wait for
875 * mdmon to mark the array active so the kernel can handle
876 * allocations/writeback when preparing the reshape action
877 * (md_allow_write()). We temporarily disable safe_mode_delay
878 * to close a race with the array_state going clean before the
879 * next write to raid_disks / stripe_cache_size
880 */
881 char safe[50];
882 int rc;
883
884 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
885 if (!container ||
886 (strcmp(name, "raid_disks") != 0 &&
887 strcmp(name, "stripe_cache_size") != 0))
888 return sysfs_set_num(sra, NULL, name, n);
889
890 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
891 if (rc <= 0)
892 return -1;
893 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
894 rc = sysfs_set_num(sra, NULL, name, n);
895 if (rc < 0 && errno == EAGAIN) {
896 ping_monitor(container);
897 /* if we get EAGAIN here then the monitor is not active
898 * so stop trying
899 */
900 rc = sysfs_set_num(sra, NULL, name, n);
901 }
902 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
903 return rc;
904 }
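/*
 * Hypothetical call, shown only to illustrate why the wrapper exists (the
 * real call sites are in the reshape code later in this file):
 *
 *	subarray_set_num(container, sra, "raid_disks", new_disks);
 *
 * Routing raid_disks / stripe_cache_size updates through here applies the
 * safe_mode_delay and EAGAIN handling described above.
 */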
905
906 int start_reshape(struct mdinfo *sra, int already_running,
907 int before_data_disks, int data_disks)
908 {
909 int err;
910 unsigned long long sync_max_to_set;
911
912 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
913 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
914 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
915 sra->reshape_progress);
916 if (before_data_disks <= data_disks)
917 sync_max_to_set = sra->reshape_progress / data_disks;
918 else
919 sync_max_to_set = (sra->component_size * data_disks
920 - sra->reshape_progress) / data_disks;
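	/* Worked example (illustrative numbers): when growing from 3 to 4
	 * data disks with reshape_progress at 8192 array sectors,
	 * sync_max_to_set = 8192 / 4 = 2048 per-device sectors.
	 */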
921 if (!already_running)
922 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
923 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
924 if (!already_running && err == 0) {
925 int cnt = 5;
926 do {
927 err = sysfs_set_str(sra, NULL, "sync_action",
928 "reshape");
929 if (err)
930 sleep(1);
931 } while (err && errno == EBUSY && cnt-- > 0);
932 }
933 return err;
934 }
935
936 void abort_reshape(struct mdinfo *sra)
937 {
938 sysfs_set_str(sra, NULL, "sync_action", "idle");
939 /*
940 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
941 * suspend_hi to decrease as well as increase.")
942 * you could only increase suspend_{lo,hi} unless the region they
943 * covered was empty. So to reset to 0, you need to push suspend_lo
944 * up past suspend_hi first. So to maximize the chance of mdadm
945 * working on all kernels, we want to keep doing that.
946 */
947 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
948 sysfs_set_num(sra, NULL, "suspend_hi", 0);
949 sysfs_set_num(sra, NULL, "suspend_lo", 0);
950 sysfs_set_num(sra, NULL, "sync_min", 0);
951 // It isn't safe to reset sync_max as we aren't monitoring.
952 // Array really should be stopped at this point.
953 }
954
955 int remove_disks_for_takeover(struct supertype *st,
956 struct mdinfo *sra,
957 int layout)
958 {
959 int nr_of_copies;
960 struct mdinfo *remaining;
961 int slot;
962
963 if (st->ss->external) {
964 int rv = 0;
965 struct mdinfo *arrays = st->ss->container_content(st, NULL);
966 /*
967 * container_content returns the list of arrays in the container.
968 * If arrays->next is not NULL there are at least
969 * 2 arrays in the container and the operation should be blocked
970 */
971 if (arrays) {
972 if (arrays->next)
973 rv = 1;
974 sysfs_free(arrays);
975 if (rv) {
976 pr_err("Error. Cannot perform operation on /dev/%s\n", st->devnm);
977 pr_err("For this operation it MUST be single array in container\n");
978 return rv;
979 }
980 }
981 }
982
983 if (sra->array.level == 10)
984 nr_of_copies = layout & 0xff;
985 else if (sra->array.level == 1)
986 nr_of_copies = sra->array.raid_disks;
987 else
988 return 1;
989
990 remaining = sra->devs;
991 sra->devs = NULL;
992 /* for each 'copy', select one device and remove from the list. */
993 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
994 struct mdinfo **diskp;
995 int found = 0;
996
997 /* Find a working device to keep */
998 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
999 struct mdinfo *disk = *diskp;
1000
1001 if (disk->disk.raid_disk < slot)
1002 continue;
1003 if (disk->disk.raid_disk >= slot + nr_of_copies)
1004 continue;
1005 if (disk->disk.state & (1<<MD_DISK_REMOVED))
1006 continue;
1007 if (disk->disk.state & (1<<MD_DISK_FAULTY))
1008 continue;
1009 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
1010 continue;
1011
1012 /* We have found a good disk to use! */
1013 *diskp = disk->next;
1014 disk->next = sra->devs;
1015 sra->devs = disk;
1016 found = 1;
1017 break;
1018 }
1019 if (!found)
1020 break;
1021 }
1022
1023 if (slot < sra->array.raid_disks) {
1024 /* didn't find all slots */
1025 struct mdinfo **e;
1026 e = &remaining;
1027 while (*e)
1028 e = &(*e)->next;
1029 *e = sra->devs;
1030 sra->devs = remaining;
1031 return 1;
1032 }
1033
1034 /* Remove all 'remaining' devices from the array */
1035 while (remaining) {
1036 struct mdinfo *sd = remaining;
1037 remaining = sd->next;
1038
1039 sysfs_set_str(sra, sd, "state", "faulty");
1040 sysfs_set_str(sra, sd, "slot", "none");
1041 /* for external metadata, disks should be removed by mdmon */
1042 if (!st->ss->external)
1043 sysfs_set_str(sra, sd, "state", "remove");
1044 sd->disk.state |= (1<<MD_DISK_REMOVED);
1045 sd->disk.state &= ~(1<<MD_DISK_SYNC);
1046 sd->next = sra->devs;
1047 sra->devs = sd;
1048 }
1049 return 0;
1050 }
1051
1052 void reshape_free_fdlist(int *fdlist,
1053 unsigned long long *offsets,
1054 int size)
1055 {
1056 int i;
1057
1058 for (i = 0; i < size; i++)
1059 if (fdlist[i] >= 0)
1060 close(fdlist[i]);
1061
1062 free(fdlist);
1063 free(offsets);
1064 }
1065
1066 int reshape_prepare_fdlist(char *devname,
1067 struct mdinfo *sra,
1068 int raid_disks,
1069 int nrdisks,
1070 unsigned long blocks,
1071 char *backup_file,
1072 int *fdlist,
1073 unsigned long long *offsets)
1074 {
1075 int d = 0;
1076 struct mdinfo *sd;
1077
1078 enable_fds(nrdisks);
1079 for (d = 0; d <= nrdisks; d++)
1080 fdlist[d] = -1;
1081 d = raid_disks;
1082 for (sd = sra->devs; sd; sd = sd->next) {
1083 if (sd->disk.state & (1<<MD_DISK_FAULTY))
1084 continue;
1085 if (sd->disk.state & (1<<MD_DISK_SYNC) &&
1086 sd->disk.raid_disk < raid_disks) {
1087 char *dn = map_dev(sd->disk.major, sd->disk.minor, 1);
1088 fdlist[sd->disk.raid_disk] = dev_open(dn, O_RDONLY);
1089 offsets[sd->disk.raid_disk] = sd->data_offset*512;
1090 if (fdlist[sd->disk.raid_disk] < 0) {
1091 pr_err("%s: cannot open component %s\n",
1092 devname, dn ? dn : "-unknown-");
1093 d = -1;
1094 goto release;
1095 }
1096 } else if (backup_file == NULL) {
1097 /* spare */
1098 char *dn = map_dev(sd->disk.major, sd->disk.minor, 1);
1099 fdlist[d] = dev_open(dn, O_RDWR);
1100 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
1101 if (fdlist[d] < 0) {
1102 pr_err("%s: cannot open component %s\n",
1103 devname, dn ? dn : "-unknown-");
1104 d = -1;
1105 goto release;
1106 }
1107 d++;
1108 }
1109 }
1110 release:
1111 return d;
1112 }
1113
1114 int reshape_open_backup_file(char *backup_file,
1115 int fd,
1116 char *devname,
1117 long blocks,
1118 int *fdlist,
1119 unsigned long long *offsets,
1120 char *sys_name,
1121 int restart)
1122 {
1123 /* Return 1 on success, 0 on any form of failure */
1124 /* need to check backup file is large enough */
1125 char buf[512];
1126 struct stat stb;
1127 unsigned int dev;
1128 int i;
1129
1130 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
1131 S_IRUSR | S_IWUSR);
1132 *offsets = 8 * 512;
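	/* The data sections start 8 sectors (4 KiB) into the backup file;
	 * the leading 4 KiB appears to be reserved for the backup
	 * superblock (struct mdp_backup_super above).
	 */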
1133 if (*fdlist < 0) {
1134 pr_err("%s: cannot create backup file %s: %s\n",
1135 devname, backup_file, strerror(errno));
1136 return 0;
1137 }
1138 /* Guard against backup file being on array device.
1139 * If array is partitioned or if LVM etc is in the
1140 * way this will not notice, but it is better than
1141 * nothing.
1142 */
1143 fstat(*fdlist, &stb);
1144 dev = stb.st_dev;
1145 fstat(fd, &stb);
1146 if (stb.st_rdev == dev) {
1147 pr_err("backup file must NOT be on the array being reshaped.\n");
1148 close(*fdlist);
1149 return 0;
1150 }
1151
1152 memset(buf, 0, 512);
1153 for (i=0; i < blocks + 8 ; i++) {
1154 if (write(*fdlist, buf, 512) != 512) {
1155 pr_err("%s: cannot create backup file %s: %s\n",
1156 devname, backup_file, strerror(errno));
1157 return 0;
1158 }
1159 }
1160 if (fsync(*fdlist) != 0) {
1161 pr_err("%s: cannot create backup file %s: %s\n",
1162 devname, backup_file, strerror(errno));
1163 return 0;
1164 }
1165
1166 if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) {
1167 char *bu = make_backup(sys_name);
1168 if (symlink(backup_file, bu))
1169 pr_err("Recording backup file in " MAP_DIR " failed: %s\n",
1170 strerror(errno));
1171 free(bu);
1172 }
1173
1174 return 1;
1175 }
1176
1177 unsigned long compute_backup_blocks(int nchunk, int ochunk,
1178 unsigned int ndata, unsigned int odata)
1179 {
1180 unsigned long a, b, blocks;
1181 /* So how much do we need to back up?
1182 * We need an amount of data which is both a whole number of
1183 * old stripes and a whole number of new stripes.
1184 * So take the LCM of the old and new (chunksize*datadisks).
1185 */
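	/* Worked example (illustrative numbers): reshaping 3 -> 4 data
	 * disks with 64K chunks gives a = 128*3 = 384 and b = 128*4 = 512
	 * sectors; GCD(384, 512) = 128, so blocks = 128*128*3*4 / 128 =
	 * 1536 sectors, i.e. 4 old stripes and 3 new stripes.
	 */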
1186 a = (ochunk/512) * odata;
1187 b = (nchunk/512) * ndata;
1188 /* Find GCD */
1189 a = GCD(a, b);
1190 /* LCM == product / GCD */
1191 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
1192
1193 return blocks;
1194 }
1195
1196 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
1197 {
1198 /* Based on the current array state in info->array and
1199 * the changes in info->new_* etc, determine:
1200 * - whether the change is possible
1201 * - Intermediate level/raid_disks/layout
1202 * - whether a restriping reshape is needed
1203 * - number of sectors in minimum change unit. This
1204 * will cover a whole number of stripes in 'before' and
1205 * 'after'.
1206 *
1207 * Return message if the change should be rejected
1208 * NULL if the change can be achieved
1209 *
1210 * This can be called as part of starting a reshape, or
1211 * when assembling an array that is undergoing reshape.
1212 */
1213 int near, far, offset, copies;
1214 int new_disks;
1215 int old_chunk, new_chunk;
1216 /* delta_parity records change in number of devices
1217 * caused by level change
1218 */
1219 int delta_parity = 0;
1220
1221 memset(re, 0, sizeof(*re));
1222
1223 /* If a new level is not explicitly given, we assume no change */
1224 if (info->new_level == UnSet)
1225 info->new_level = info->array.level;
1226
1227 if (info->new_chunk)
1228 switch (info->new_level) {
1229 case 0:
1230 case 4:
1231 case 5:
1232 case 6:
1233 case 10:
1234 /* chunk size is meaningful, must divide component_size
1235 * evenly
1236 */
1237 if (info->component_size % (info->new_chunk/512)) {
1238 unsigned long long shrink = info->component_size;
1239 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1240 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1241 info->new_chunk/1024, info->component_size/2);
1242 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1243 devname, shrink/2);
1244 pr_err("will shrink the array so the given chunk size would work.\n");
1245 return "";
1246 }
1247 break;
1248 default:
1249 return "chunk size not meaningful for this level";
1250 }
1251 else
1252 info->new_chunk = info->array.chunk_size;
1253
1254 switch (info->array.level) {
1255 default:
1256 return "No reshape is possibly for this RAID level";
1257 case LEVEL_LINEAR:
1258 if (info->delta_disks != UnSet)
1259 return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
1260 else
1261 return "Only --add is supported for LINEAR, other --grow options are not meaningful";
1262 case 1:
1263 /* RAID1 can convert to RAID1 with different disks, or
1264 * raid5 with 2 disks, or
1265 * raid0 with 1 disk
1266 */
1267 if (info->new_level > 1 && (info->component_size & 7))
1268 return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
1269 if (info->new_level == 0) {
1270 if (info->delta_disks != UnSet &&
1271 info->delta_disks != 0)
1272 return "Cannot change number of disks with RAID1->RAID0 conversion";
1273 re->level = 0;
1274 re->before.data_disks = 1;
1275 re->after.data_disks = 1;
1276 return NULL;
1277 }
1278 if (info->new_level == 1) {
1279 if (info->delta_disks == UnSet)
1280 /* Don't know what to do */
1281 return "no change requested for Growing RAID1";
1282 re->level = 1;
1283 return NULL;
1284 }
1285 if (info->array.raid_disks != 2 && info->new_level == 5)
1286 return "Can only convert a 2-device array to RAID5";
1287 if (info->array.raid_disks == 2 && info->new_level == 5) {
1288 re->level = 5;
1289 re->before.data_disks = 1;
1290 if (info->delta_disks != UnSet &&
1291 info->delta_disks != 0)
1292 re->after.data_disks = 1 + info->delta_disks;
1293 else
1294 re->after.data_disks = 1;
1295 if (re->after.data_disks < 1)
1296 return "Number of disks too small for RAID5";
1297
1298 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1299 info->array.chunk_size = 65536;
1300 break;
1301 }
1302 /* Could do some multi-stage conversions, but leave that to
1303 * later.
1304 */
1305 return "Impossibly level change request for RAID1";
1306
1307 case 10:
1308 /* RAID10 can be converted from near mode to
1309 * RAID0 by removing some devices.
1310 * It can also be reshaped if the kernel supports
1311 * new_data_offset.
1312 */
1313 switch (info->new_level) {
1314 case 0:
1315 if ((info->array.layout & ~0xff) != 0x100)
1316 return "Cannot Grow RAID10 with far/offset layout";
1317 /*
1318 * number of devices must be multiple of
1319 * number of copies
1320 */
1321 if (info->array.raid_disks %
1322 (info->array.layout & 0xff))
1323 return "RAID10 layout too complex for Grow operation";
1324
1325 new_disks = (info->array.raid_disks /
1326 (info->array.layout & 0xff));
1327 if (info->delta_disks == UnSet)
1328 info->delta_disks = (new_disks
1329 - info->array.raid_disks);
1330
1331 if (info->delta_disks !=
1332 new_disks - info->array.raid_disks)
1333 return "New number of raid-devices impossible for RAID10";
1334 if (info->new_chunk &&
1335 info->new_chunk != info->array.chunk_size)
1336 return "Cannot change chunk-size with RAID10 Grow";
1337
1338 /* looks good */
1339 re->level = 0;
1340 re->before.data_disks = new_disks;
1341 re->after.data_disks = re->before.data_disks;
1342 return NULL;
1343
1344 case 10:
1345 near = info->array.layout & 0xff;
1346 far = (info->array.layout >> 8) & 0xff;
1347 offset = info->array.layout & 0x10000;
1348 if (far > 1 && !offset)
1349 return "Cannot reshape RAID10 in far-mode";
1350 copies = near * far;
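		/* e.g. the default "n2" layout is 0x102: near = 2, far = 1,
		 * offset bit (0x10000) clear, so copies = 2; an "f2" layout
		 * is 0x201: near = 1, far = 2.
		 */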
1351
1352 old_chunk = info->array.chunk_size * far;
1353
1354 if (info->new_layout == UnSet)
1355 info->new_layout = info->array.layout;
1356 else {
1357 near = info->new_layout & 0xff;
1358 far = (info->new_layout >> 8) & 0xff;
1359 offset = info->new_layout & 0x10000;
1360 if (far > 1 && !offset)
1361 return "Cannot reshape RAID10 to far-mode";
1362 if (near * far != copies)
1363 return "Cannot change number of copies when reshaping RAID10";
1364 }
1365 if (info->delta_disks == UnSet)
1366 info->delta_disks = 0;
1367 new_disks = (info->array.raid_disks +
1368 info->delta_disks);
1369
1370 new_chunk = info->new_chunk * far;
1371
1372 re->level = 10;
1373 re->before.layout = info->array.layout;
1374 re->before.data_disks = info->array.raid_disks;
1375 re->after.layout = info->new_layout;
1376 re->after.data_disks = new_disks;
1377 /* For RAID10 we don't do backup but do allow reshape,
1378 * so set backup_blocks to INVALID_SECTORS rather than
1379 * zero.
1380 * And there is no need to synchronise stripes on both
1381 * 'old' and 'new'. So the important
1382 * number is the minimum data_offset difference
1383 * which is the larger of the old and new (far copies * chunk).
1384 */
1385 re->backup_blocks = INVALID_SECTORS;
1386 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1387 if (new_disks < re->before.data_disks &&
1388 info->space_after < re->min_offset_change)
1389 /* Reduce component size by one chunk */
1390 re->new_size = (info->component_size -
1391 re->min_offset_change);
1392 else
1393 re->new_size = info->component_size;
1394 re->new_size = re->new_size * new_disks / copies;
1395 return NULL;
1396
1397 default:
1398 return "RAID10 can only be changed to RAID0";
1399 }
1400 case 0:
1401 /* RAID0 can be converted to RAID10, or to RAID456 */
1402 if (info->new_level == 10) {
1403 if (info->new_layout == UnSet &&
1404 info->delta_disks == UnSet) {
1405 /* Assume near=2 layout */
1406 info->new_layout = 0x102;
1407 info->delta_disks = info->array.raid_disks;
1408 }
1409 if (info->new_layout == UnSet) {
1410 int copies = 1 + (info->delta_disks
1411 / info->array.raid_disks);
1412 if (info->array.raid_disks * (copies-1) !=
1413 info->delta_disks)
1414 return "Impossible number of devices for RAID0->RAID10";
1415 info->new_layout = 0x100 + copies;
1416 }
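			/* Worked example (illustrative): a 2-disk RAID0 grown
			 * with --raid-devices=4 arrives here with
			 * delta_disks = 2, so copies = 1 + 2/2 = 2 and
			 * new_layout = 0x102 (near-2 across 4 devices).
			 */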
1417 if (info->delta_disks == UnSet) {
1418 int copies = info->new_layout & 0xff;
1419 if (info->new_layout != 0x100 + copies)
1420 return "New layout impossible for RAID0->RAID10";;
1421 info->delta_disks = (copies - 1) *
1422 info->array.raid_disks;
1423 }
1424 if (info->new_chunk &&
1425 info->new_chunk != info->array.chunk_size)
1426 return "Cannot change chunk-size with RAID0->RAID10";
1427 /* looks good */
1428 re->level = 10;
1429 re->before.data_disks = (info->array.raid_disks +
1430 info->delta_disks);
1431 re->after.data_disks = re->before.data_disks;
1432 re->before.layout = info->new_layout;
1433 return NULL;
1434 }
1435
1436 /* RAID0 can also convert to RAID0/4/5/6 by first converting to
1437 * a raid4 style layout of the final level.
1438 */
1439 switch (info->new_level) {
1440 case 4:
1441 delta_parity = 1;
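			/* FALLTHROUGH */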
1442 case 0:
1443 re->level = 4;
1444 re->before.layout = 0;
1445 break;
1446 case 5:
1447 delta_parity = 1;
1448 re->level = 5;
1449 re->before.layout = ALGORITHM_PARITY_N;
1450 if (info->new_layout == UnSet)
1451 info->new_layout = map_name(r5layout, "default");
1452 break;
1453 case 6:
1454 delta_parity = 2;
1455 re->level = 6;
1456 re->before.layout = ALGORITHM_PARITY_N;
1457 if (info->new_layout == UnSet)
1458 info->new_layout = map_name(r6layout, "default");
1459 break;
1460 default:
1461 return "Impossible level change requested";
1462 }
1463 re->before.data_disks = info->array.raid_disks;
1464 /* determining 'after' layout happens outside this 'switch' */
1465 break;
1466
1467 case 4:
1468 info->array.layout = ALGORITHM_PARITY_N;
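		/* FALLTHROUGH */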
1469 case 5:
1470 switch (info->new_level) {
1471 case 0:
1472 delta_parity = -1;
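			/* FALLTHROUGH */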
1473 case 4:
1474 re->level = info->array.level;
1475 re->before.data_disks = info->array.raid_disks - 1;
1476 re->before.layout = info->array.layout;
1477 break;
1478 case 5:
1479 re->level = 5;
1480 re->before.data_disks = info->array.raid_disks - 1;
1481 re->before.layout = info->array.layout;
1482 break;
1483 case 6:
1484 delta_parity = 1;
1485 re->level = 6;
1486 re->before.data_disks = info->array.raid_disks - 1;
1487 switch (info->array.layout) {
1488 case ALGORITHM_LEFT_ASYMMETRIC:
1489 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1490 break;
1491 case ALGORITHM_RIGHT_ASYMMETRIC:
1492 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1493 break;
1494 case ALGORITHM_LEFT_SYMMETRIC:
1495 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1496 break;
1497 case ALGORITHM_RIGHT_SYMMETRIC:
1498 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1499 break;
1500 case ALGORITHM_PARITY_0:
1501 re->before.layout = ALGORITHM_PARITY_0_6;
1502 break;
1503 case ALGORITHM_PARITY_N:
1504 re->before.layout = ALGORITHM_PARITY_N_6;
1505 break;
1506 default:
1507 return "Cannot convert an array with this layout";
1508 }
1509 break;
1510 case 1:
1511 if (info->array.raid_disks != 2)
1512 return "Can only convert a 2-device array to RAID1";
1513 if (info->delta_disks != UnSet &&
1514 info->delta_disks != 0)
1515 return "Cannot set raid_disk when converting RAID5->RAID1";
1516 re->level = 1;
1517 info->new_chunk = 0;
1518 return NULL;
1519 default:
1520 return "Impossible level change requested";
1521 }
1522 break;
1523 case 6:
1524 switch (info->new_level) {
1525 case 4:
1526 case 5:
1527 delta_parity = -1;
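			/* FALLTHROUGH */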
1528 case 6:
1529 re->level = 6;
1530 re->before.data_disks = info->array.raid_disks - 2;
1531 re->before.layout = info->array.layout;
1532 break;
1533 default:
1534 return "Impossible level change requested";
1535 }
1536 break;
1537 }
1538
1539 /* If we reached here then it looks like a re-stripe is
1540 * happening. We have determined the intermediate level
1541 * and initial raid_disks/layout and stored these in 're'.
1542 *
1543 * We need to deduce the final layout that can be atomically
1544 * converted to the end state.
1545 */
1546 switch (info->new_level) {
1547 case 0:
1548 /* We can only get to RAID0 from RAID4 or RAID5
1549 * with appropriate layout and one extra device
1550 */
1551 if (re->level != 4 && re->level != 5)
1552 return "Cannot covert to RAID0 from this level";
1553
1554 switch (re->level) {
1555 case 4:
1556 re->before.layout = 0;
1557 re->after.layout = 0;
1558 break;
1559 case 5:
1560 re->after.layout = ALGORITHM_PARITY_N;
1561 break;
1562 }
1563 break;
1564
1565 case 4:
1566 /* We can only get to RAID4 from RAID5 */
1567 if (re->level != 4 && re->level != 5)
1568 return "Cannot convert to RAID4 from this level";
1569
1570 switch (re->level) {
1571 case 4:
1572 re->after.layout = 0;
1573 break;
1574 case 5:
1575 re->after.layout = ALGORITHM_PARITY_N;
1576 break;
1577 }
1578 break;
1579
1580 case 5:
1581 /* We get to RAID5 from RAID5 or RAID6 */
1582 if (re->level != 5 && re->level != 6)
1583 return "Cannot convert to RAID5 from this level";
1584
1585 switch (re->level) {
1586 case 5:
1587 if (info->new_layout == UnSet)
1588 re->after.layout = re->before.layout;
1589 else
1590 re->after.layout = info->new_layout;
1591 break;
1592 case 6:
1593 if (info->new_layout == UnSet)
1594 info->new_layout = re->before.layout;
1595
1596 /* after.layout needs to be raid6 version of new_layout */
1597 if (info->new_layout == ALGORITHM_PARITY_N)
1598 re->after.layout = ALGORITHM_PARITY_N;
1599 else {
1600 char layout[40];
1601 char *ls = map_num(r5layout, info->new_layout);
1602 int l;
1603 if (ls) {
1604 /* Current RAID6 layout has a RAID5
1605 * equivalent - good
1606 */
1607 strcat(strcpy(layout, ls), "-6");
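					/* e.g. "left-symmetric" becomes
					 * "left-symmetric-6" here
					 */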
1608 l = map_name(r6layout, layout);
1609 if (l == UnSet)
1610 return "Cannot find RAID6 layout to convert to";
1611 } else {
1612 /* Current RAID6 has no equivalent.
1613 * If it is already a '-6' layout we
1614 * can leave it unchanged, else we must
1615 * fail
1616 */
1617 ls = map_num(r6layout,
1618 info->new_layout);
1619 if (!ls ||
1620 strcmp(ls+strlen(ls)-2, "-6") != 0)
1621 return "Please specify new layout";
1622 l = info->new_layout;
1623 }
1624 re->after.layout = l;
1625 }
1626 }
1627 break;
1628
1629 case 6:
1630 /* We must already be at level 6 */
1631 if (re->level != 6)
1632 return "Impossible level change";
1633 if (info->new_layout == UnSet)
1634 re->after.layout = info->array.layout;
1635 else
1636 re->after.layout = info->new_layout;
1637 break;
1638 default:
1639 return "Impossible level change requested";
1640 }
1641 if (info->delta_disks == UnSet)
1642 info->delta_disks = delta_parity;
1643
1644 re->after.data_disks =
1645 (re->before.data_disks + info->delta_disks - delta_parity);
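	/* Worked example (illustrative): converting a 4-device RAID5 to
	 * RAID6 sets delta_parity = 1; with delta_disks defaulting to 1,
	 * before.data_disks = 3 and after.data_disks = 3 + 1 - 1 = 3, so
	 * capacity is unchanged and the extra device carries the second
	 * parity.
	 */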
1646
1647 switch (re->level) {
1648 case 6:
1649 re->parity = 2;
1650 break;
1651 case 4:
1652 case 5:
1653 re->parity = 1;
1654 break;
1655 default:
1656 re->parity = 0;
1657 break;
1658 }
1659 /* So we have a restripe operation; we need to calculate the number
1660 * of blocks per reshape operation.
1661 */
1662 re->new_size = info->component_size * re->before.data_disks;
1663 if (info->new_chunk == 0)
1664 info->new_chunk = info->array.chunk_size;
1665 if (re->after.data_disks == re->before.data_disks &&
1666 re->after.layout == re->before.layout &&
1667 info->new_chunk == info->array.chunk_size) {
1668 /* Nothing to change, can change level immediately. */
1669 re->level = info->new_level;
1670 re->backup_blocks = 0;
1671 return NULL;
1672 }
1673 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1674 /* chunk and layout changes make no difference */
1675 re->level = info->new_level;
1676 re->backup_blocks = 0;
1677 return NULL;
1678 }
1679
1680 if (re->after.data_disks == re->before.data_disks &&
1681 get_linux_version() < 2006032)
1682 return "in-place reshape is not safe before 2.6.32 - sorry.";
1683
1684 if (re->after.data_disks < re->before.data_disks &&
1685 get_linux_version() < 2006030)
1686 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1687
1688 re->backup_blocks = compute_backup_blocks(
1689 info->new_chunk, info->array.chunk_size,
1690 re->after.data_disks, re->before.data_disks);
1691 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1692
1693 re->new_size = info->component_size * re->after.data_disks;
1694 return NULL;
1695 }
1696
1697 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1698 char *text_version)
1699 {
1700 struct mdinfo *info;
1701 char *subarray;
1702 int ret_val = -1;
1703
1704 if ((st == NULL) || (sra == NULL))
1705 return ret_val;
1706
1707 if (text_version == NULL)
1708 text_version = sra->text_version;
1709 subarray = strchr(text_version + 1, '/')+1;
1710 info = st->ss->container_content(st, subarray);
1711 if (info) {
1712 unsigned long long current_size = 0;
1713 unsigned long long new_size = info->custom_array_size/2;
1714
1715 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1716 new_size > current_size) {
1717 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1718 < 0)
1719 dprintf("Error: Cannot set array size");
1720 else {
1721 ret_val = 0;
1722 dprintf("Array size changed");
1723 }
1724 dprintf_cont(" from %llu to %llu.\n",
1725 current_size, new_size);
1726 }
1727 sysfs_free(info);
1728 } else
1729 dprintf("Error: set_array_size(): info pointer in NULL\n");
1730
1731 return ret_val;
1732 }
1733
1734 static int reshape_array(char *container, int fd, char *devname,
1735 struct supertype *st, struct mdinfo *info,
1736 int force, struct mddev_dev *devlist,
1737 unsigned long long data_offset,
1738 char *backup_file, int verbose, int forked,
1739 int restart, int freeze_reshape);
1740 static int reshape_container(char *container, char *devname,
1741 int mdfd,
1742 struct supertype *st,
1743 struct mdinfo *info,
1744 int force,
1745 char *backup_file, int verbose,
1746 int forked, int restart, int freeze_reshape);
1747
1748 int Grow_reshape(char *devname, int fd,
1749 struct mddev_dev *devlist,
1750 unsigned long long data_offset,
1751 struct context *c, struct shape *s)
1752 {
1753 /* Make some changes in the shape of an array.
1754 * The kernel must support the change.
1755 *
1756 * There are three different changes. Each can trigger
1757 * a resync or recovery so we freeze that until we have
1758 * requested everything (if kernel supports freezing - 2.6.30).
1759 * The steps are:
1760 * - change size (i.e. component_size)
1761 * - change level
1762 * - change layout/chunksize/ndisks
1763 *
1764 * The last can require a reshape. It is different on different
1765 * levels so we need to check the level before actioning it.
1766 * Sometimes the level change needs to be requested after the
1767 * reshape (e.g. raid6->raid5, raid5->raid0)
1768 *
1769 */
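	/* Illustrative invocations that end up here (hedged examples;
	 * device names are made up):
	 *
	 *	mdadm --grow /dev/md0 --size=max
	 *	mdadm --grow /dev/md0 --level=6 --raid-devices=5 \
	 *		--backup-file=/root/md0-grow.bak
	 *	mdadm --grow /dev/md0 --chunk=128
	 */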
1770 struct mdu_array_info_s array;
1771 int rv = 0;
1772 struct supertype *st;
1773 char *subarray = NULL;
1774
1775 int frozen;
1776 int changed = 0;
1777 char *container = NULL;
1778 int cfd = -1;
1779
1780 struct mddev_dev *dv;
1781 int added_disks;
1782
1783 struct mdinfo info;
1784 struct mdinfo *sra;
1785
1786 if (md_get_array_info(fd, &array) < 0) {
1787 pr_err("%s is not an active md array - aborting\n",
1788 devname);
1789 return 1;
1790 }
1791 if (data_offset != INVALID_SECTORS && array.level != 10 &&
1792 (array.level < 4 || array.level > 6)) {
1793 pr_err("--grow --data-offset not yet supported\n");
1794 return 1;
1795 }
1796
1797 if (s->size > 0 &&
1798 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1799 pr_err("cannot change component size at the same time as other changes.\n"
1800 " Change size first, then check data is intact before making other changes.\n");
1801 return 1;
1802 }
1803
1804 if (s->raiddisks && s->raiddisks < array.raid_disks &&
1805 array.level > 1 && get_linux_version() < 2006032 &&
1806 !check_env("MDADM_FORCE_FEWER")) {
1807 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1808 " Please use a newer kernel\n");
1809 return 1;
1810 }
1811
1812 if (array.level > 1 && s->size > 0 &&
1813 (array.chunk_size / 1024) > (int)s->size) {
1814 pr_err("component size must be larger than chunk size.\n");
1815 return 1;
1816 }
1817
1818 st = super_by_fd(fd, &subarray);
1819 if (!st) {
1820 pr_err("Unable to determine metadata format for %s\n", devname);
1821 return 1;
1822 }
1823 if (s->raiddisks > st->max_devs) {
1824 pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs);
1825 return 1;
1826 }
1827 if (s->level == 0 &&
1828 (array.state & (1<<MD_SB_BITMAP_PRESENT)) &&
1829 !(array.state & (1<<MD_SB_CLUSTERED))) {
1830 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
1831 if (md_set_array_info(fd, &array)!= 0) {
1832 pr_err("failed to remove internal bitmap.\n");
1833 return 1;
1834 }
1835 }
1836
1837 /* in the external case we need to check that the requested reshape is
1838 * supported, and perform an initial check that the container holds the
1839 * pre-requisite spare devices (mdmon owns final validation)
1840 */
1841 if (st->ss->external) {
1842 int retval;
1843
1844 if (subarray) {
1845 container = st->container_devnm;
1846 cfd = open_dev_excl(st->container_devnm);
1847 } else {
1848 container = st->devnm;
1849 close(fd);
1850 cfd = open_dev_excl(st->devnm);
1851 fd = cfd;
1852 }
1853 if (cfd < 0) {
1854 pr_err("Unable to open container for %s\n", devname);
1855 free(subarray);
1856 return 1;
1857 }
1858
1859 retval = st->ss->load_container(st, cfd, NULL);
1860
1861 if (retval) {
1862 pr_err("Cannot read superblock for %s\n", devname);
1863 free(subarray);
1864 return 1;
1865 }
1866
1867 /* check if operation is supported for metadata handler */
1868 if (st->ss->container_content) {
1869 struct mdinfo *cc = NULL;
1870 struct mdinfo *content = NULL;
1871
1872 cc = st->ss->container_content(st, subarray);
1873 for (content = cc; content ; content = content->next) {
1874 int allow_reshape = 1;
1875
1876 /* check if reshape is allowed based on metadata
1877 * indications stored in content.array.status
1878 */
1879 if (content->array.state &
1880 (1 << MD_SB_BLOCK_VOLUME))
1881 allow_reshape = 0;
1882 if (content->array.state &
1883 (1 << MD_SB_BLOCK_CONTAINER_RESHAPE))
1884 allow_reshape = 0;
1885 if (!allow_reshape) {
1886 pr_err("cannot reshape arrays in container with unsupported metadata: %s(%s)\n",
1887 devname, container);
1888 sysfs_free(cc);
1889 free(subarray);
1890 return 1;
1891 }
1892 if (content->consistency_policy ==
1893 CONSISTENCY_POLICY_PPL) {
1894 pr_err("Operation not supported when ppl consistency policy is enabled\n");
1895 sysfs_free(cc);
1896 free(subarray);
1897 return 1;
1898 }
1899 }
1900 sysfs_free(cc);
1901 }
1902 if (mdmon_running(container))
1903 st->update_tail = &st->updates;
1904 }
1905
1906 added_disks = 0;
1907 for (dv = devlist; dv; dv = dv->next)
1908 added_disks++;
1909 if (s->raiddisks > array.raid_disks &&
1910 array.spare_disks + added_disks <
1911 (s->raiddisks - array.raid_disks) &&
1912 !c->force) {
1913 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
1914 " Use --force to over-ride this check.\n",
1915 s->raiddisks - array.raid_disks,
1916 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1917 array.spare_disks + added_disks);
1918 return 1;
1919 }
1920
1921 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS |
1922 GET_STATE | GET_VERSION);
1923 if (sra) {
1924 if (st->ss->external && subarray == NULL) {
1925 array.level = LEVEL_CONTAINER;
1926 sra->array.level = LEVEL_CONTAINER;
1927 }
1928 } else {
1929 pr_err("failed to read sysfs parameters for %s\n",
1930 devname);
1931 return 1;
1932 }
1933 frozen = freeze(st);
1934 if (frozen < -1) {
1935 /* freeze() already spewed the reason */
1936 sysfs_free(sra);
1937 return 1;
1938 } else if (frozen < 0) {
1939 pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname);
1940 sysfs_free(sra);
1941 return 1;
1942 }
1943
1944 /* ========= set size =============== */
1945 if (s->size > 0 &&
1946 (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1947 unsigned long long orig_size = get_component_size(fd)/2;
1948 unsigned long long min_csize;
1949 struct mdinfo *mdi;
1950 int raid0_takeover = 0;
1951
1952 if (orig_size == 0)
1953 orig_size = (unsigned) array.size;
1954
1955 if (orig_size == 0) {
1956 pr_err("Cannot set device size in this type of array.\n");
1957 rv = 1;
1958 goto release;
1959 }
1960
1961 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1962 devname, APPLY_METADATA_CHANGES,
1963 c->verbose > 0)) {
1964 rv = 1;
1965 goto release;
1966 }
1967 sync_metadata(st);
1968 if (st->ss->external) {
1969 /* metadata can have a size limitation;
1970 * update the size value according to the metadata information
1971 */
1972 struct mdinfo *sizeinfo =
1973 st->ss->container_content(st, subarray);
1974 if (sizeinfo) {
1975 unsigned long long new_size =
1976 sizeinfo->custom_array_size/2;
1977 int data_disks = get_data_disks(
1978 sizeinfo->array.level,
1979 sizeinfo->array.layout,
1980 sizeinfo->array.raid_disks);
1981 new_size /= data_disks;
1982 dprintf("Metadata size correction from %llu to %llu (%llu)\n",
1983 orig_size, new_size,
1984 new_size * data_disks);
1985 s->size = new_size;
1986 sysfs_free(sizeinfo);
1987 }
1988 }
1989
1990 /* Update the size of each member device in case
1991 * they have been resized. This will never reduce
1992 * below the current used-size. The "size" attribute
1993 * understands '0' to mean 'max'.
1994 */
1995 min_csize = 0;
1996 for (mdi = sra->devs; mdi; mdi = mdi->next) {
1997 sysfs_set_num(sra, mdi, "size",
1998 s->size == MAX_SIZE ? 0 : s->size);
1999 if (array.not_persistent == 0 &&
2000 array.major_version == 0 &&
2001 get_linux_version() < 3001000) {
2002 /* Dangerous to allow size to exceed 2TB */
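/* The sysfs "size" attribute is in KiB, so the 2ULL*1024*1024*1024
 * limit used below corresponds to 2TiB per device. */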
2003 unsigned long long csize;
2004 if (sysfs_get_ll(sra, mdi, "size",
2005 &csize) == 0) {
2006 if (csize >= 2ULL*1024*1024*1024)
2007 csize = 2ULL*1024*1024*1024;
2008 if ((min_csize == 0 ||
2009 (min_csize > csize)))
2010 min_csize = csize;
2011 }
2012 }
2013 }
2014 if (min_csize && s->size > min_csize) {
2015 pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n");
2016 rv = 1;
2017 goto size_change_error;
2018 }
2019 if (min_csize && s->size == MAX_SIZE) {
2020 /* Don't let the kernel choose a size - it will get
2021 * it wrong
2022 */
2023 pr_err("Limited v0.90 array to 2TB per device\n");
2024 s->size = min_csize;
2025 }
2026 if (st->ss->external) {
2027 if (sra->array.level == 0) {
2028 rv = sysfs_set_str(sra, NULL, "level", "raid5");
2029 if (!rv) {
2030 raid0_takeover = 1;
2031 /* get array parameters after takeover
2032 * to change one parameter at time only
2033 */
2034 rv = md_get_array_info(fd, &array);
2035 }
2036 }
2037 /* make sure mdmon is
2038 * aware of the new level */
2039 if (!mdmon_running(st->container_devnm))
2040 start_mdmon(st->container_devnm);
2041 ping_monitor(container);
2042 if (mdmon_running(st->container_devnm) &&
2043 st->update_tail == NULL)
2044 st->update_tail = &st->updates;
2045 }
2046
2047 if (s->size == MAX_SIZE)
2048 s->size = 0;
2049 array.size = s->size;
2050 if (s->size & ~INT32_MAX) {
2051 /* got truncated to 32bit, write to
2052 * component_size instead
2053 */
2054 if (sra)
2055 rv = sysfs_set_num(sra, NULL,
2056 "component_size", s->size);
2057 else
2058 rv = -1;
2059 } else {
2060 rv = md_set_array_info(fd, &array);
2061
2062 /* manage array size when it is managed externally
2063 */
2064 if ((rv == 0) && st->ss->external)
2065 rv = set_array_size(st, sra, sra->text_version);
2066 }
2067
2068 if (raid0_takeover) {
2069 /* do not resync the non-existent parity,
2070 * we will drop it anyway
2071 */
2072 sysfs_set_str(sra, NULL, "sync_action", "frozen");
2073 /* go back to raid0, drop parity disk
2074 */
2075 sysfs_set_str(sra, NULL, "level", "raid0");
2076 md_get_array_info(fd, &array);
2077 }
2078
2079 size_change_error:
2080 if (rv != 0) {
2081 int err = errno;
2082
2083 /* restore metadata */
2084 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
2085 UnSet, NULL, devname,
2086 ROLLBACK_METADATA_CHANGES,
2087 c->verbose) == 0)
2088 sync_metadata(st);
2089 pr_err("Cannot set device size for %s: %s\n",
2090 devname, strerror(err));
2091 if (err == EBUSY &&
2092 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2093 cont_err("Bitmap must be removed before size can be changed\n");
2094 rv = 1;
2095 goto release;
2096 }
2097 if (s->assume_clean) {
2098 /* This will fail on kernels older than 3.0 unless
2099 * a backport has been arranged.
2100 */
2101 if (sra == NULL ||
2102 sysfs_set_str(sra, NULL, "resync_start",
2103 "none") < 0)
2104 pr_err("--assume-clean not supported with --grow on this kernel\n");
2105 }
2106 md_get_array_info(fd, &array);
2107 s->size = get_component_size(fd)/2;
2108 if (s->size == 0)
2109 s->size = array.size;
2110 if (c->verbose >= 0) {
2111 if (s->size == orig_size)
2112 pr_err("component size of %s unchanged at %lluK\n",
2113 devname, s->size);
2114 else
2115 pr_err("component size of %s has been set to %lluK\n",
2116 devname, s->size);
2117 }
2118 changed = 1;
2119 } else if (array.level != LEVEL_CONTAINER) {
2120 s->size = get_component_size(fd)/2;
2121 if (s->size == 0)
2122 s->size = array.size;
2123 }
2124
2125 /* See if there is anything else to do */
2126 if ((s->level == UnSet || s->level == array.level) &&
2127 (s->layout_str == NULL) &&
2128 (s->chunk == 0 || s->chunk == array.chunk_size) &&
2129 data_offset == INVALID_SECTORS &&
2130 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
2131 /* Nothing more to do */
2132 if (!changed && c->verbose >= 0)
2133 pr_err("%s: no change requested\n", devname);
2134 goto release;
2135 }
2136
2137 /* ========= check for RAID10/RAID1 -> RAID0 conversion ===============
2138 * the current implementation assumes that the following conditions are met:
2139 * - RAID10:
2140 * - far_copies == 1
2141 * - near_copies == 2
2142 */
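/* For RAID10, the layout value (1 << 8) + 2 tested below encodes
 * far_copies == 1 in the second byte and near_copies == 2 in the
 * low byte. */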
2143 if ((s->level == 0 && array.level == 10 && sra &&
2144 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
2145 (s->level == 0 && array.level == 1 && sra)) {
2146 int err;
2147
2148 err = remove_disks_for_takeover(st, sra, array.layout);
2149 if (err) {
2150 dprintf("Array cannot be reshaped\n");
2151 if (cfd > -1)
2152 close(cfd);
2153 rv = 1;
2154 goto release;
2155 }
2156 /* Make sure mdmon has seen the device removal
2157 * and updated metadata before we continue with
2158 * level change
2159 */
2160 if (container)
2161 ping_monitor(container);
2162 }
2163
2164 memset(&info, 0, sizeof(info));
2165 info.array = array;
2166 if (sysfs_init(&info, fd, NULL)) {
2167 pr_err("failed to intialize sysfs.\n");
2168 rv = 1;
2169 goto release;
2170 }
2171 strcpy(info.text_version, sra->text_version);
2172 info.component_size = s->size*2;
2173 info.new_level = s->level;
2174 info.new_chunk = s->chunk * 1024;
2175 if (info.array.level == LEVEL_CONTAINER) {
2176 info.delta_disks = UnSet;
2177 info.array.raid_disks = s->raiddisks;
2178 } else if (s->raiddisks)
2179 info.delta_disks = s->raiddisks - info.array.raid_disks;
2180 else
2181 info.delta_disks = UnSet;
2182 if (s->layout_str == NULL) {
2183 info.new_layout = UnSet;
2184 if (info.array.level == 6 &&
2185 (info.new_level == 6 || info.new_level == UnSet) &&
2186 info.array.layout >= 16) {
2187 pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname);
2188 cont_err("during the reshape, please specify --layout=preserve\n");
2189 cont_err("If you want to change it, specify a layout or use --layout=normalise\n");
2190 rv = 1;
2191 goto release;
2192 }
2193 } else if (strcmp(s->layout_str, "normalise") == 0 ||
2194 strcmp(s->layout_str, "normalize") == 0) {
2195 /* If we have a -6 RAID6 layout, remove the '-6'. */
2196 info.new_layout = UnSet;
2197 if (info.array.level == 6 && info.new_level == UnSet) {
2198 char l[40], *h;
2199 strcpy(l, map_num(r6layout, info.array.layout));
2200 h = strrchr(l, '-');
2201 if (h && strcmp(h, "-6") == 0) {
2202 *h = 0;
2203 info.new_layout = map_name(r6layout, l);
2204 }
2205 } else {
2206 pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str);
2207 rv = 1;
2208 goto release;
2209 }
2210 } else if (strcmp(s->layout_str, "preserve") == 0) {
2211 /* This means that a non-standard RAID6 layout
2212 * is OK.
2213 * In particular:
2214 * - When reshaping a RAID6 (e.g. adding a device)
2215 * which is in a non-standard layout, it is OK
2216 * to preserve that layout.
2217 * - When converting a RAID5 to RAID6, leave it in
2218 * the XXX-6 layout, don't re-layout.
2219 */
2220 if (info.array.level == 6 && info.new_level == UnSet)
2221 info.new_layout = info.array.layout;
2222 else if (info.array.level == 5 && info.new_level == 6) {
2223 char l[40];
2224 strcpy(l, map_num(r5layout, info.array.layout));
2225 strcat(l, "-6");
2226 info.new_layout = map_name(r6layout, l);
2227 } else {
2228 pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str);
2229 rv = 1;
2230 goto release;
2231 }
2232 } else {
2233 int l = info.new_level;
2234 if (l == UnSet)
2235 l = info.array.level;
2236 switch (l) {
2237 case 5:
2238 info.new_layout = map_name(r5layout, s->layout_str);
2239 break;
2240 case 6:
2241 info.new_layout = map_name(r6layout, s->layout_str);
2242 break;
2243 case 10:
2244 info.new_layout = parse_layout_10(s->layout_str);
2245 break;
2246 case LEVEL_FAULTY:
2247 info.new_layout = parse_layout_faulty(s->layout_str);
2248 break;
2249 default:
2250 pr_err("layout not meaningful with this level\n");
2251 rv = 1;
2252 goto release;
2253 }
2254 if (info.new_layout == UnSet) {
2255 pr_err("layout %s not understood for this level\n",
2256 s->layout_str);
2257 rv = 1;
2258 goto release;
2259 }
2260 }
2261
2262 if (array.level == LEVEL_FAULTY) {
2263 if (s->level != UnSet && s->level != array.level) {
2264 pr_err("cannot change level of Faulty device\n");
2265 rv = 1;
2266 }
2267 if (s->chunk) {
2268 pr_err("cannot set chunksize of Faulty device\n");
2269 rv = 1;
2270 }
2271 if (s->raiddisks && s->raiddisks != 1) {
2272 pr_err("cannot set raid_disks of Faulty device\n");
2273 rv = 1;
2274 }
2275 if (s->layout_str) {
2276 if (md_get_array_info(fd, &array) != 0) {
2277 dprintf("Cannot get array information.\n");
2278 goto release;
2279 }
2280 array.layout = info.new_layout;
2281 if (md_set_array_info(fd, &array) != 0) {
2282 pr_err("failed to set new layout\n");
2283 rv = 1;
2284 } else if (c->verbose >= 0)
2285 printf("layout for %s set to %d\n",
2286 devname, array.layout);
2287 }
2288 } else if (array.level == LEVEL_CONTAINER) {
2289 /* This change is to be applied to every array in the
2290 * container. This is only needed when the metadata imposes
2291 * constraints on the various arrays in the container.
2292 * Currently we only know that IMSM requires all arrays
2293 * to have the same number of devices so changing the
2294 * number of devices (On-Line Capacity Expansion) must be
2295 * performed at the level of the container
2296 */
2297 if (fd > 0) {
2298 close(fd);
2299 fd = -1;
2300 }
2301 rv = reshape_container(container, devname, -1, st, &info,
2302 c->force, c->backup_file, c->verbose,
2303 0, 0, 0);
2304 frozen = 0;
2305 } else {
2306 /* get spare devices from external metadata
2307 */
2308 if (st->ss->external) {
2309 struct mdinfo *info2;
2310
2311 info2 = st->ss->container_content(st, subarray);
2312 if (info2) {
2313 info.array.spare_disks =
2314 info2->array.spare_disks;
2315 sysfs_free(info2);
2316 }
2317 }
2318
2319 /* Impose these changes on a single array. First
2320 * check that the metadata is OK with the change. */
2321
2322 if (reshape_super(st, 0, info.new_level,
2323 info.new_layout, info.new_chunk,
2324 info.array.raid_disks, info.delta_disks,
2325 c->backup_file, devname,
2326 APPLY_METADATA_CHANGES, c->verbose)) {
2327 rv = 1;
2328 goto release;
2329 }
2330 sync_metadata(st);
2331 rv = reshape_array(container, fd, devname, st, &info, c->force,
2332 devlist, data_offset, c->backup_file,
2333 c->verbose, 0, 0, 0);
2334 frozen = 0;
2335 }
2336 release:
2337 sysfs_free(sra);
2338 if (frozen > 0)
2339 unfreeze(st);
2340 return rv;
2341 }
2342
2343 /* verify_reshape_position()
2344 * Function checks that the reshape position in the metadata is not
2345 * farther along than the position in md.
2346 * Return value:
2347 * 0 : no valid sysfs entry;
2348 * this can happen when the reshape has not been started yet (it is
2349 * started by reshape_array) or when a raid0 array is awaiting takeover
2350 * -1 : error, reshape position is obviously wrong
2351 * 1 : success, reshape progress correct or updated
2352 */
2353 static int verify_reshape_position(struct mdinfo *info, int level)
2354 {
2355 int ret_val = 0;
2356 char buf[40];
2357 int rv;
2358
2359 /* read sync_max, failure can mean raid0 array */
2360 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2361
2362 if (rv > 0) {
2363 char *ep;
2364 unsigned long long position = strtoull(buf, &ep, 0);
2365
2366 dprintf("Read sync_max sysfs entry is: %s\n", buf);
2367 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
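/* sync_max is a per-device sector count; scale it by the
 * number of data disks so it can be compared with
 * reshape_progress, which is in array sectors. */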
2368 position *= get_data_disks(level,
2369 info->new_layout,
2370 info->array.raid_disks);
2371 if (info->reshape_progress < position) {
2372 dprintf("Corrected reshape progress (%llu) to md position (%llu)\n",
2373 info->reshape_progress, position);
2374 info->reshape_progress = position;
2375 ret_val = 1;
2376 } else if (info->reshape_progress > position) {
2377 pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n",
2378 position, info->reshape_progress);
2379 ret_val = -1;
2380 } else {
2381 dprintf("Reshape position in md and metadata are the same;");
2382 ret_val = 1;
2383 }
2384 }
2385 } else if (rv == 0) {
2386 /* for a valid sysfs entry, 0-length content
2387 * should be treated as an error
2388 */
2389 ret_val = -1;
2390 }
2391
2392 return ret_val;
2393 }
2394
2395 static unsigned long long choose_offset(unsigned long long lo,
2396 unsigned long long hi,
2397 unsigned long long min,
2398 unsigned long long max)
2399 {
2400 /* Choose a new offset between hi and lo.
2401 * It must be between min and max, but
2402 * we would prefer something near the middle of hi/lo, and also
2403 * prefer to be aligned to a big power of 2.
2404 *
2405 * So we start with the middle, then for each bit,
2406 * starting at '1' and increasing, if it is set, we either
2407 * add it or subtract it if possible, preferring the option
2408 * which is furthest from the boundary.
2409 *
2410 * We stop once we get a 1MB alignment. As units are in sectors,
2411 * 1MB = 2*1024 sectors.
2412 */
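/* Worked example (hypothetical numbers): with lo=1000, hi=9000,
 * min=1000, max=9000 the loop starts at choice=5000, moves to
 * 4992 (bit 8), then 5120 (bit 128), then 4096 (bit 1024),
 * ending on a 2048-sector (1MB) boundary inside [min, max]. */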
2413 unsigned long long choice = (lo + hi) / 2;
2414 unsigned long long bit = 1;
2415
2416 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2417 unsigned long long bigger, smaller;
2418 if (! (bit & choice))
2419 continue;
2420 bigger = choice + bit;
2421 smaller = choice - bit;
2422 if (bigger > max && smaller < min)
2423 break;
2424 if (bigger > max)
2425 choice = smaller;
2426 else if (smaller < min)
2427 choice = bigger;
2428 else if (hi - bigger > smaller - lo)
2429 choice = bigger;
2430 else
2431 choice = smaller;
2432 }
2433 return choice;
2434 }
2435
2436 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2437 char *devname, int delta_disks,
2438 unsigned long long data_offset,
2439 unsigned long long min,
2440 int can_fallback)
2441 {
2442 struct mdinfo *sd;
2443 int dir = 0;
2444 int err = 0;
2445 unsigned long long before, after;
2446
2447 /* Need to find min space before and after so same is used
2448 * on all devices
2449 */
2450 before = UINT64_MAX;
2451 after = UINT64_MAX;
2452 for (sd = sra->devs; sd; sd = sd->next) {
2453 char *dn;
2454 int dfd;
2455 int rv;
2456 struct supertype *st2;
2457 struct mdinfo info2;
2458
2459 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2460 continue;
2461 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2462 dfd = dev_open(dn, O_RDONLY);
2463 if (dfd < 0) {
2464 pr_err("%s: cannot open component %s\n",
2465 devname, dn ? dn : "-unknown-");
2466 goto release;
2467 }
2468 st2 = dup_super(st);
2469 rv = st2->ss->load_super(st2, dfd, NULL);
2470 close(dfd);
2471 if (rv) {
2472 free(st2);
2473 pr_err("%s: cannot get superblock from %s\n",
2474 devname, dn);
2475 goto release;
2476 }
2477 st2->ss->getinfo_super(st2, &info2, NULL);
2478 st2->ss->free_super(st2);
2479 free(st2);
2480 if (info2.space_before == 0 &&
2481 info2.space_after == 0) {
2482 /* Metadata doesn't support data_offset changes */
2483 if (!can_fallback)
2484 pr_err("%s: Metadata version doesn't support data_offset changes\n",
2485 devname);
2486 goto fallback;
2487 }
2488 if (before > info2.space_before)
2489 before = info2.space_before;
2490 if (after > info2.space_after)
2491 after = info2.space_after;
2492
2493 if (data_offset != INVALID_SECTORS) {
2494 if (dir == 0) {
2495 if (info2.data_offset == data_offset) {
2496 pr_err("%s: already has that data_offset\n",
2497 dn);
2498 goto release;
2499 }
2500 if (data_offset < info2.data_offset)
2501 dir = -1;
2502 else
2503 dir = 1;
2504 } else if ((data_offset <= info2.data_offset &&
2505 dir == 1) ||
2506 (data_offset >= info2.data_offset &&
2507 dir == -1)) {
2508 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2509 dn);
2510 goto release;
2511 }
2512 }
2513 }
2514 if (before == UINT64_MAX)
2515 /* impossible really, there must be no devices */
2516 return 1;
2517
2518 for (sd = sra->devs; sd; sd = sd->next) {
2519 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2520 unsigned long long new_data_offset;
2521
2522 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2523 continue;
2524 if (delta_disks < 0) {
2525 /* Don't need any space as array is shrinking
2526 * just move data_offset up by min
2527 */
2528 if (data_offset == INVALID_SECTORS)
2529 new_data_offset = sd->data_offset + min;
2530 else {
2531 if (data_offset < sd->data_offset + min) {
2532 pr_err("--data-offset too small for %s\n",
2533 dn);
2534 goto release;
2535 }
2536 new_data_offset = data_offset;
2537 }
2538 } else if (delta_disks > 0) {
2539 /* need space before */
2540 if (before < min) {
2541 if (can_fallback)
2542 goto fallback;
2543 pr_err("Insufficient head-space for reshape on %s\n",
2544 dn);
2545 goto release;
2546 }
2547 if (data_offset == INVALID_SECTORS)
2548 new_data_offset = sd->data_offset - min;
2549 else {
2550 if (data_offset > sd->data_offset - min) {
2551 pr_err("--data-offset too large for %s\n",
2552 dn);
2553 goto release;
2554 }
2555 new_data_offset = data_offset;
2556 }
2557 } else {
2558 if (dir == 0) {
2559 /* can move up or down. If 'data_offset'
2560 * was set we would have already decided,
2561 * so just choose direction with most space.
2562 */
2563 if (before > after)
2564 dir = -1;
2565 else
2566 dir = 1;
2567 }
2568 sysfs_set_str(sra, NULL, "reshape_direction",
2569 dir == 1 ? "backwards" : "forwards");
2570 if (dir > 0) {
2571 /* Increase data offset */
2572 if (after < min) {
2573 if (can_fallback)
2574 goto fallback;
2575 pr_err("Insufficient tail-space for reshape on %s\n",
2576 dn);
2577 goto release;
2578 }
2579 if (data_offset != INVALID_SECTORS &&
2580 data_offset < sd->data_offset + min) {
2581 pr_err("--data-offset too small on %s\n",
2582 dn);
2583 goto release;
2584 }
2585 if (data_offset != INVALID_SECTORS)
2586 new_data_offset = data_offset;
2587 else
2588 new_data_offset = choose_offset(sd->data_offset,
2589 sd->data_offset + after,
2590 sd->data_offset + min,
2591 sd->data_offset + after);
2592 } else {
2593 /* Decrease data offset */
2594 if (before < min) {
2595 if (can_fallback)
2596 goto fallback;
2597 pr_err("insufficient head-room on %s\n",
2598 dn);
2599 goto release;
2600 }
2601 if (data_offset != INVALID_SECTORS &&
2602 data_offset < sd->data_offset - min) {
2603 pr_err("--data-offset too small on %s\n",
2604 dn);
2605 goto release;
2606 }
2607 if (data_offset != INVALID_SECTORS)
2608 new_data_offset = data_offset;
2609 else
2610 new_data_offset = choose_offset(sd->data_offset - before,
2611 sd->data_offset,
2612 sd->data_offset - before,
2613 sd->data_offset - min);
2614 }
2615 }
2616 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2617 if (err < 0 && errno == E2BIG) {
2618 /* try again after increasing data size to max */
2619 err = sysfs_set_num(sra, sd, "size", 0);
2620 if (err < 0 && errno == EINVAL &&
2621 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2622 /* some kernels have a bug where you cannot
2623 * use '0' on spare devices. */
2624 sysfs_set_num(sra, sd, "size",
2625 (sra->component_size + after)/2);
2626 }
2627 err = sysfs_set_num(sra, sd, "new_offset",
2628 new_data_offset);
2629 }
2630 if (err < 0) {
2631 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2632 pr_err("data-offset is too big for %s\n", dn);
2633 goto release;
2634 }
2635 if (sd == sra->devs &&
2636 (errno == ENOENT || errno == E2BIG))
2637 /* Early kernel, no 'new_offset' file,
2638 * or kernel doesn't like us.
2639 * For RAID5/6 this is not fatal
2640 */
2641 return 1;
2642 pr_err("Cannot set new_offset for %s\n", dn);
2643 break;
2644 }
2645 }
2646 return err;
2647 release:
2648 return -1;
2649 fallback:
2650 /* Just use a backup file */
2651 return 1;
2652 }
2653
2654 static int raid10_reshape(char *container, int fd, char *devname,
2655 struct supertype *st, struct mdinfo *info,
2656 struct reshape *reshape,
2657 unsigned long long data_offset,
2658 int force, int verbose)
2659 {
2660 /* Changing raid_disks, layout, chunksize or possibly
2661 * just data_offset for a RAID10.
2662 * We must always change data_offset. We change by at least
2663 * ->min_offset_change which is the largest of the old and new
2664 * chunk sizes.
2665 * If raid_disks is increasing, then data_offset must decrease
2666 * by at least this copy size.
2667 * If raid_disks is unchanged, data_offset must increase or
2668 * decrease by at least min_offset_change but preferably by much more.
2669 * We choose half of the available space.
2670 * If raid_disks is decreasing, data_offset must increase by
2671 * at least min_offset_change. To allow for this, component_size
2672 * must be decreased by the same amount.
2673 *
2674 * So we calculate the required minimum and direction, possibly
2675 * reduce the component_size, then iterate through the devices
2676 * and set the new_data_offset.
2677 * If that all works, we set chunk_size, layout, raid_disks, and start
2678 * 'reshape'
2679 */
2680 struct mdinfo *sra;
2681 unsigned long long min;
2682 int err = 0;
2683
2684 sra = sysfs_read(fd, NULL,
2685 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2686 );
2687 if (!sra) {
2688 pr_err("%s: Cannot get array details from sysfs\n", devname);
2689 goto release;
2690 }
2691 min = reshape->min_offset_change;
2692
2693 if (info->delta_disks)
2694 sysfs_set_str(sra, NULL, "reshape_direction",
2695 info->delta_disks < 0 ? "backwards" : "forwards");
2696 if (info->delta_disks < 0 && info->space_after < min) {
2697 int rv = sysfs_set_num(sra, NULL, "component_size",
2698 (sra->component_size - min)/2);
2699 if (rv) {
2700 pr_err("cannot reduce component size\n");
2701 goto release;
2702 }
2703 }
2704 err = set_new_data_offset(sra, st, devname, info->delta_disks,
2705 data_offset, min, 0);
2706 if (err == 1) {
2707 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2708 cont_err("supported on this kernel\n");
2709 err = -1;
2710 }
2711 if (err < 0)
2712 goto release;
2713
2714 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2715 err = errno;
2716 if (!err && sysfs_set_num(sra, NULL, "layout",
2717 reshape->after.layout) < 0)
2718 err = errno;
2719 if (!err &&
2720 sysfs_set_num(sra, NULL, "raid_disks",
2721 info->array.raid_disks + info->delta_disks) < 0)
2722 err = errno;
2723 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2724 err = errno;
2725 if (err) {
2726 pr_err("Cannot set array shape for %s\n",
2727 devname);
2728 if (err == EBUSY &&
2729 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2730 cont_err(" Bitmap must be removed before shape can be changed\n");
2731 goto release;
2732 }
2733 sysfs_free(sra);
2734 return 0;
2735 release:
2736 sysfs_free(sra);
2737 return 1;
2738 }
2739
2740 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2741 {
2742 struct mdinfo *sra, *sd;
2743 /* Initialisation to silence compiler warning */
2744 unsigned long long min_space_before = 0, min_space_after = 0;
2745 int first = 1;
2746
2747 sra = sysfs_read(fd, NULL, GET_DEVS);
2748 if (!sra)
2749 return;
2750 for (sd = sra->devs; sd; sd = sd->next) {
2751 char *dn;
2752 int dfd;
2753 struct supertype *st2;
2754 struct mdinfo info2;
2755
2756 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2757 continue;
2758 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2759 dfd = dev_open(dn, O_RDONLY);
2760 if (dfd < 0)
2761 break;
2762 st2 = dup_super(st);
2763 if (st2->ss->load_super(st2, dfd, NULL)) {
2764 close(dfd);
2765 free(st2);
2766 break;
2767 }
2768 close(dfd);
2769 st2->ss->getinfo_super(st2, &info2, NULL);
2770 st2->ss->free_super(st2);
2771 free(st2);
2772 if (first ||
2773 min_space_before > info2.space_before)
2774 min_space_before = info2.space_before;
2775 if (first ||
2776 min_space_after > info2.space_after)
2777 min_space_after = info2.space_after;
2778 first = 0;
2779 }
2780 if (sd == NULL && !first) {
2781 info->space_after = min_space_after;
2782 info->space_before = min_space_before;
2783 }
2784 sysfs_free(sra);
2785 }
2786
2787 static void update_cache_size(char *container, struct mdinfo *sra,
2788 struct mdinfo *info,
2789 int disks, unsigned long long blocks)
2790 {
2791 /* Check that the internal stripe cache is
2792 * large enough, or it won't work.
2793 * It must hold at least 4 stripes of the larger
2794 * chunk size
2795 */
2796 unsigned long cache;
2797 cache = max(info->array.chunk_size, info->new_chunk);
2798 cache *= 4; /* 4 stripes minimum */
2799 cache /= 512; /* convert to sectors */
2800 /* make sure there is room for 'blocks' with a bit to spare */
2801 if (cache < 16 + blocks / disks)
2802 cache = 16 + blocks / disks;
2803 cache /= (4096/512); /* Convert from sectors to pages */
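/* Example (hypothetical numbers): with a 512KiB chunk and a small
 * 'blocks' value, cache = 512KiB * 4 = 4096 sectors = 512 pages,
 * so stripe_cache_size is raised to at least 513. */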
2804
2805 if (sra->cache_size < cache)
2806 subarray_set_num(container, sra, "stripe_cache_size",
2807 cache+1);
2808 }
2809
2810 static int impose_reshape(struct mdinfo *sra,
2811 struct mdinfo *info,
2812 struct supertype *st,
2813 int fd,
2814 int restart,
2815 char *devname, char *container,
2816 struct reshape *reshape)
2817 {
2818 struct mdu_array_info_s array;
2819
2820 sra->new_chunk = info->new_chunk;
2821
2822 if (restart) {
2823 /* For external metadata the checkpoint saved by mdmon can be lost
2824 * or missed (e.g. due to a crash). Check whether, during restart, md
2825 * has progressed farther than the metadata indicates.
2826 * If so, the metadata information is obsolete.
2827 */
2828 if (st->ss->external)
2829 verify_reshape_position(info, reshape->level);
2830 sra->reshape_progress = info->reshape_progress;
2831 } else {
2832 sra->reshape_progress = 0;
2833 if (reshape->after.data_disks < reshape->before.data_disks)
2834 /* start from the end of the new array */
2835 sra->reshape_progress = (sra->component_size
2836 * reshape->after.data_disks);
2837 }
2838
2839 md_get_array_info(fd, &array);
2840 if (info->array.chunk_size == info->new_chunk &&
2841 reshape->before.layout == reshape->after.layout &&
2842 st->ss->external == 0) {
2843 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2844 array.raid_disks = reshape->after.data_disks + reshape->parity;
2845 if (!restart && md_set_array_info(fd, &array) != 0) {
2846 int err = errno;
2847
2848 pr_err("Cannot set device shape for %s: %s\n",
2849 devname, strerror(errno));
2850
2851 if (err == EBUSY &&
2852 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2853 cont_err("Bitmap must be removed before shape can be changed\n");
2854
2855 goto release;
2856 }
2857 } else if (!restart) {
2858 /* set them all just in case some old 'new_*' value
2859 * persists from some earlier problem.
2860 */
2861 int err = 0;
2862 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2863 err = errno;
2864 if (!err && sysfs_set_num(sra, NULL, "layout",
2865 reshape->after.layout) < 0)
2866 err = errno;
2867 if (!err && subarray_set_num(container, sra, "raid_disks",
2868 reshape->after.data_disks +
2869 reshape->parity) < 0)
2870 err = errno;
2871 if (err) {
2872 pr_err("Cannot set device shape for %s\n", devname);
2873
2874 if (err == EBUSY &&
2875 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2876 cont_err("Bitmap must be removed before shape can be changed\n");
2877 goto release;
2878 }
2879 }
2880 return 0;
2881 release:
2882 return -1;
2883 }
2884
2885 static int impose_level(int fd, int level, char *devname, int verbose)
2886 {
2887 char *c;
2888 struct mdu_array_info_s array;
2889 struct mdinfo info;
2890
2891 if (sysfs_init(&info, fd, NULL)) {
2892 pr_err("failed to intialize sysfs.\n");
2893 return 1;
2894 }
2895
2896 md_get_array_info(fd, &array);
2897 if (level == 0 && (array.level >= 4 && array.level <= 6)) {
2898 /* To convert to RAID0 we need to fail and
2899 * remove any non-data devices. */
2900 int found = 0;
2901 int d;
2902 int data_disks = array.raid_disks - 1;
2903 if (array.level == 6)
2904 data_disks -= 1;
2905 if (array.level == 5 && array.layout != ALGORITHM_PARITY_N)
2906 return -1;
2907 if (array.level == 6 && array.layout != ALGORITHM_PARITY_N_6)
2908 return -1;
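/* The checks above require a parity-last layout, so all data sits
 * on the first data_disks devices - a prerequisite for the RAID0
 * takeover performed below. */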
2909 sysfs_set_str(&info, NULL,"sync_action", "idle");
2910 /* First remove any spares so no recovery starts */
2911 for (d = 0, found = 0;
2912 d < MAX_DISKS && found < array.nr_disks; d++) {
2913 mdu_disk_info_t disk;
2914 disk.number = d;
2915 if (md_get_disk_info(fd, &disk) < 0)
2916 continue;
2917 if (disk.major == 0 && disk.minor == 0)
2918 continue;
2919 found++;
2920 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2921 disk.raid_disk < data_disks)
2922 /* keep this */
2923 continue;
2924 ioctl(fd, HOT_REMOVE_DISK,
2925 makedev(disk.major, disk.minor));
2926 }
2927 /* Now fail anything left */
2928 md_get_array_info(fd, &array);
2929 for (d = 0, found = 0;
2930 d < MAX_DISKS && found < array.nr_disks; d++) {
2931 mdu_disk_info_t disk;
2932 disk.number = d;
2933 if (md_get_disk_info(fd, &disk) < 0)
2934 continue;
2935 if (disk.major == 0 && disk.minor == 0)
2936 continue;
2937 found++;
2938 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2939 disk.raid_disk < data_disks)
2940 /* keep this */
2941 continue;
2942 ioctl(fd, SET_DISK_FAULTY,
2943 makedev(disk.major, disk.minor));
2944 hot_remove_disk(fd, makedev(disk.major, disk.minor), 1);
2945 }
2946 }
2947 c = map_num(pers, level);
2948 if (c) {
2949 int err = sysfs_set_str(&info, NULL, "level", c);
2950 if (err) {
2951 err = errno;
2952 pr_err("%s: could not set level to %s\n",
2953 devname, c);
2954 if (err == EBUSY &&
2955 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2956 cont_err("Bitmap must be removed before level can be changed\n");
2957 return err;
2958 }
2959 if (verbose >= 0)
2960 pr_err("level of %s changed to %s\n", devname, c);
2961 }
2962 return 0;
2963 }
2964
2965 int sigterm = 0;
2966 static void catch_term(int sig)
2967 {
2968 sigterm = 1;
2969 }
2970
2971 static int continue_via_systemd(char *devnm)
2972 {
2973 int skipped, i, pid, status;
2974 char pathbuf[1024];
2975 /* In a systemd/udev world, it is best to get systemd to
2976 * run "mdadm --grow --continue" rather than running in the
2977 * background.
2978 */
2979 switch(fork()) {
2980 case 0:
2981 /* FIXME yuk. CLOSE_EXEC?? */
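/* Close all inherited descriptors above stderr; stop once 20
 * consecutive close() calls fail, taken as the end of the range
 * of open descriptors. */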
2982 skipped = 0;
2983 for (i = 3; skipped < 20; i++)
2984 if (close(i) < 0)
2985 skipped++;
2986 else
2987 skipped = 0;
2988
2989 /* Don't want to see error messages from
2990 * systemctl. If the service doesn't exist,
2991 * we fork ourselves.
2992 */
2993 close(2);
2994 open("/dev/null", O_WRONLY);
2995 snprintf(pathbuf, sizeof(pathbuf),
2996 "mdadm-grow-continue@%s.service", devnm);
2997 status = execl("/usr/bin/systemctl", "systemctl", "restart",
2998 pathbuf, NULL);
2999 status = execl("/bin/systemctl", "systemctl", "restart",
3000 pathbuf, NULL);
3001 exit(1);
3002 case -1: /* Just do it ourselves. */
3003 break;
3004 default: /* parent - good */
3005 pid = wait(&status);
3006 if (pid >= 0 && status == 0)
3007 return 1;
3008 }
3009 return 0;
3010 }
3011
3012 static int reshape_array(char *container, int fd, char *devname,
3013 struct supertype *st, struct mdinfo *info,
3014 int force, struct mddev_dev *devlist,
3015 unsigned long long data_offset,
3016 char *backup_file, int verbose, int forked,
3017 int restart, int freeze_reshape)
3018 {
3019 struct reshape reshape;
3020 int spares_needed;
3021 char *msg;
3022 int orig_level = UnSet;
3023 int odisks;
3024 int delayed;
3025
3026 struct mdu_array_info_s array;
3027 char *c;
3028
3029 struct mddev_dev *dv;
3030 int added_disks;
3031
3032 int *fdlist = NULL;
3033 unsigned long long *offsets = NULL;
3034 int d;
3035 int nrdisks;
3036 int err;
3037 unsigned long blocks;
3038 unsigned long long array_size;
3039 int done;
3040 struct mdinfo *sra = NULL;
3041 char buf[20];
3042
3043 /* when reshaping a RAID0, the component_size might be zero.
3044 * So try to fix that up.
3045 */
3046 if (md_get_array_info(fd, &array) != 0) {
3047 dprintf("Cannot get array information.\n");
3048 goto release;
3049 }
3050 if (array.level == 0 && info->component_size == 0) {
3051 get_dev_size(fd, NULL, &array_size);
3052 info->component_size = array_size / array.raid_disks;
3053 }
3054
3055 if (array.level == 10)
3056 /* Need space_after info */
3057 get_space_after(fd, st, info);
3058
3059 if (info->reshape_active) {
3060 int new_level = info->new_level;
3061 info->new_level = UnSet;
3062 if (info->delta_disks > 0)
3063 info->array.raid_disks -= info->delta_disks;
3064 msg = analyse_change(devname, info, &reshape);
3065 info->new_level = new_level;
3066 if (info->delta_disks > 0)
3067 info->array.raid_disks += info->delta_disks;
3068 if (!restart)
3069 /* Make sure the array isn't read-only */
3070 ioctl(fd, RESTART_ARRAY_RW, 0);
3071 } else
3072 msg = analyse_change(devname, info, &reshape);
3073 if (msg) {
3074 /* if msg == "", error has already been printed */
3075 if (msg[0])
3076 pr_err("%s\n", msg);
3077 goto release;
3078 }
3079 if (restart && (reshape.level != info->array.level ||
3080 reshape.before.layout != info->array.layout ||
3081 reshape.before.data_disks + reshape.parity !=
3082 info->array.raid_disks - max(0, info->delta_disks))) {
3083 pr_err("reshape info is not in native format - cannot continue.\n");
3084 goto release;
3085 }
3086
3087 if (st->ss->external && restart && (info->reshape_progress == 0) &&
3088 !((sysfs_get_str(info, NULL, "sync_action",
3089 buf, sizeof(buf)) > 0) &&
3090 (strncmp(buf, "reshape", 7) == 0))) {
3091 /* When a reshape is restarted from '0', the very beginning of the
3092 * array, it is possible that for external metadata the reshape and
3093 * array configuration have not happened yet.
3094 * Check whether md has the same opinion and the reshape really is
3095 * restarting from 0. If so, this is a regular reshape start after
3096 * the metadata has switched to the next array.
3097 */
3098 if ((verify_reshape_position(info, reshape.level) >= 0) &&
3099 (info->reshape_progress == 0))
3100 restart = 0;
3101 }
3102 if (restart) {
3103 /*
3104 * reshape already started. just skip to monitoring
3105 * the reshape
3106 */
3107 if (reshape.backup_blocks == 0)
3108 return 0;
3109 if (restart & RESHAPE_NO_BACKUP)
3110 return 0;
3111
3112 /* Need 'sra' down at 'started:' */
3113 sra = sysfs_read(fd, NULL,
3114 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|
3115 GET_CHUNK|GET_CACHE);
3116 if (!sra) {
3117 pr_err("%s: Cannot get array details from sysfs\n",
3118 devname);
3119 goto release;
3120 }
3121
3122 if (!backup_file)
3123 backup_file = locate_backup(sra->sys_name);
3124
3125 goto started;
3126 }
3127 /* The container is frozen but the array may not be.
3128 * So freeze the array so spares don't get put to the wrong use
3129 * FIXME there should probably be a cleaner separation between
3130 * freeze_array and freeze_container.
3131 */
3132 sysfs_freeze_array(info);
3133 /* Check we have enough spares to not be degraded */
3134 added_disks = 0;
3135 for (dv = devlist; dv; dv = dv->next)
3136 added_disks++;
3137 spares_needed = max(reshape.before.data_disks,
3138 reshape.after.data_disks) +
3139 reshape.parity - array.raid_disks;
3140
3141 if (!force && info->new_level > 1 && info->array.level > 1 &&
3142 spares_needed > info->array.spare_disks + added_disks) {
3143 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
3144 " Use --force to over-ride this check.\n",
3145 spares_needed,
3146 spares_needed == 1 ? "" : "s",
3147 info->array.spare_disks + added_disks);
3148 goto release;
3149 }
3150 /* Check we have enough spares to not fail */
3151 spares_needed = max(reshape.before.data_disks,
3152 reshape.after.data_disks)
3153 - array.raid_disks;
3154 if ((info->new_level > 1 || info->new_level == 0) &&
3155 spares_needed > info->array.spare_disks + added_disks) {
3156 pr_err("Need %d spare%s to create working array, and only have %d.\n",
3157 spares_needed, spares_needed == 1 ? "" : "s",
3158 info->array.spare_disks + added_disks);
3159 goto release;
3160 }
3161
3162 if (reshape.level != array.level) {
3163 int err = impose_level(fd, reshape.level, devname, verbose);
3164 if (err)
3165 goto release;
3166 info->new_layout = UnSet; /* after level change,
3167 * layout is meaningless */
3168 orig_level = array.level;
3169 sysfs_freeze_array(info);
3170
3171 if (reshape.level > 0 && st->ss->external) {
3172 /* make sure mdmon is aware of the new level */
3173 if (mdmon_running(container))
3174 flush_mdmon(container);
3175
3176 if (!mdmon_running(container))
3177 start_mdmon(container);
3178 ping_monitor(container);
3179 if (mdmon_running(container) && st->update_tail == NULL)
3180 st->update_tail = &st->updates;
3181 }
3182 }
3183 /* ->reshape_super might have chosen some spares from the
3184 * container that it wants to be part of the new array.
3185 * We can collect them with ->container_content and give
3186 * them to the kernel.
3187 */
3188 if (st->ss->reshape_super && st->ss->container_content) {
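/* For a member of an external container, text_version looks like
 * "/md127/0"; the part after the second '/' names the subarray
 * within the container. */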
3189 char *subarray = strchr(info->text_version+1, '/')+1;
3190 struct mdinfo *info2 =
3191 st->ss->container_content(st, subarray);
3192 struct mdinfo *d;
3193
3194 if (info2) {
3195 if (sysfs_init(info2, fd, st->devnm)) {
3196 pr_err("unable to initialize sysfs for %s\n",
3197 st->devnm);
3198 free(info2);
3199 goto release;
3200 }
3201 /* When increasing number of devices, we need to set
3202 * new raid_disks before adding these, or they might
3203 * be rejected.
3204 */
3205 if (reshape.backup_blocks &&
3206 reshape.after.data_disks >
3207 reshape.before.data_disks)
3208 subarray_set_num(container, info2, "raid_disks",
3209 reshape.after.data_disks +
3210 reshape.parity);
3211 for (d = info2->devs; d; d = d->next) {
3212 if (d->disk.state == 0 &&
3213 d->disk.raid_disk >= 0) {
3214 /* This is a spare that wants to
3215 * be part of the array.
3216 */
3217 add_disk(fd, st, info2, d);
3218 }
3219 }
3220 sysfs_free(info2);
3221 }
3222 }
3223 /* We might have been given some devices to add to the
3224 * array. Now that the array has been changed to the right
3225 * level and frozen, we can safely add them.
3226 */
3227 if (devlist) {
3228 if (Manage_subdevs(devname, fd, devlist, verbose, 0, NULL, 0))
3229 goto release;
3230 }
3231
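/* An explicitly requested data_offset with no other restriping
 * still goes through the reshape path below, so use one full
 * stripe (data_disks * chunk_size, in sectors) as the unit. */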
3232 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
3233 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
3234 if (reshape.backup_blocks == 0) {
3235 /* No restriping needed, but we might need to impose
3236 * some more changes: layout, raid_disks, chunk_size
3237 */
3238 /* read current array info */
3239 if (md_get_array_info(fd, &array) != 0) {
3240 dprintf("Cannot get array information.\n");
3241 goto release;
3242 }
3243 /* compare current array info with the new values and, if
3244 * they differ, update them to the new values */
3245 if (info->new_layout != UnSet &&
3246 info->new_layout != array.layout) {
3247 array.layout = info->new_layout;
3248 if (md_set_array_info(fd, &array) != 0) {
3249 pr_err("failed to set new layout\n");
3250 goto release;
3251 } else if (verbose >= 0)
3252 printf("layout for %s set to %d\n",
3253 devname, array.layout);
3254 }
3255 if (info->delta_disks != UnSet && info->delta_disks != 0 &&
3256 array.raid_disks !=
3257 (info->array.raid_disks + info->delta_disks)) {
3258 array.raid_disks += info->delta_disks;
3259 if (md_set_array_info(fd, &array) != 0) {
3260 pr_err("failed to set raid disks\n");
3261 goto release;
3262 } else if (verbose >= 0) {
3263 printf("raid_disks for %s set to %d\n",
3264 devname, array.raid_disks);
3265 }
3266 }
3267 if (info->new_chunk != 0 &&
3268 info->new_chunk != array.chunk_size) {
3269 if (sysfs_set_num(info, NULL,
3270 "chunk_size", info->new_chunk) != 0) {
3271 pr_err("failed to set chunk size\n");
3272 goto release;
3273 } else if (verbose >= 0)
3274 printf("chunk size for %s set to %d\n",
3275 devname, array.chunk_size);
3276 }
3277 unfreeze(st);
3278 return 0;
3279 }
3280
3281 /*
3282 * There are three possibilities.
3283 * 1/ The array will shrink.
3284 * We need to ensure the reshape will pause before reaching
3285 * the 'critical section'. We also need to fork and wait for
3286 * that to happen. When it does we
3287 * suspend/backup/complete/unfreeze
3288 *
3289 * 2/ The array will not change size.
3290 * This requires that we keep a backup of a sliding window
3291 * so that we can restore data after a crash. So we need
3292 * to fork and monitor progress.
3293 * In future we will allow the data_offset to change, so
3294 * a sliding backup becomes unnecessary.
3295 *
3296 * 3/ The array will grow. This is relatively easy.
3297 * However the kernel's restripe routines will cheerfully
3298 * overwrite some early data before it is safe. So we
3299 * need to make a backup of the early parts of the array
3300 * and be ready to restore it if rebuild aborts very early.
3301 * For externally managed metadata, we still need a forked
3302 * child to monitor the reshape and suspend IO over the region
3303 * that is being reshaped.
3304 *
3305 * We backup data by writing it to one spare, or to a
3306 * file which was given on command line.
3307 *
3308 * In each case, we first make sure that storage is available
3309 * for the required backup.
3310 * Then we:
3311 * - request the shape change.
3312 * - fork to handle backup etc.
3313 */
3314 /* Check that we can hold all the data */
3315 get_dev_size(fd, NULL, &array_size);
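/* get_dev_size() reports bytes; reshape.new_size is in sectors,
 * hence the division by 512 in the comparison below. */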
3316 if (reshape.new_size < (array_size/512)) {
3317 pr_err("this change will reduce the size of the array.\n"
3318 " use --grow --array-size first to truncate array.\n"
3319 " e.g. mdadm --grow %s --array-size %llu\n",
3320 devname, reshape.new_size/2);
3321 goto release;
3322 }
3323
3324 if (array.level == 10) {
3325 /* Reshaping RAID10 does not require any data backup by
3326 * user-space. Instead it requires that the data_offset
3327 * is changed to avoid the need for backup.
3328 * So this is handled very separately
3329 */
3330 if (restart)
3331 /* Nothing to do. */
3332 return 0;
3333 return raid10_reshape(container, fd, devname, st, info,
3334 &reshape, data_offset, force, verbose);
3335 }
3336 sra = sysfs_read(fd, NULL,
3337 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3338 GET_CACHE);
3339 if (!sra) {
3340 pr_err("%s: Cannot get array details from sysfs\n",
3341 devname);
3342 goto release;
3343 }
3344
3345 if (!backup_file)
3346 switch(set_new_data_offset(sra, st, devname,
3347 reshape.after.data_disks - reshape.before.data_disks,
3348 data_offset,
3349 reshape.min_offset_change, 1)) {
3350 case -1:
3351 goto release;
3352 case 0:
3353 /* Updated data_offset, so it's easy now */
3354 update_cache_size(container, sra, info,
3355 min(reshape.before.data_disks,
3356 reshape.after.data_disks),
3357 reshape.backup_blocks);
3358
3359 /* Right, everything seems fine. Let's kick things off.
3360 */
3361 sync_metadata(st);
3362
3363 if (impose_reshape(sra, info, st, fd, restart,
3364 devname, container, &reshape) < 0)
3365 goto release;
3366 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3367 struct mdinfo *sd;
3368 if (errno != EINVAL) {
3369 pr_err("Failed to initiate reshape!\n");
3370 goto release;
3371 }
3372 /* revert data_offset and try the old way */
3373 for (sd = sra->devs; sd; sd = sd->next) {
3374 sysfs_set_num(sra, sd, "new_offset",
3375 sd->data_offset);
3376 sysfs_set_str(sra, NULL, "reshape_direction",
3377 "forwards");
3378 }
3379 break;
3380 }
3381 if (info->new_level == reshape.level)
3382 return 0;
3383 /* need to adjust level when reshape completes */
3384 switch(fork()) {
3385 case -1: /* ignore error, but don't wait */
3386 return 0;
3387 default: /* parent */
3388 return 0;
3389 case 0:
3390 map_fork();
3391 break;
3392 }
3393 close(fd);
3394 wait_reshape(sra);
3395 fd = open_dev(sra->sys_name);
3396 if (fd >= 0)
3397 impose_level(fd, info->new_level, devname, verbose);
3398 return 0;
3399 case 1: /* Couldn't set data_offset, try the old way */
3400 if (data_offset != INVALID_SECTORS) {
3401 pr_err("Cannot update data_offset on this array\n");
3402 goto release;
3403 }
3404 break;
3405 }
3406
3407 started:
3408 /* Decide how many blocks (sectors) for a reshape
3409 * unit. The number we have so far is just a minimum
3410 */
3411 blocks = reshape.backup_blocks;
3412 if (reshape.before.data_disks ==
3413 reshape.after.data_disks) {
3414 /* Make 'blocks' bigger for better throughput, but
3415 * not so big that we reject it below.
3416 * Try for 16 megabytes
3417 */
3418 while (blocks * 32 < sra->component_size && blocks < 16*1024*2)
3419 blocks *= 2;
3420 } else
3421 pr_err("Need to backup %luK of critical section..\n", blocks/2);
3422
3423 if (blocks >= sra->component_size/2) {
3424 pr_err("%s: Something wrong - reshape aborted\n", devname);
3425 goto release;
3426 }
3427
3428 /* Now we need to open all these devices so we can read/write.
3429 */
3430 nrdisks = max(reshape.before.data_disks,
3431 reshape.after.data_disks) + reshape.parity
3432 + sra->array.spare_disks;
3433 fdlist = xcalloc((1+nrdisks), sizeof(int));
3434 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
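/* One extra slot in each array leaves room for the backup-file
 * descriptor/offset appended after the component devices. */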
3435
3436 odisks = reshape.before.data_disks + reshape.parity;
3437 d = reshape_prepare_fdlist(devname, sra, odisks, nrdisks, blocks,
3438 backup_file, fdlist, offsets);
3439 if (d < odisks) {
3440 goto release;
3441 }
3442 if ((st->ss->manage_reshape == NULL) ||
3443 (st->ss->recover_backup == NULL)) {
3444 if (backup_file == NULL) {
3445 if (reshape.after.data_disks <=
3446 reshape.before.data_disks) {
3447 pr_err("%s: Cannot grow - need backup-file\n",
3448 devname);
3449 pr_err(" Please provide one with \"--backup=...\"\n");
3450 goto release;
3451 } else if (d == odisks) {
3452 pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname);
3453 goto release;
3454 }
3455 } else {
3456 if (!reshape_open_backup_file(backup_file, fd, devname,
3457 (signed)blocks,
3458 fdlist+d, offsets+d,
3459 sra->sys_name, restart)) {
3460 goto release;
3461 }
3462 d++;
3463 }
3464 }
3465
3466 update_cache_size(container, sra, info,
3467 min(reshape.before.data_disks,
3468 reshape.after.data_disks), blocks);
3469
3470 /* Right, everything seems fine. Let's kick things off.
3471 * If only changing raid_disks, use ioctl, else use
3472 * sysfs.
3473 */
3474 sync_metadata(st);
3475
3476 if (impose_reshape(sra, info, st, fd, restart,
3477 devname, container, &reshape) < 0)
3478 goto release;
3479
3480 err = start_reshape(sra, restart, reshape.before.data_disks,
3481 reshape.after.data_disks);
3482 if (err) {
3483 pr_err("Cannot %s reshape for %s\n",
3484 restart ? "continue" : "start", devname);
3485 goto release;
3486 }
3487 if (restart)
3488 sysfs_set_str(sra, NULL, "array_state", "active");
3489 if (freeze_reshape) {
3490 free(fdlist);
3491 free(offsets);
3492 sysfs_free(sra);
3493 pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n",
3494 sra->reshape_progress);
3495 return 1;
3496 }
3497
3498 if (!forked && !check_env("MDADM_NO_SYSTEMCTL"))
3499 if (continue_via_systemd(container ?: sra->sys_name)) {
3500 free(fdlist);
3501 free(offsets);
3502 sysfs_free(sra);
3503 return 0;
3504 }
3505
3506 /* Now we just need to kick off the reshape and watch, while
3507 * handling backups of the data...
3508 * This is all done by a forked background process.
3509 */
3510 switch(forked ? 0 : fork()) {
3511 case -1:
3512 pr_err("Cannot run child to monitor reshape: %s\n",
3513 strerror(errno));
3514 abort_reshape(sra);
3515 goto release;
3516 default:
3517 free(fdlist);
3518 free(offsets);
3519 sysfs_free(sra);
3520 return 0;
3521 case 0:
3522 map_fork();
3523 break;
3524 }
3525
3526 /* If another array on the same devices is busy, the
3527 * reshape will wait for them. This would mean that
3528 * the first section that we suspend will stay suspended
3529 * for a long time. So check on that possibility
3530 * by looking for "DELAYED" in /proc/mdstat, and if found,
3531 * wait a while
3532 */
3533 do {
3534 struct mdstat_ent *mds, *m;
3535 delayed = 0;
3536 mds = mdstat_read(1, 0);
3537 for (m = mds; m; m = m->next)
3538 if (strcmp(m->devnm, sra->sys_name) == 0) {
3539 if (m->resync && m->percent == RESYNC_DELAYED)
3540 delayed = 1;
3541 if (m->resync == 0)
3542 /* Haven't started the reshape thread
3543 * yet, wait a bit
3544 */
3545 delayed = 2;
3546 break;
3547 }
3548 free_mdstat(mds);
3549 if (delayed == 1 && get_linux_version() < 3007000) {
3550 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3551 " You might experience problems until other reshapes complete.\n");
3552 delayed = 0;
3553 }
3554 if (delayed)
3555 mdstat_wait(30 - (delayed-1) * 25);
3556 } while (delayed);
3557 mdstat_close();
3558 close(fd);
3559 if (check_env("MDADM_GROW_VERIFY"))
3560 fd = open(devname, O_RDONLY | O_DIRECT);
3561 else
3562 fd = -1;
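/* Lock current and future memory: the monitor must not be paged
 * out while it has regions of the array suspended, or I/O to the
 * array (including paging) could deadlock. */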
3563 mlockall(MCL_FUTURE);
3564
3565 signal(SIGTERM, catch_term);
3566
3567 if (st->ss->external) {
3568 /* metadata handler takes it from here */
3569 done = st->ss->manage_reshape(
3570 fd, sra, &reshape, st, blocks,
3571 fdlist, offsets, d - odisks, fdlist + odisks,
3572 offsets + odisks);
3573 } else
3574 done = child_monitor(
3575 fd, sra, &reshape, st, blocks, fdlist, offsets,
3576 d - odisks, fdlist + odisks, offsets + odisks);
3577
3578 free(fdlist);
3579 free(offsets);
3580
3581 if (backup_file && done) {
3582 char *bul;
3583 bul = make_backup(sra->sys_name);
3584 if (bul) {
3585 char buf[1024];
3586 int l = readlink(bul, buf, sizeof(buf) - 1);
3587 if (l > 0) {
3588 buf[l] = 0;
3589 unlink(buf);
3590 }
3591 unlink(bul);
3592 free(bul);
3593 }
3594 unlink(backup_file);
3595 }
3596 if (!done) {
3597 abort_reshape(sra);
3598 goto out;
3599 }
3600
3601 if (!st->ss->external &&
3602 !(reshape.before.data_disks != reshape.after.data_disks &&
3603 info->custom_array_size) && info->new_level == reshape.level &&
3604 !forked) {
3605 /* no need to wait for the reshape to finish as
3606 * there is nothing more to do.
3607 */
3608 sysfs_free(sra);
3609 exit(0);
3610 }
3611 wait_reshape(sra);
3612
3613 if (st->ss->external) {
3614 /* Re-load the metadata as much could have changed */
3615 int cfd = open_dev(st->container_devnm);
3616 if (cfd >= 0) {
3617 flush_mdmon(container);
3618 st->ss->free_super(st);
3619 st->ss->load_container(st, cfd, container);
3620 close(cfd);
3621 }
3622 }
3623
3624 /* Set the new array size if required; custom_array_size is used
3625 * by this metadata.
3626 */
3627 if (reshape.before.data_disks != reshape.after.data_disks &&
3628 info->custom_array_size)
3629 set_array_size(st, info, info->text_version);
3630
3631 if (info->new_level != reshape.level) {
3632 if (fd < 0)
3633 fd = open(devname, O_RDONLY);
3634 impose_level(fd, info->new_level, devname, verbose);
3635 close(fd);
3636 if (info->new_level == 0)
3637 st->update_tail = NULL;
3638 }
3639 out:
3640 sysfs_free(sra);
3641 if (forked)
3642 return 0;
3643 unfreeze(st);
3644 exit(0);
3645
3646 release:
3647 free(fdlist);
3648 free(offsets);
3649 if (orig_level != UnSet && sra) {
3650 c = map_num(pers, orig_level);
3651 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3652 pr_err("aborting level change\n");
3653 }
3654 sysfs_free(sra);
3655 if (!forked)
3656 unfreeze(st);
3657 return 1;
3658 }
3659
3660 /* mdfd handle is passed to be closed in child process (after fork).
3661 */
3662 int reshape_container(char *container, char *devname,
3663 int mdfd,
3664 struct supertype *st,
3665 struct mdinfo *info,
3666 int force,
3667 char *backup_file, int verbose,
3668 int forked, int restart, int freeze_reshape)
3669 {
3670 struct mdinfo *cc = NULL;
3671 int rv = restart;
3672 char last_devnm[32] = "";
3673
3674 /* component_size is not meaningful for a container,
3675 * so pass '0' meaning 'no change'
3676 */
3677 if (!restart &&
3678 reshape_super(st, 0, info->new_level,
3679 info->new_layout, info->new_chunk,
3680 info->array.raid_disks, info->delta_disks,
3681 backup_file, devname, APPLY_METADATA_CHANGES,
3682 verbose)) {
3683 unfreeze(st);
3684 return 1;
3685 }
3686
3687 sync_metadata(st);
3688
3689 /* ping monitor to be sure that update is on disk
3690 */
3691 ping_monitor(container);
3692
3693 if (!forked && !freeze_reshape && !check_env("MDADM_NO_SYSTEMCTL"))
3694 if (continue_via_systemd(container))
3695 return 0;
3696
3697 switch (forked ? 0 : fork()) {
3698 case -1: /* error */
3699 perror("Cannot fork to complete reshape\n");
3700 unfreeze(st);
3701 return 1;
3702 default: /* parent */
3703 if (!freeze_reshape)
3704 printf("%s: multi-array reshape continues in background\n", Name);
3705 return 0;
3706 case 0: /* child */
3707 map_fork();
3708 break;
3709 }
3710
3711 /* close unused handle in child process
3712 */
3713 if (mdfd > -1)
3714 close(mdfd);
3715
3716 while(1) {
3717 /* For each member array with reshape_active,
3718 * we need to perform the reshape.
3719 * We pick the first array that needs reshaping and
3720 * reshape it. reshape_array() will re-read the metadata
3721 * so the next time through a different array should be
3722 * ready for reshape.
3723 * It is possible that the 'different' array will not
3724 * be assembled yet. In that case we simply exit.
3725 * When it is assembled, the mdadm which assembles it
3726 * will take over the reshape.
3727 */
3728 struct mdinfo *content;
3729 int fd;
3730 struct mdstat_ent *mdstat;
3731 char *adev;
3732 dev_t devid;
3733
3734 sysfs_free(cc);
3735
3736 cc = st->ss->container_content(st, NULL);
3737
3738 for (content = cc; content ; content = content->next) {
3739 char *subarray;
3740 if (!content->reshape_active)
3741 continue;
3742
3743 subarray = strchr(content->text_version+1, '/')+1;
3744 mdstat = mdstat_by_subdev(subarray, container);
3745 if (!mdstat)
3746 continue;
3747 if (mdstat->active == 0) {
3748 pr_err("Skipping inactive array %s.\n",
3749 mdstat->devnm);
3750 free_mdstat(mdstat);
3751 mdstat = NULL;
3752 continue;
3753 }
3754 break;
3755 }
3756 if (!content)
3757 break;
3758
3759 devid = devnm2devid(mdstat->devnm);
3760 adev = map_dev(major(devid), minor(devid), 0);
3761 if (!adev)
3762 adev = content->text_version;
3763
3764 fd = open_dev(mdstat->devnm);
3765 if (fd < 0) {
3766 pr_err("Device %s cannot be opened for reshape.\n",
3767 adev);
3768 break;
3769 }
3770
3771 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3772 /* Do not allow for multiple reshape_array() calls for
3773 * the same array.
3774 * It can happen when reshape_array() returns without
3775 * error but the reshape has not finished (wrong reshape
3776 * starting/continuation conditions). Mdmon then does not
3777 * switch to the next array in the container, and the same
3778 * array meets the re-entry conditions again.
3779 * This is possibly interim until the behaviour of
3780 * reshape_array() is resolved.
3781 */
3782 printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev);
3783 close(fd);
3784 break;
3785 }
3786 strcpy(last_devnm, mdstat->devnm);
3787
3788 if (sysfs_init(content, fd, mdstat->devnm)) {
3789 pr_err("Unable to initialize sysfs for %s\n",
3790 mdstat->devnm);
3791 rv = 1;
3792 break;
3793 }
3794
3795 if (mdmon_running(container))
3796 flush_mdmon(container);
3797
3798 rv = reshape_array(container, fd, adev, st,
3799 content, force, NULL, INVALID_SECTORS,
3800 backup_file, verbose, 1, restart,
3801 freeze_reshape);
3802 close(fd);
3803
3804 if (freeze_reshape) {
3805 sysfs_free(cc);
3806 exit(0);
3807 }
3808
3809 restart = 0;
3810 if (rv)
3811 break;
3812
3813 if (mdmon_running(container))
3814 flush_mdmon(container);
3815 }
3816 if (!rv)
3817 unfreeze(st);
3818 sysfs_free(cc);
3819 exit(0);
3820 }
3821
3822 /*
3823 * We run a child process in the background which performs the following
3824 * steps:
3825 * - wait for resync to reach a certain point
3826 * - suspend io to the following section
3827 * - backup that section
3828 * - allow resync to proceed further
3829 * - resume io
3830 * - discard the backup.
3831 *
3832 * These are combined in slightly different ways in the three cases.
3833 * Grow:
3834 * - suspend/backup/allow/wait/resume/discard
3835 * Shrink:
3836 * - allow/wait/suspend/backup/allow/wait/resume/discard
3837 * same-size:
3838 * - wait/resume/discard/suspend/backup/allow
3839 *
3840 * suspend/backup/allow always come together
3841 * wait/resume/discard do too.
3842 * For the same-size case we have two backups to improve flow.
3843 *
3844 */
3845
3846 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3847 unsigned long long backup_point,
3848 unsigned long long wait_point,
3849 unsigned long long *suspend_point,
3850 unsigned long long *reshape_completed, int *frozen)
3851 {
3852 /* This function is called repeatedly by the reshape manager.
3853 * It determines how much progress can safely be made and allows
3854 * that progress.
3855 * - 'info' identifies the array and particularly records in
3856 * ->reshape_progress the metadata's knowledge of progress.
3857 * This is a sector offset from the start of the array
3858 * of the next array block to be relocated. This number
3859 * may increase from 0 or decrease from array_size, depending
3860 * on the type of reshape that is happening.
3861 * Note that in contrast, 'sync_completed' is a block count of the
3862 * reshape so far. It gives the distance between the start point
3863 * (head or tail of device) and the next place that data will be
3864 * written. It always increases.
3865 * - 'reshape' is the structure created by analyse_change
3866 * - 'backup_point' shows how much data the metadata manager has
3867 * backed up. For reshapes with increasing progress, it is the next
3868 * address to be backed up; previous addresses have been backed up. For
3869 * decreasing progress, it is the earliest address that has been
3870 * backed up - later addresses are also backed up.
3871 * So addresses between reshape_progress and backup_point are
3872 * backed up, providing those are in the 'correct' order.
3873 * - 'wait_point' is an array address. When reshape_completed
3874 * passes this point, progress_reshape should return. It might
3875 * return earlier if it determines that ->reshape_progress needs
3876 * to be updated or further backup is needed.
3877 * - suspend_point is maintained by progress_reshape and the caller
3878 * should not touch it except to initialise to zero.
3879 * It is an array address and it only increases in 2.6.37 and earlier.
3880 * This makes it difficult to handle reducing reshapes with
3881 * external metadata.
3882 * However: it is similar to backup_point in that it records the
3883 * other end of a suspended region from reshape_progress.
3884 * It is moved to extend the region that is safe to back up and/or
3885 * reshape
3886 * - reshape_completed is read from sysfs and returned. The caller
3887 * should copy this into ->reshape_progress when it has reason to
3888 * believe that the metadata knows this, and any backup outside this
3889 * has been erased.
3890 *
3891 * Return value is:
3892 * 1 if more data, from backup_point onwards but only as far as
3893 * suspend_point, should be backed up
3894 * 0 if things are progressing smoothly
3895 * -1 if the reshape is finished because it is all done,
3896 * -2 if the reshape is finished due to an error.
3897 */
3898
3899 int advancing = (reshape->after.data_disks
3900 >= reshape->before.data_disks);
3901 unsigned long long need_backup; /* All data between start of array and
3902 * here will at some point need to
3903 * be backed up.
3904 */
3905 unsigned long long read_offset, write_offset;
3906 unsigned long long write_range;
3907 unsigned long long max_progress, target, completed;
3908 unsigned long long array_size = (info->component_size
3909 * reshape->before.data_disks);
3910 int fd;
3911 char buf[20];
3912
3913 /* First, we unsuspend any region that is now known to be safe.
3914 * If suspend_point is on the 'wrong' side of reshape_progress, then
3915 * we don't have or need suspension at the moment. This is true for
3916 * native metadata when we don't need to back-up.
3917 */
3918 if (advancing) {
3919 if (info->reshape_progress <= *suspend_point)
3920 sysfs_set_num(info, NULL, "suspend_lo",
3921 info->reshape_progress);
3922 } else {
3923 /* Note: this won't work in 2.6.37 and before.
3924 * Something somewhere should make sure we don't need it!
3925 */
3926 if (info->reshape_progress >= *suspend_point)
3927 sysfs_set_num(info, NULL, "suspend_hi",
3928 info->reshape_progress);
3929 }
3930
3931 /* Now work out how far it is safe to progress.
3932 * If the read_offset for ->reshape_progress is less than
3933 * 'blocks' beyond the write_offset, we can only progress as far
3934 * as a backup.
3935 * Otherwise we can progress until the write_offset for the new location
3936 * reaches (within 'blocks' of) the read_offset at the current location.
3937 * However that region must be suspended unless we are using native
3938 * metadata.
3939 * If we need to suspend more, we limit it to 128M per device, which is
3940 * rather arbitrary and should be some time-based calculation.
3941 */
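/* A rough example with made-up numbers: growing from 3 to 4 data
 * disks with a 512K chunk gives write_range = 1024 sectors; at
 * reshape_progress = 3000000 array sectors, read_offset = 1000000
 * and write_offset = 750000 sectors per device, so read_offset is
 * well past write_offset + write_range and max_progress becomes
 * read_offset * 4 = 4000000, subject to the suspension rules below.
 */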
3942 read_offset = info->reshape_progress / reshape->before.data_disks;
3943 write_offset = info->reshape_progress / reshape->after.data_disks;
3944 write_range = info->new_chunk/512;
3945 if (reshape->before.data_disks == reshape->after.data_disks)
3946 need_backup = array_size;
3947 else
3948 need_backup = reshape->backup_blocks;
3949 if (advancing) {
3950 if (read_offset < write_offset + write_range)
3951 max_progress = backup_point;
3952 else
3953 max_progress =
3954 read_offset * reshape->after.data_disks;
3955 } else {
3956 if (read_offset > write_offset - write_range)
3957 /* Can only progress as far as has been backed up,
3958 * which must be suspended */
3959 max_progress = backup_point;
3960 else if (info->reshape_progress <= need_backup)
3961 max_progress = backup_point;
3962 else {
3963 if (info->array.major_version >= 0)
3964 /* Can progress until backup is needed */
3965 max_progress = need_backup;
3966 else {
3967 /* Can progress until metadata update is required */
3968 max_progress =
3969 read_offset * reshape->after.data_disks;
3970 /* but data must be suspended */
3971 if (max_progress < *suspend_point)
3972 max_progress = *suspend_point;
3973 }
3974 }
3975 }
3976
3977 /* We know it is safe to progress to 'max_progress' providing
3978 * it is suspended or we are using native metadata.
3979 * Consider extending suspend_point 128M per device if it
3980 * is less than 64M per device beyond reshape_progress.
3981 * But always do a multiple of 'blocks'
3982 * FIXME this is too big - it takes too long to complete
3983 * this much.
3984 */
3985 target = 64*1024*2 * min(reshape->before.data_disks,
3986 reshape->after.data_disks);
3987 target /= reshape->backup_blocks;
3988 if (target < 2)
3989 target = 2;
3990 target *= reshape->backup_blocks;
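/* With the rules above, target is roughly 64MiB of array address
 * space per data disk (64*1024*2 sectors each), rounded down to a
 * whole number of backup_blocks with a floor of 2 * backup_blocks;
 * the suspended window below is then extended by up to 2 * target
 * at a time.
 */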
3991
3992 /* For externally managed metadata we always need to suspend IO to
3993 * the area being reshaped so we regularly push suspend_point forward.
3994 * For native metadata we only need the suspend if we are going to do
3995 * a backup.
3996 */
3997 if (advancing) {
3998 if ((need_backup > info->reshape_progress ||
3999 info->array.major_version < 0) &&
4000 *suspend_point < info->reshape_progress + target) {
4001 if (need_backup < *suspend_point + 2 * target)
4002 *suspend_point = need_backup;
4003 else if (*suspend_point + 2 * target < array_size)
4004 *suspend_point += 2 * target;
4005 else
4006 *suspend_point = array_size;
4007 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
4008 if (max_progress > *suspend_point)
4009 max_progress = *suspend_point;
4010 }
4011 } else {
4012 if (info->array.major_version >= 0) {
4013 /* Only need to suspend when about to back up */
4014 if (info->reshape_progress < need_backup * 2 &&
4015 *suspend_point > 0) {
4016 *suspend_point = 0;
4017 sysfs_set_num(info, NULL, "suspend_lo", 0);
4018 sysfs_set_num(info, NULL, "suspend_hi",
4019 need_backup);
4020 }
4021 } else {
4022 /* Need to suspend continually */
4023 if (info->reshape_progress < *suspend_point)
4024 *suspend_point = info->reshape_progress;
4025 if (*suspend_point + target < info->reshape_progress)
4026 /* No need to move suspend region yet */;
4027 else {
4028 if (*suspend_point >= 2 * target)
4029 *suspend_point -= 2 * target;
4030 else
4031 *suspend_point = 0;
4032 sysfs_set_num(info, NULL, "suspend_lo",
4033 *suspend_point);
4034 }
4035 if (max_progress < *suspend_point)
4036 max_progress = *suspend_point;
4037 }
4038 }
4039
4040 /* now set sync_max to allow that progress. sync_max, like
4041 * sync_completed, is a count of sectors written per device, so
4042 * we find the difference between max_progress and the start point,
4043 * and divide that by after.data_disks to get a sync_max
4044 * number.
4045 * At the same time we convert wait_point to a similar number
4046 * for comparing against sync_completed.
4047 */
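/* Continuing the made-up example above (old and new chunk both
 * 512K): max_progress = 4000000 array sectors with 4 data disks
 * becomes 1000000 sectors per device, and rounding down to the
 * 1024-sector chunk gives sync_max = 999424; wait_point is likewise
 * divided by the 4 data disks before being compared with
 * sync_completed.
 */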
4048 /* scale down max_progress to per_disk */
4049 max_progress /= reshape->after.data_disks;
4050 /*
4051 * Round to chunk size as some kernels give an erroneously
4052 * high number
4053 */
4054 max_progress /= info->new_chunk/512;
4055 max_progress *= info->new_chunk/512;
4056 /* And round to old chunk size as the kernel wants that */
4057 max_progress /= info->array.chunk_size/512;
4058 max_progress *= info->array.chunk_size/512;
4059 /* Limit progress to the whole device */
4060 if (max_progress > info->component_size)
4061 max_progress = info->component_size;
4062 wait_point /= reshape->after.data_disks;
4063 if (!advancing) {
4064 /* switch from 'device offset' to 'processed block count' */
4065 max_progress = info->component_size - max_progress;
4066 wait_point = info->component_size - wait_point;
4067 }
4068
4069 if (!*frozen)
4070 sysfs_set_num(info, NULL, "sync_max", max_progress);
4071
4072 /* Now wait. If we have already reached the point that we were
4073 * asked to wait to, don't wait at all, else wait for any change.
4074 * We need to select on 'sync_completed' as that is the place that
4075 * notifications happen, but we are really interested in
4076 * 'reshape_position'
4077 */
4078 fd = sysfs_get_fd(info, NULL, "sync_completed");
4079 if (fd < 0)
4080 goto check_progress;
4081
4082 if (sysfs_fd_get_ll(fd, &completed) < 0)
4083 goto check_progress;
4084
4085 while (completed < max_progress && completed < wait_point) {
4086 /* Check that sync_action is still 'reshape' to avoid
4087 * waiting forever on a dead array
4088 */
4089 char action[20];
4090 if (sysfs_get_str(info, NULL, "sync_action", action, 20) <= 0 ||
4091 strncmp(action, "reshape", 7) != 0)
4092 break;
4093 /* Some kernels reset 'sync_completed' to zero
4094 * before setting 'sync_action' to 'idle'.
4095 * So we need these extra tests.
4096 */
4097 if (completed == 0 && advancing &&
4098 strncmp(action, "idle", 4) == 0 &&
4099 info->reshape_progress > 0)
4100 break;
4101 if (completed == 0 && !advancing &&
4102 strncmp(action, "idle", 4) == 0 &&
4103 info->reshape_progress <
4104 (info->component_size * reshape->after.data_disks))
4105 break;
4106 sysfs_wait(fd, NULL);
4107 if (sysfs_fd_get_ll(fd, &completed) < 0)
4108 goto check_progress;
4109 }
4110 /* Some kernels reset 'sync_completed' to zero,
4111 * but we need the real position md has reached.
4112 * In that case, read 'reshape_position' from sysfs.
4113 */
4114 if (completed == 0) {
4115 unsigned long long reshapep;
4116 char action[20];
4117 if (sysfs_get_str(info, NULL, "sync_action", action, 20) > 0 &&
4118 strncmp(action, "idle", 4) == 0 &&
4119 sysfs_get_ll(info, NULL,
4120 "reshape_position", &reshapep) == 0)
4121 *reshape_completed = reshapep;
4122 } else {
4123 /* some kernels can give an incorrectly high
4124 * 'completed' number, so round down */
4125 completed /= (info->new_chunk/512);
4126 completed *= (info->new_chunk/512);
4127 /* Convert 'completed' back in to a 'progress' number */
4128 completed *= reshape->after.data_disks;
4129 if (!advancing)
4130 completed = (info->component_size
4131 * reshape->after.data_disks
4132 - completed);
4133 *reshape_completed = completed;
4134 }
4135
4136 close(fd);
4137
4138 /* We return the need_backup flag. Caller will decide
4139 * how much - a multiple of ->backup_blocks up to *suspend_point
4140 */
4141 if (advancing)
4142 return need_backup > info->reshape_progress;
4143 else
4144 return need_backup >= info->reshape_progress;
4145
4146 check_progress:
4147 /* if we couldn't read a number from sync_completed, then
4148 * either the reshape did complete, or it aborted.
4149 * We can tell which by checking for 'none' in reshape_position.
4150 * If it did abort, then it might immediately restart if it
4151 * was just a device failure that leaves us degraded but
4152 * functioning.
4153 */
4154 if (sysfs_get_str(info, NULL, "reshape_position", buf,
4155 sizeof(buf)) < 0 || strncmp(buf, "none", 4) != 0) {
4156 /* The abort might only be temporary. Wait up to 10
4157 * seconds for fd to contain a valid number again.
4158 */
4159 int wait = 10000;
4160 int rv = -2;
4161 unsigned long long new_sync_max;
4162 while (fd >= 0 && rv < 0 && wait > 0) {
4163 if (sysfs_wait(fd, &wait) != 1)
4164 break;
4165 switch (sysfs_fd_get_ll(fd, &completed)) {
4166 case 0:
4167 /* all good again */
4168 rv = 1;
4169 /* If "sync_max" is no longer max_progress
4170 * we need to freeze things
4171 */
4172 sysfs_get_ll(info, NULL, "sync_max",
4173 &new_sync_max);
4174 *frozen = (new_sync_max != max_progress);
4175 break;
4176 case -2: /* read error - abort */
4177 wait = 0;
4178 break;
4179 }
4180 }
4181 if (fd >= 0)
4182 close(fd);
4183 return rv; /* abort */
4184 } else {
4185 /* Maybe racing with array shutdown - check state */
4186 if (fd >= 0)
4187 close(fd);
4188 if (sysfs_get_str(info, NULL, "array_state", buf,
4189 sizeof(buf)) < 0 ||
4190 strncmp(buf, "inactive", 8) == 0 ||
4191 strncmp(buf, "clear",5) == 0)
4192 return -2; /* abort */
4193 return -1; /* complete */
4194 }
4195 }
4196
4197 /* FIXME return status is never checked */
4198 static int grow_backup(struct mdinfo *sra,
4199 unsigned long long offset, /* per device */
4200 unsigned long stripes, /* per device, in old chunks */
4201 int *sources, unsigned long long *offsets,
4202 int disks, int chunk, int level, int layout,
4203 int dests, int *destfd, unsigned long long *destoffsets,
4204 int part, int *degraded,
4205 char *buf)
4206 {
4207 /* Back up 'blocks' sectors at 'offset' on each device of the array,
4208 * to storage 'destfd' (offset 'destoffsets'), after first
4209 * suspending IO. Then allow resync to continue
4210 * over the suspended section.
4211 * Use part 'part' of the backup-super-block.
4212 */
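/* As the code below shows, part 0 of the backup is described by
 * bsb.arraystart/bsb.length and part 1 by bsb.arraystart2/
 * bsb.length2 (with magic[15] set to '2'); the 512-byte bsb itself
 * is written 4096 bytes before destoffsets[i], and a second copy
 * follows the backed-up data when destoffsets[i] > 4096.
 */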
4213 int odata = disks;
4214 int rv = 0;
4215 int i;
4216 unsigned long long ll;
4217 int new_degraded;
4218 //printf("offset %llu\n", offset);
4219 if (level >= 4)
4220 odata--;
4221 if (level == 6)
4222 odata--;
4223
4224 /* Check that the array hasn't become degraded, else we might back up the wrong data */
4225 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
4226 return -1; /* FIXME this error is ignored */
4227 new_degraded = (int)ll;
4228 if (new_degraded != *degraded) {
4229 /* check each device to ensure it is still working */
4230 struct mdinfo *sd;
4231 for (sd = sra->devs ; sd ; sd = sd->next) {
4232 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4233 continue;
4234 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
4235 char sbuf[100];
4236
4237 if (sysfs_get_str(sra, sd, "state",
4238 sbuf, sizeof(sbuf)) < 0 ||
4239 strstr(sbuf, "faulty") ||
4240 strstr(sbuf, "in_sync") == NULL) {
4241 /* this device is dead */
4242 sd->disk.state = (1<<MD_DISK_FAULTY);
4243 if (sd->disk.raid_disk >= 0 &&
4244 sources[sd->disk.raid_disk] >= 0) {
4245 close(sources[sd->disk.raid_disk]);
4246 sources[sd->disk.raid_disk] = -1;
4247 }
4248 }
4249 }
4250 }
4251 *degraded = new_degraded;
4252 }
4253 if (part) {
4254 bsb.arraystart2 = __cpu_to_le64(offset * odata);
4255 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
4256 } else {
4257 bsb.arraystart = __cpu_to_le64(offset * odata);
4258 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
4259 }
4260 if (part)
4261 bsb.magic[15] = '2';
4262 for (i = 0; i < dests; i++)
4263 if (part)
4264 lseek64(destfd[i], destoffsets[i] +
4265 __le64_to_cpu(bsb.devstart2)*512, 0);
4266 else
4267 lseek64(destfd[i], destoffsets[i], 0);
4268
4269 rv = save_stripes(sources, offsets, disks, chunk, level, layout,
4270 dests, destfd, offset * 512 * odata,
4271 stripes * chunk * odata, buf);
4272
4273 if (rv)
4274 return rv;
4275 bsb.mtime = __cpu_to_le64(time(0));
4276 for (i = 0; i < dests; i++) {
4277 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4278
4279 bsb.sb_csum = bsb_csum((char*)&bsb,
4280 ((char*)&bsb.sb_csum)-((char*)&bsb));
4281 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4282 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4283 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4284
4285 rv = -1;
4286 if ((unsigned long long)lseek64(destfd[i],
4287 destoffsets[i] - 4096, 0) !=
4288 destoffsets[i] - 4096)
4289 break;
4290 if (write(destfd[i], &bsb, 512) != 512)
4291 break;
4292 if (destoffsets[i] > 4096) {
4293 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
4294 destoffsets[i]+stripes*chunk*odata)
4295 break;
4296 if (write(destfd[i], &bsb, 512) != 512)
4297 break;
4298 }
4299 fsync(destfd[i]);
4300 rv = 0;
4301 }
4302
4303 return rv;
4304 }
4305
4306 /* in 2.6.30, the value reported by sync_completed can be
4307 * less than it should be by one stripe.
4308 * This only happens when reshape hits sync_max and pauses.
4309 * So allow wait_backup to either extend sync_max further
4310 * than strictly necessary, or return before the
4311 * sync has got quite as far as we would really like.
4312 * This is what 'blocks2' is for.
4313 * The various callers give appropriate values so that
4314 * everything works.
4315 */
4316 /* FIXME return value is often ignored */
4317 static int forget_backup(int dests, int *destfd,
4318 unsigned long long *destoffsets,
4319 int part)
4320 {
4321 /*
4322 * Erase backup 'part' (which is 0 or 1)
4323 */
4324 int i;
4325 int rv;
4326
4327 if (part) {
4328 bsb.arraystart2 = __cpu_to_le64(0);
4329 bsb.length2 = __cpu_to_le64(0);
4330 } else {
4331 bsb.arraystart = __cpu_to_le64(0);
4332 bsb.length = __cpu_to_le64(0);
4333 }
4334 bsb.mtime = __cpu_to_le64(time(0));
4335 rv = 0;
4336 for (i = 0; i < dests; i++) {
4337 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4338 bsb.sb_csum = bsb_csum((char*)&bsb,
4339 ((char*)&bsb.sb_csum)-((char*)&bsb));
4340 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4341 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4342 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4343 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4344 destoffsets[i]-4096)
4345 rv = -1;
4346 if (rv == 0 && write(destfd[i], &bsb, 512) != 512)
4347 rv = -1;
4348 fsync(destfd[i]);
4349 }
4350 return rv;
4351 }
4352
4353 static void fail(char *msg)
4354 {
4355 int rv;
4356 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4357 rv |= (write(2, "\n", 1) != 1);
4358 exit(rv ? 1 : 2);
4359 }
4360
4361 static char *abuf, *bbuf;
4362 static unsigned long long abuflen;
4363 static void validate(int afd, int bfd, unsigned long long offset)
4364 {
4365 /* check the data in the backup against the array.
4366 * This is only used for regression testing and should not
4367 * be used while the array is active
4368 */
4369 if (afd < 0)
4370 return;
4371 lseek64(bfd, offset - 4096, 0);
4372 if (read(bfd, &bsb2, 512) != 512)
4373 fail("cannot read bsb");
4374 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4375 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4376 fail("first csum bad");
4377 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4378 fail("magic is bad");
4379 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4380 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4381 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4382 fail("second csum bad");
4383
4384 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4385 fail("devstart is wrong");
4386
4387 if (bsb2.length) {
4388 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4389
4390 if (abuflen < len) {
4391 free(abuf);
4392 free(bbuf);
4393 abuflen = len;
4394 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4395 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4396 abuflen = 0;
4397 /* just stop validating on mem-alloc failure */
4398 return;
4399 }
4400 }
4401
4402 lseek64(bfd, offset, 0);
4403 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4404 //printf("len %llu\n", len);
4405 fail("read first backup failed");
4406 }
4407 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4408 if ((unsigned long long)read(afd, abuf, len) != len)
4409 fail("read first from array failed");
4410 if (memcmp(bbuf, abuf, len) != 0) {
4411 #if 0
4412 int i;
4413 printf("offset=%llu len=%llu\n",
4414 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4415 for (i=0; i<len; i++)
4416 if (bbuf[i] != abuf[i]) {
4417 printf("first diff byte %d\n", i);
4418 break;
4419 }
4420 #endif
4421 fail("data1 compare failed");
4422 }
4423 }
4424 if (bsb2.length2) {
4425 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4426
4427 if (abuflen < len) {
4428 free(abuf);
4429 free(bbuf);
4430 abuflen = len;
4431 abuf = xmalloc(abuflen);
4432 bbuf = xmalloc(abuflen);
4433 }
4434
4435 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4436 if ((unsigned long long)read(bfd, bbuf, len) != len)
4437 fail("read second backup failed");
4438 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4439 if ((unsigned long long)read(afd, abuf, len) != len)
4440 fail("read second from array failed");
4441 if (memcmp(bbuf, abuf, len) != 0)
4442 fail("data2 compare failed");
4443 }
4444 }
4445
4446 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4447 struct supertype *st, unsigned long blocks,
4448 int *fds, unsigned long long *offsets,
4449 int dests, int *destfd, unsigned long long *destoffsets)
4450 {
4451 /* Monitor a reshape where backup is being performed using
4452 * 'native' mechanism - either to a backup file, or
4453 * to some space in a spare.
4454 */
4455 char *buf;
4456 int degraded = -1;
4457 unsigned long long speed;
4458 unsigned long long suspend_point, array_size;
4459 unsigned long long backup_point, wait_point;
4460 unsigned long long reshape_completed;
4461 int done = 0;
4462 int increasing = reshape->after.data_disks >=
4463 reshape->before.data_disks;
4464 int part = 0; /* The next part of the backup area to fill. It
4465 * may already be full, so we need to check */
4466 int level = reshape->level;
4467 int layout = reshape->before.layout;
4468 int data = reshape->before.data_disks;
4469 int disks = reshape->before.data_disks + reshape->parity;
4470 int chunk = sra->array.chunk_size;
4471 struct mdinfo *sd;
4472 unsigned long stripes;
4473 int uuid[4];
4474 int frozen = 0;
4475
4476 /* set up the backup-super-block. This requires the
4477 * uuid from the array.
4478 */
4479 /* Find a superblock */
4480 for (sd = sra->devs; sd; sd = sd->next) {
4481 char *dn;
4482 int devfd;
4483 int ok;
4484 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4485 continue;
4486 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4487 devfd = dev_open(dn, O_RDONLY);
4488 if (devfd < 0)
4489 continue;
4490 ok = st->ss->load_super(st, devfd, NULL);
4491 close(devfd);
4492 if (ok == 0)
4493 break;
4494 }
4495 if (!sd) {
4496 pr_err("Cannot find a superblock\n");
4497 return 0;
4498 }
4499
4500 memset(&bsb, 0, 512);
4501 memcpy(bsb.magic, "md_backup_data-1", 16);
4502 st->ss->uuid_from_super(st, uuid);
4503 memcpy(bsb.set_uuid, uuid, 16);
4504 bsb.mtime = __cpu_to_le64(time(0));
4505 bsb.devstart2 = blocks;
4506
4507 stripes = blocks / (sra->array.chunk_size/512) /
4508 reshape->before.data_disks;
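/* For example (made-up numbers): blocks = 24576 sectors with a
 * 512K chunk and 3 data disks gives stripes = 24576 / 1024 / 3 = 8
 * old-layout chunks per device for each backup slot.
 */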
4509
4510 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4511 /* Don't start the 'reshape' */
4512 return 0;
4513 if (reshape->before.data_disks == reshape->after.data_disks) {
4514 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4515 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4516 }
4517
4518 if (increasing) {
4519 array_size = sra->component_size * reshape->after.data_disks;
4520 backup_point = sra->reshape_progress;
4521 suspend_point = 0;
4522 } else {
4523 array_size = sra->component_size * reshape->before.data_disks;
4524 backup_point = reshape->backup_blocks;
4525 suspend_point = array_size;
4526 }
4527
4528 while (!done) {
4529 int rv;
4530
4531 /* Want to return as soon as the oldest backup slot can
4532 * be released as that allows us to start backing up
4533 * some more, providing suspend_point has been
4534 * advanced, which it should have.
4535 */
4536 if (increasing) {
4537 wait_point = array_size;
4538 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4539 wait_point = (__le64_to_cpu(bsb.arraystart) +
4540 __le64_to_cpu(bsb.length));
4541 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4542 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4543 __le64_to_cpu(bsb.length2));
4544 } else {
4545 wait_point = 0;
4546 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4547 wait_point = __le64_to_cpu(bsb.arraystart);
4548 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4549 wait_point = __le64_to_cpu(bsb.arraystart2);
4550 }
4551
4552 reshape_completed = sra->reshape_progress;
4553 rv = progress_reshape(sra, reshape,
4554 backup_point, wait_point,
4555 &suspend_point, &reshape_completed,
4556 &frozen);
4557 /* external metadata would need to ping_monitor here */
4558 sra->reshape_progress = reshape_completed;
4559
4560 /* Clear any backup region that is before 'here' */
4561 if (increasing) {
4562 if (__le64_to_cpu(bsb.length) > 0 &&
4563 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4564 __le64_to_cpu(bsb.length)))
4565 forget_backup(dests, destfd,
4566 destoffsets, 0);
4567 if (__le64_to_cpu(bsb.length2) > 0 &&
4568 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4569 __le64_to_cpu(bsb.length2)))
4570 forget_backup(dests, destfd,
4571 destoffsets, 1);
4572 } else {
4573 if (__le64_to_cpu(bsb.length) > 0 &&
4574 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4575 forget_backup(dests, destfd,
4576 destoffsets, 0);
4577 if (__le64_to_cpu(bsb.length2) > 0 &&
4578 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4579 forget_backup(dests, destfd,
4580 destoffsets, 1);
4581 }
4582 if (sigterm)
4583 rv = -2;
4584 if (rv < 0) {
4585 if (rv == -1)
4586 done = 1;
4587 break;
4588 }
4589 if (rv == 0 && increasing && !st->ss->external) {
4590 /* No longer need to monitor this reshape */
4591 sysfs_set_str(sra, NULL, "sync_max", "max");
4592 done = 1;
4593 break;
4594 }
4595
4596 while (rv) {
4597 unsigned long long offset;
4598 unsigned long actual_stripes;
4599 /* Need to back up some data.
4600 * If 'part' is not used and the desired
4601 * backup size is suspended, do a backup,
4602 * then consider the next part.
4603 */
4604 /* Check that 'part' is unused */
4605 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4606 break;
4607 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4608 break;
4609
4610 offset = backup_point / data;
4611 actual_stripes = stripes;
4612 if (increasing) {
4613 if (offset + actual_stripes * (chunk/512) >
4614 sra->component_size)
4615 actual_stripes = ((sra->component_size - offset)
4616 / (chunk/512));
4617 if (offset + actual_stripes * (chunk/512) >
4618 suspend_point/data)
4619 break;
4620 } else {
4621 if (offset < actual_stripes * (chunk/512))
4622 actual_stripes = offset / (chunk/512);
4623 offset -= actual_stripes * (chunk/512);
4624 if (offset < suspend_point/data)
4625 break;
4626 }
4627 if (actual_stripes == 0)
4628 break;
4629 grow_backup(sra, offset, actual_stripes, fds, offsets,
4630 disks, chunk, level, layout, dests, destfd,
4631 destoffsets, part, &degraded, buf);
4632 validate(afd, destfd[0], destoffsets[0]);
4633 /* record where 'part' is up to */
4634 part = !part;
4635 if (increasing)
4636 backup_point += actual_stripes * (chunk/512) * data;
4637 else
4638 backup_point -= actual_stripes * (chunk/512) * data;
4639 }
4640 }
4641
4642 /* FIXME maybe call progress_reshape one more time instead */
4643 /* remove any remaining suspension */
4644 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4645 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4646 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4647 sysfs_set_num(sra, NULL, "sync_min", 0);
4648
4649 if (reshape->before.data_disks == reshape->after.data_disks)
4650 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4651 free(buf);
4652 return done;
4653 }
4654
4655 /*
4656 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4657 * write that data into the array and update the superblocks with
4658 * the new reshape_progress
4659 */
4660 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist,
4661 int cnt, char *backup_file, int verbose)
4662 {
4663 int i, j;
4664 int old_disks;
4665 unsigned long long *offsets;
4666 unsigned long long nstripe, ostripe;
4667 int ndata, odata;
4668
4669 odata = info->array.raid_disks - info->delta_disks - 1;
4670 if (info->array.level == 6)
4671 odata--; /* number of data disks */
4672 ndata = info->array.raid_disks - 1;
4673 if (info->new_level == 6)
4674 ndata--;
4675
4676 old_disks = info->array.raid_disks - info->delta_disks;
4677
4678 if (info->delta_disks <= 0)
4679 /* Didn't grow, so the backup file must have
4680 * been used
4681 */
4682 old_disks = cnt;
4683 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4684 struct mdinfo dinfo;
4685 int fd;
4686 int bsbsize;
4687 char *devname, namebuf[20];
4688 unsigned long long lo, hi;
4689
4690 /* This was a spare and may have some saved data on it.
4691 * Load the superblock, find and load the
4692 * backup_super_block.
4693 * If either fails, go on to the next device.
4694 * If the backup contains no new info, move on;
4695 * else restore the data and update all superblocks.
4696 */
4697 if (i == old_disks-1) {
4698 fd = open(backup_file, O_RDONLY);
4699 if (fd<0) {
4700 pr_err("backup file %s inaccessible: %s\n",
4701 backup_file, strerror(errno));
4702 continue;
4703 }
4704 devname = backup_file;
4705 } else {
4706 fd = fdlist[i];
4707 if (fd < 0)
4708 continue;
4709 if (st->ss->load_super(st, fd, NULL))
4710 continue;
4711
4712 st->ss->getinfo_super(st, &dinfo, NULL);
4713 st->ss->free_super(st);
4714
4715 if (lseek64(fd,
4716 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4717 0) < 0) {
4718 pr_err("Cannot seek on device %d\n", i);
4719 continue; /* Cannot seek */
4720 }
4721 sprintf(namebuf, "device-%d", i);
4722 devname = namebuf;
4723 }
4724 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4725 if (verbose)
4726 pr_err("Cannot read from %s\n", devname);
4727 continue; /* Cannot read */
4728 }
4729 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4730 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4731 if (verbose)
4732 pr_err("No backup metadata on %s\n", devname);
4733 continue;
4734 }
4735 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4736 if (verbose)
4737 pr_err("Bad backup-metadata checksum on %s\n",
4738 devname);
4739 continue; /* bad checksum */
4740 }
4741 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4742 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4743 if (verbose)
4744 pr_err("Bad backup-metadata checksum2 on %s\n",
4745 devname);
4746 continue; /* Bad second checksum */
4747 }
4748 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4749 if (verbose)
4750 pr_err("Wrong uuid on backup-metadata on %s\n",
4751 devname);
4752 continue; /* Wrong uuid */
4753 }
4754
4755 /*
4756 * array utime and backup-mtime should be updated at
4757 * much the same time, but it seems that sometimes
4758 * they aren't... So allow considerable flexibility in
4759 * matching, and allow this test to be overridden by
4760 * an environment variable.
4761 */
4762 if (time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
4763 time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
4764 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4765 pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
4766 (unsigned long)__le64_to_cpu(bsb.mtime),
4767 (unsigned long)info->array.utime);
4768 } else {
4769 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4770 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4771 continue; /* time stamp is too bad */
4772 }
4773 }
4774
4775 if (bsb.magic[15] == '1') {
4776 if (bsb.length == 0)
4777 continue;
4778 if (info->delta_disks >= 0) {
4779 /* reshape_progress is increasing */
4780 if (__le64_to_cpu(bsb.arraystart)
4781 + __le64_to_cpu(bsb.length)
4782 < info->reshape_progress) {
4783 nonew:
4784 if (verbose)
4785 pr_err("backup-metadata found on %s but is not needed\n", devname);
4786 continue; /* No new data here */
4787 }
4788 } else {
4789 /* reshape_progress is decreasing */
4790 if (__le64_to_cpu(bsb.arraystart) >=
4791 info->reshape_progress)
4792 goto nonew; /* No new data here */
4793 }
4794 } else {
4795 if (bsb.length == 0 && bsb.length2 == 0)
4796 continue;
4797 if (info->delta_disks >= 0) {
4798 /* reshape_progress is increasing */
4799 if ((__le64_to_cpu(bsb.arraystart)
4800 + __le64_to_cpu(bsb.length)
4801 < info->reshape_progress) &&
4802 (__le64_to_cpu(bsb.arraystart2)
4803 + __le64_to_cpu(bsb.length2)
4804 < info->reshape_progress))
4805 goto nonew; /* No new data here */
4806 } else {
4807 /* reshape_progress is decreasing */
4808 if (__le64_to_cpu(bsb.arraystart) >=
4809 info->reshape_progress &&
4810 __le64_to_cpu(bsb.arraystart2) >=
4811 info->reshape_progress)
4812 goto nonew; /* No new data here */
4813 }
4814 }
4815 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4816 second_fail:
4817 if (verbose)
4818 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4819 devname);
4820 continue; /* Cannot seek */
4821 }
4822 /* There should be a duplicate backup superblock 4k before here */
4823 if (lseek64(fd, -4096, 1) < 0 ||
4824 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4825 goto second_fail; /* Cannot find leading superblock */
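/* The version-1 backup header carries nothing valid beyond pad1,
 * so compare only up to there; version-2 headers also include the
 * second-section fields (devstart2/arraystart2/length2/sb_csum2),
 * so compare up to pad.
 */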
4826 if (bsb.magic[15] == '1')
4827 bsbsize = offsetof(struct mdp_backup_super, pad1);
4828 else
4829 bsbsize = offsetof(struct mdp_backup_super, pad);
4830 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4831 goto second_fail; /* Cannot find leading superblock */
4832
4833 /* Now need the data offsets for all devices. */
4834 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4835 for(j=0; j<info->array.raid_disks; j++) {
4836 if (fdlist[j] < 0)
4837 continue;
4838 if (st->ss->load_super(st, fdlist[j], NULL))
4839 /* FIXME should this be an error? */
4840 continue;
4841 st->ss->getinfo_super(st, &dinfo, NULL);
4842 st->ss->free_super(st);
4843 offsets[j] = dinfo.data_offset * 512;
4844 }
4845 printf("%s: restoring critical section\n", Name);
4846
4847 if (restore_stripes(fdlist, offsets, info->array.raid_disks,
4848 info->new_chunk, info->new_level,
4849 info->new_layout, fd,
4850 __le64_to_cpu(bsb.devstart)*512,
4851 __le64_to_cpu(bsb.arraystart)*512,
4852 __le64_to_cpu(bsb.length)*512, NULL)) {
4853 /* didn't succeed, so give up */
4854 if (verbose)
4855 pr_err("Error restoring backup from %s\n",
4856 devname);
4857 free(offsets);
4858 return 1;
4859 }
4860
4861 if (bsb.magic[15] == '2' &&
4862 restore_stripes(fdlist, offsets, info->array.raid_disks,
4863 info->new_chunk, info->new_level,
4864 info->new_layout, fd,
4865 __le64_to_cpu(bsb.devstart)*512 +
4866 __le64_to_cpu(bsb.devstart2)*512,
4867 __le64_to_cpu(bsb.arraystart2)*512,
4868 __le64_to_cpu(bsb.length2)*512, NULL)) {
4869 /* didn't succeed, so give up */
4870 if (verbose)
4871 pr_err("Error restoring second backup from %s\n",
4872 devname);
4873 free(offsets);
4874 return 1;
4875 }
4876
4877 free(offsets);
4878
4879 /* Ok, so the data is restored. Let's update those superblocks. */
4880
4881 lo = hi = 0;
4882 if (bsb.length) {
4883 lo = __le64_to_cpu(bsb.arraystart);
4884 hi = lo + __le64_to_cpu(bsb.length);
4885 }
4886 if (bsb.magic[15] == '2' && bsb.length2) {
4887 unsigned long long lo1, hi1;
4888 lo1 = __le64_to_cpu(bsb.arraystart2);
4889 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4890 if (lo == hi) {
4891 lo = lo1;
4892 hi = hi1;
4893 } else if (lo < lo1)
4894 hi = hi1;
4895 else
4896 lo = lo1;
4897 }
4898 if (lo < hi && (info->reshape_progress < lo ||
4899 info->reshape_progress > hi))
4900 /* backup does not affect reshape_progress */ ;
4901 else if (info->delta_disks >= 0) {
4902 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4903 __le64_to_cpu(bsb.length);
4904 if (bsb.magic[15] == '2') {
4905 unsigned long long p2;
4906
4907 p2 = __le64_to_cpu(bsb.arraystart2) +
4908 __le64_to_cpu(bsb.length2);
4909 if (p2 > info->reshape_progress)
4910 info->reshape_progress = p2;
4911 }
4912 } else {
4913 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4914 if (bsb.magic[15] == '2') {
4915 unsigned long long p2;
4916
4917 p2 = __le64_to_cpu(bsb.arraystart2);
4918 if (p2 < info->reshape_progress)
4919 info->reshape_progress = p2;
4920 }
4921 }
4922 for (j=0; j<info->array.raid_disks; j++) {
4923 if (fdlist[j] < 0)
4924 continue;
4925 if (st->ss->load_super(st, fdlist[j], NULL))
4926 continue;
4927 st->ss->getinfo_super(st, &dinfo, NULL);
4928 dinfo.reshape_progress = info->reshape_progress;
4929 st->ss->update_super(st, &dinfo, "_reshape_progress",
4930 NULL,0, 0, NULL);
4931 st->ss->store_super(st, fdlist[j]);
4932 st->ss->free_super(st);
4933 }
4934 return 0;
4935 }
4936 /* Didn't find any backup data, try to see if any
4937 * was needed.
4938 */
4939 if (info->delta_disks < 0) {
4940 /* When shrinking, the critical section is at the end.
4941 * So see if we are before the critical section.
4942 */
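/* Roughly, the loop below walks old-chunk stripe boundaries until
 * the write position in the new (fewer-data-disk) layout has moved
 * safely past the read position in the old layout; everything below
 * first_block is the critical section, which a shrinking reshape
 * only reaches at its very end.
 */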
4943 unsigned long long first_block;
4944 nstripe = ostripe = 0;
4945 first_block = 0;
4946 while (ostripe >= nstripe) {
4947 ostripe += info->array.chunk_size / 512;
4948 first_block = ostripe * odata;
4949 nstripe = first_block / ndata / (info->new_chunk/512) *
4950 (info->new_chunk/512);
4951 }
4952
4953 if (info->reshape_progress >= first_block)
4954 return 0;
4955 }
4956 if (info->delta_disks > 0) {
4957 /* See if we are beyond the critical section. */
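/* Symmetrically to the shrink case above, last_block marks the end
 * of the region where the old and new layouts can still collide, so
 * the grow is past its critical section once reshape_progress
 * reaches it.
 */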
4958 unsigned long long last_block;
4959 nstripe = ostripe = 0;
4960 last_block = 0;
4961 while (nstripe >= ostripe) {
4962 nstripe += info->new_chunk / 512;
4963 last_block = nstripe * ndata;
4964 ostripe = last_block / odata / (info->array.chunk_size/512) *
4965 (info->array.chunk_size/512);
4966 }
4967
4968 if (info->reshape_progress >= last_block)
4969 return 0;
4970 }
4971 /* We needed to recover the critical section but found no backup of it. */
4972 if (verbose)
4973 pr_err("Failed to find backup of critical section\n");
4974 return 1;
4975 }
4976
4977 int Grow_continue_command(char *devname, int fd,
4978 char *backup_file, int verbose)
4979 {
4980 int ret_val = 0;
4981 struct supertype *st = NULL;
4982 struct mdinfo *content = NULL;
4983 struct mdinfo array;
4984 char *subarray = NULL;
4985 struct mdinfo *cc = NULL;
4986 struct mdstat_ent *mdstat = NULL;
4987 int cfd = -1;
4988 int fd2;
4989
4990 dprintf("Grow continue from command line called for %s\n", devname);
4991
4992 st = super_by_fd(fd, &subarray);
4993 if (!st || !st->ss) {
4994 pr_err("Unable to determine metadata format for %s\n", devname);
4995 return 1;
4996 }
4997 dprintf("Grow continue is run for ");
4998 if (st->ss->external == 0) {
4999 int d;
5000 int cnt = 5;
5001 dprintf_cont("native array (%s)\n", devname);
5002 if (md_get_array_info(fd, &array.array) < 0) {
5003 pr_err("%s is not an active md array - aborting\n",
5004 devname);
5005 ret_val = 1;
5006 goto Grow_continue_command_exit;
5007 }
5008 content = &array;
5009 sysfs_init(content, fd, NULL);
5010 /* Need to load a superblock.
5011 * FIXME we should really get what we need from
5012 * sysfs
5013 */
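/* The loop below retries a handful of times (cnt), sleeping 3
 * seconds between attempts, waiting for the superblock to report
 * reshape_active.
 */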
5014 do {
5015 for (d = 0; d < MAX_DISKS; d++) {
5016 mdu_disk_info_t disk;
5017 char *dv;
5018 int err;
5019 disk.number = d;
5020 if (md_get_disk_info(fd, &disk) < 0)
5021 continue;
5022 if (disk.major == 0 && disk.minor == 0)
5023 continue;
5024 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
5025 continue;
5026 dv = map_dev(disk.major, disk.minor, 1);
5027 if (!dv)
5028 continue;
5029 fd2 = dev_open(dv, O_RDONLY);
5030 if (fd2 < 0)
5031 continue;
5032 err = st->ss->load_super(st, fd2, NULL);
5033 close(fd2);
5034 if (err)
5035 continue;
5036 break;
5037 }
5038 if (d == MAX_DISKS) {
5039 pr_err("Unable to load metadata for %s\n",
5040 devname);
5041 ret_val = 1;
5042 goto Grow_continue_command_exit;
5043 }
5044 st->ss->getinfo_super(st, content, NULL);
5045 if (!content->reshape_active)
5046 sleep(3);
5047 else
5048 break;
5049 } while (cnt-- > 0);
5050 } else {
5051 char *container;
5052
5053 if (subarray) {
5054 dprintf_cont("subarray (%s)\n", subarray);
5055 container = st->container_devnm;
5056 cfd = open_dev_excl(st->container_devnm);
5057 } else {
5058 container = st->devnm;
5059 close(fd);
5060 cfd = open_dev_excl(st->devnm);
5061 dprintf_cont("container (%s)\n", container);
5062 fd = cfd;
5063 }
5064 if (cfd < 0) {
5065 pr_err("Unable to open container for %s\n", devname);
5066 ret_val = 1;
5067 goto Grow_continue_command_exit;
5068 }
5069
5070 /* find the array under reshape in the container
5071 */
5072 ret_val = st->ss->load_container(st, cfd, NULL);
5073 if (ret_val) {
5074 pr_err("Cannot read superblock for %s\n", devname);
5075 ret_val = 1;
5076 goto Grow_continue_command_exit;
5077 }
5078
5079 cc = st->ss->container_content(st, subarray);
5080 for (content = cc; content ; content = content->next) {
5081 char *array_name;
5082 int allow_reshape = 1;
5083
5084 if (content->reshape_active == 0)
5085 continue;
5086 /* The decision about array- or container-wide
5087 * reshape is taken in Grow_continue based on
5088 * content->reshape_active state, therefore we
5089 * need to check whether reshape is allowed based
5090 * on reshape_active and the subarray name
5091 */
5092 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
5093 allow_reshape = 0;
5094 if (content->reshape_active == CONTAINER_RESHAPE &&
5095 (content->array.state
5096 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
5097 allow_reshape = 0;
5098
5099 if (!allow_reshape) {
5100 pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
5101 devname, container);
5102 ret_val = 1;
5103 goto Grow_continue_command_exit;
5104 }
5105
5106 array_name = strchr(content->text_version+1, '/')+1;
5107 mdstat = mdstat_by_subdev(array_name, container);
5108 if (!mdstat)
5109 continue;
5110 if (mdstat->active == 0) {
5111 pr_err("Skipping inactive array %s.\n",
5112 mdstat->devnm);
5113 free_mdstat(mdstat);
5114 mdstat = NULL;
5115 continue;
5116 }
5117 break;
5118 }
5119 if (!content) {
5120 pr_err("Unable to determine reshaped array for %s\n", devname);
5121 ret_val = 1;
5122 goto Grow_continue_command_exit;
5123 }
5124 fd2 = open_dev(mdstat->devnm);
5125 if (fd2 < 0) {
5126 pr_err("cannot open (%s)\n", mdstat->devnm);
5127 ret_val = 1;
5128 goto Grow_continue_command_exit;
5129 }
5130
5131 if (sysfs_init(content, fd2, mdstat->devnm)) {
5132 pr_err("Unable to initialize sysfs for %s, Grow cannot continue.\n",
5133 mdstat->devnm);
5134 ret_val = 1;
5135 close(fd2);
5136 goto Grow_continue_command_exit;
5137 }
5138
5139 close(fd2);
5140
5141 /* start mdmon in case it is not running
5142 */
5143 if (!mdmon_running(container))
5144 start_mdmon(container);
5145 ping_monitor(container);
5146
5147 if (mdmon_running(container))
5148 st->update_tail = &st->updates;
5149 else {
5150 pr_err("No mdmon found. Grow cannot continue.\n");
5151 ret_val = 1;
5152 goto Grow_continue_command_exit;
5153 }
5154 }
5155
5156 /* verify that the array under reshape is started from
5157 * the correct position
5158 */
5159 if (verify_reshape_position(content, content->array.level) < 0) {
5160 ret_val = 1;
5161 goto Grow_continue_command_exit;
5162 }
5163
5164 /* continue reshape
5165 */
5166 ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);
5167
5168 Grow_continue_command_exit:
5169 if (cfd > -1)
5170 close(cfd);
5171 st->ss->free_super(st);
5172 free_mdstat(mdstat);
5173 sysfs_free(cc);
5174 free(subarray);
5175
5176 return ret_val;
5177 }
5178
5179 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
5180 char *backup_file, int forked, int freeze_reshape)
5181 {
5182 int ret_val = 2;
5183
5184 if (!info->reshape_active)
5185 return ret_val;
5186
5187 if (st->ss->external) {
5188 int cfd = open_dev(st->container_devnm);
5189
5190 if (cfd < 0)
5191 return 1;
5192
5193 st->ss->load_container(st, cfd, st->container_devnm);
5194 close(cfd);
5195 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
5196 st, info, 0, backup_file, 0,
5197 forked, 1 | info->reshape_active,
5198 freeze_reshape);
5199 } else
5200 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
5201 NULL, INVALID_SECTORS, backup_file,
5202 0, forked, 1 | info->reshape_active,
5203 freeze_reshape);
5204
5205 return ret_val;
5206 }
5207
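/* Build the name of the per-array backup file kept under MAP_DIR,
 * e.g. MAP_DIR "/backup_file-md127" for an array named "md127".
 * The returned string comes from xmalloc() and is owned by the
 * caller (see locate_backup() below).
 */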
5208 char *make_backup(char *name)
5209 {
5210 char *base = "backup_file-";
5211 int len;
5212 char *fname;
5213
5214 len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1;
5215 fname = xmalloc(len);
5216 sprintf(fname, "%s/%s%s", MAP_DIR, base, name);
5217 return fname;
5218 }
5219
5220 char *locate_backup(char *name)
5221 {
5222 char *fl = make_backup(name);
5223 struct stat stb;
5224
5225 if (stat(fl, &stb) == 0 && S_ISREG(stb.st_mode))
5226 return fl;
5227
5228 free(fl);
5229 return NULL;
5230 }