1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stddef.h>
28 #include <stdint.h>
29 #include <signal.h>
30 #include <sys/wait.h>
31
32 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
33 #error no endian defined
34 #endif
35 #include "md_u.h"
36 #include "md_p.h"
37
38 int restore_backup(struct supertype *st,
39 struct mdinfo *content,
40 int working_disks,
41 int next_spare,
42 char **backup_filep,
43 int verbose)
44 {
45 int i;
46 int *fdlist;
47 struct mdinfo *dev;
48 int err;
49 int disk_count = next_spare + working_disks;
50 char *backup_file = *backup_filep;
51
52 dprintf("Called restore_backup()\n");
53 fdlist = xmalloc(sizeof(int) * disk_count);
54
55 enable_fds(next_spare);
56 for (i = 0; i < next_spare; i++)
57 fdlist[i] = -1;
58 for (dev = content->devs; dev; dev = dev->next) {
59 char buf[22];
60 int fd;
61 sprintf(buf, "%d:%d",
62 dev->disk.major,
63 dev->disk.minor);
64 fd = dev_open(buf, O_RDWR);
65
66 if (dev->disk.raid_disk >= 0)
67 fdlist[dev->disk.raid_disk] = fd;
68 else
69 fdlist[next_spare++] = fd;
70 }
71
72 if (!backup_file) {
73 backup_file = locate_backup(content->sys_name);
74 *backup_filep = backup_file;
75 }
76
77 if (st->ss->external && st->ss->recover_backup)
78 err = st->ss->recover_backup(st, content);
79 else
80 err = Grow_restart(st, content, fdlist, next_spare,
81 backup_file, verbose > 0);
82
83 while (next_spare > 0) {
84 next_spare--;
85 if (fdlist[next_spare] >= 0)
86 close(fdlist[next_spare]);
87 }
88 free(fdlist);
89 if (err) {
90 pr_err("Failed to restore critical section for reshape - sorry.\n");
91 if (!backup_file)
92 pr_err("Possibly you need to specify a --backup-file\n");
93 return 1;
94 }
95
96 dprintf("restore_backup() returns status OK.\n");
97 return 0;
98 }
99
100 int Grow_Add_device(char *devname, int fd, char *newdev)
101 {
102 /* Add a device to an active array.
103 * Currently, just extend a linear array.
104 * This requires writing a new superblock on the
105 * new device, calling the kernel to add the device,
106 * and if that succeeds, update the superblock on
107 * all other devices.
108 * This means that we need to *find* all other devices.
109 */
110 struct mdinfo info;
111
112 dev_t rdev;
113 int nfd, fd2;
114 int d, nd;
115 struct supertype *st = NULL;
116 char *subarray = NULL;
117
118 if (md_get_array_info(fd, &info.array) < 0) {
119 pr_err("cannot get array info for %s\n", devname);
120 return 1;
121 }
122
123 if (info.array.level != -1) {
124 pr_err("can only add devices to linear arrays\n");
125 return 1;
126 }
127
128 st = super_by_fd(fd, &subarray);
129 if (!st) {
130 pr_err("cannot handle arrays with superblock version %d\n",
131 info.array.major_version);
132 return 1;
133 }
134
135 if (subarray) {
136 pr_err("Cannot grow linear sub-arrays yet\n");
137 free(subarray);
138 free(st);
139 return 1;
140 }
141
142 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
143 if (nfd < 0) {
144 pr_err("cannot open %s\n", newdev);
145 free(st);
146 return 1;
147 }
148 if (!fstat_is_blkdev(nfd, newdev, &rdev)) {
149 close(nfd);
150 free(st);
151 return 1;
152 }
153 /* now check out all the devices and make sure we can read the
154 * superblock */
155 for (d=0 ; d < info.array.raid_disks ; d++) {
156 mdu_disk_info_t disk;
157 char *dv;
158
159 st->ss->free_super(st);
160
161 disk.number = d;
162 if (md_get_disk_info(fd, &disk) < 0) {
163 pr_err("cannot get device detail for device %d\n",
164 d);
165 close(nfd);
166 free(st);
167 return 1;
168 }
169 dv = map_dev(disk.major, disk.minor, 1);
170 if (!dv) {
171 pr_err("cannot find device file for device %d\n",
172 d);
173 close(nfd);
174 free(st);
175 return 1;
176 }
177 fd2 = dev_open(dv, O_RDWR);
178 if (fd2 < 0) {
179 pr_err("cannot open device file %s\n", dv);
180 close(nfd);
181 free(st);
182 return 1;
183 }
184
185 if (st->ss->load_super(st, fd2, NULL)) {
186 pr_err("cannot find super block on %s\n", dv);
187 close(nfd);
188 close(fd2);
189 free(st);
190 return 1;
191 }
192 close(fd2);
193 }
194 /* Ok, looks good. Let's update the superblock and write it out to
195 * newdev.
196 */
197
198 info.disk.number = d;
199 info.disk.major = major(rdev);
200 info.disk.minor = minor(rdev);
201 info.disk.raid_disk = d;
202 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
203 st->ss->update_super(st, &info, "linear-grow-new", newdev,
204 0, 0, NULL);
205
206 if (st->ss->store_super(st, nfd)) {
207 pr_err("Cannot store new superblock on %s\n",
208 newdev);
209 close(nfd);
210 return 1;
211 }
212 close(nfd);
213
214 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
215 pr_err("Cannot add new disk to this array\n");
216 return 1;
217 }
218 /* Well, that seems to have worked.
219 * Now go through and update all superblocks
220 */
221
222 if (md_get_array_info(fd, &info.array) < 0) {
223 pr_err("cannot get array info for %s\n", devname);
224 return 1;
225 }
226
227 nd = d;
228 for (d=0 ; d < info.array.raid_disks ; d++) {
229 mdu_disk_info_t disk;
230 char *dv;
231
232 disk.number = d;
233 if (md_get_disk_info(fd, &disk) < 0) {
234 pr_err("cannot get device detail for device %d\n",
235 d);
236 return 1;
237 }
238 dv = map_dev(disk.major, disk.minor, 1);
239 if (!dv) {
240 pr_err("cannot find device file for device %d\n",
241 d);
242 return 1;
243 }
244 fd2 = dev_open(dv, O_RDWR);
245 if (fd2 < 0) {
246 pr_err("cannot open device file %s\n", dv);
247 return 1;
248 }
249 if (st->ss->load_super(st, fd2, NULL)) {
250 pr_err("cannot find super block on %s\n", dv);
251 close(fd2);
252 return 1;
253 }
254 info.array.raid_disks = nd+1;
255 info.array.nr_disks = nd+1;
256 info.array.active_disks = nd+1;
257 info.array.working_disks = nd+1;
258
259 st->ss->update_super(st, &info, "linear-grow-update", dv,
260 0, 0, NULL);
261
262 if (st->ss->store_super(st, fd2)) {
263 pr_err("Cannot store new superblock on %s\n", dv);
264 close(fd2);
265 return 1;
266 }
267 close(fd2);
268 }
269
270 return 0;
271 }
272
273 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
274 {
275 /*
276 * First check that array doesn't have a bitmap
277 * Then create the bitmap
278 * Then add it
279 *
280 * For internal bitmaps, we need to check the version,
281 * find all the active devices, and write the bitmap block
282 * to all devices
283 */
284 mdu_bitmap_file_t bmf;
285 mdu_array_info_t array;
286 struct supertype *st;
287 char *subarray = NULL;
288 int major = BITMAP_MAJOR_HI;
289 unsigned long long bitmapsize, array_size;
290 struct mdinfo *mdi;
291
292 /*
293 * We only ever get called if s->bitmap_file is != NULL, so this check
294 * is just here to quiet down static code checkers.
295 */
296 if (!s->bitmap_file)
297 return 1;
298
299 if (strcmp(s->bitmap_file, "clustered") == 0)
300 major = BITMAP_MAJOR_CLUSTERED;
301
302 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
303 if (errno == ENOMEM)
304 pr_err("Memory allocation failure.\n");
305 else
306 pr_err("bitmaps not supported by this kernel.\n");
307 return 1;
308 }
309 if (bmf.pathname[0]) {
310 if (strcmp(s->bitmap_file,"none") == 0) {
311 if (ioctl(fd, SET_BITMAP_FILE, -1) != 0) {
312 pr_err("failed to remove bitmap %s\n",
313 bmf.pathname);
314 return 1;
315 }
316 return 0;
317 }
318 pr_err("%s already has a bitmap (%s)\n",
319 devname, bmf.pathname);
320 return 1;
321 }
322 if (md_get_array_info(fd, &array) != 0) {
323 pr_err("cannot get array status for %s\n", devname);
324 return 1;
325 }
326 if (array.state & (1 << MD_SB_BITMAP_PRESENT)) {
327 if (strcmp(s->bitmap_file, "none")==0) {
328 array.state &= ~(1 << MD_SB_BITMAP_PRESENT);
329 if (md_set_array_info(fd, &array) != 0) {
330 if (array.state & (1 << MD_SB_CLUSTERED))
331 pr_err("failed to remove clustered bitmap.\n");
332 else
333 pr_err("failed to remove internal bitmap.\n");
334 return 1;
335 }
336 return 0;
337 }
338 pr_err("bitmap already present on %s\n", devname);
339 return 1;
340 }
341
342 if (strcmp(s->bitmap_file, "none") == 0) {
343 pr_err("no bitmap found on %s\n", devname);
344 return 1;
345 }
346 if (array.level <= 0) {
347 pr_err("Bitmaps not meaningful with level %s\n",
348 map_num(pers, array.level)?:"of this array");
349 return 1;
350 }
351 bitmapsize = array.size;
352 bitmapsize <<= 1;
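/* array.size is reported in KiB; doubling it converts to 512-byte sectors,
 * which is the unit the bitmap sizing code below works in.
 */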
353 if (get_dev_size(fd, NULL, &array_size) &&
354 array_size > (0x7fffffffULL << 9)) {
355 /* Array is big enough that we cannot trust array.size
356 * try other approaches
357 */
358 bitmapsize = get_component_size(fd);
359 }
360 if (bitmapsize == 0) {
361 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
362 return 1;
363 }
364
365 if (array.level == 10) {
366 int ncopies;
367
368 ncopies = (array.layout & 255) * ((array.layout >> 8) & 255);
369 bitmapsize = bitmapsize * array.raid_disks / ncopies;
370 }
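/* Worked example (illustrative figures, not from the original source):
 * a 4-disk near-2 array has layout 0x102, giving ncopies = 2 * 1 = 2,
 * so bitmapsize is scaled by raid_disks / ncopies = 4 / 2 = 2.
 */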
371
372 st = super_by_fd(fd, &subarray);
373 if (!st) {
374 pr_err("Cannot understand version %d.%d\n",
375 array.major_version, array.minor_version);
376 return 1;
377 }
378 if (subarray) {
379 pr_err("Cannot add bitmaps to sub-arrays yet\n");
380 free(subarray);
381 free(st);
382 return 1;
383 }
384
385 mdi = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY);
386 if (mdi) {
387 if (mdi->consistency_policy == CONSISTENCY_POLICY_PPL) {
388 pr_err("Cannot add bitmap to array with PPL\n");
389 free(mdi);
390 free(st);
391 return 1;
392 }
393 free(mdi);
394 }
395
396 if (strcmp(s->bitmap_file, "internal") == 0 ||
397 strcmp(s->bitmap_file, "clustered") == 0) {
398 int rv;
399 int d;
400 int offset_setable = 0;
401 if (st->ss->add_internal_bitmap == NULL) {
402 pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name);
403 return 1;
404 }
405 st->nodes = c->nodes;
406 st->cluster_name = c->homecluster;
407 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
408 if (mdi)
409 offset_setable = 1;
410 for (d = 0; d < st->max_devs; d++) {
411 mdu_disk_info_t disk;
412 char *dv;
413 int fd2;
414
415 disk.number = d;
416 if (md_get_disk_info(fd, &disk) < 0)
417 continue;
418 if (disk.major == 0 && disk.minor == 0)
419 continue;
420 if ((disk.state & (1 << MD_DISK_SYNC)) == 0)
421 continue;
422 dv = map_dev(disk.major, disk.minor, 1);
423 if (!dv)
424 continue;
425 fd2 = dev_open(dv, O_RDWR);
426 if (fd2 < 0)
427 continue;
428 rv = st->ss->load_super(st, fd2, NULL);
429 if (!rv) {
430 rv = st->ss->add_internal_bitmap(
431 st, &s->bitmap_chunk, c->delay,
432 s->write_behind, bitmapsize,
433 offset_setable, major);
434 if (!rv) {
435 st->ss->write_bitmap(st, fd2,
436 NodeNumUpdate);
437 } else {
438 pr_err("failed to create internal bitmap - chunksize problem.\n");
439 }
440 } else {
441 pr_err("failed to load super-block.\n");
442 }
443 close(fd2);
444 if (rv)
445 return 1;
446 }
447 if (offset_setable) {
448 st->ss->getinfo_super(st, mdi, NULL);
449 if (sysfs_init(mdi, fd, NULL)) {
450 pr_err("failed to initialize sysfs.\n");
451 free(mdi);
452 }
453 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
454 mdi->bitmap_offset);
455 free(mdi);
456 } else {
457 if (strcmp(s->bitmap_file, "clustered") == 0)
458 array.state |= (1 << MD_SB_CLUSTERED);
459 array.state |= (1 << MD_SB_BITMAP_PRESENT);
460 rv = md_set_array_info(fd, &array);
461 }
462 if (rv < 0) {
463 if (errno == EBUSY)
464 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
465 pr_err("failed to set internal bitmap.\n");
466 return 1;
467 }
468 } else {
469 int uuid[4];
470 int bitmap_fd;
471 int d;
472 int max_devs = st->max_devs;
473
474 /* try to load a superblock */
475 for (d = 0; d < max_devs; d++) {
476 mdu_disk_info_t disk;
477 char *dv;
478 int fd2;
479 disk.number = d;
480 if (md_get_disk_info(fd, &disk) < 0)
481 continue;
482 if ((disk.major==0 && disk.minor == 0) ||
483 (disk.state & (1 << MD_DISK_REMOVED)))
484 continue;
485 dv = map_dev(disk.major, disk.minor, 1);
486 if (!dv)
487 continue;
488 fd2 = dev_open(dv, O_RDONLY);
489 if (fd2 >= 0) {
490 if (st->ss->load_super(st, fd2, NULL) == 0) {
491 close(fd2);
492 st->ss->uuid_from_super(st, uuid);
493 break;
494 }
495 close(fd2);
496 }
497 }
498 if (d == max_devs) {
499 pr_err("cannot find UUID for array!\n");
500 return 1;
501 }
502 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid,
503 s->bitmap_chunk, c->delay, s->write_behind,
504 bitmapsize, major)) {
505 return 1;
506 }
507 bitmap_fd = open(s->bitmap_file, O_RDWR);
508 if (bitmap_fd < 0) {
509 pr_err("weird: %s cannot be opened\n", s->bitmap_file);
510 return 1;
511 }
512 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
513 int err = errno;
514 if (errno == EBUSY)
515 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
516 pr_err("Cannot set bitmap file for %s: %s\n",
517 devname, strerror(err));
518 return 1;
519 }
520 }
521
522 return 0;
523 }
524
525 int Grow_consistency_policy(char *devname, int fd, struct context *c, struct shape *s)
526 {
527 struct supertype *st;
528 struct mdinfo *sra;
529 struct mdinfo *sd;
530 char *subarray = NULL;
531 int ret = 0;
532 char container_dev[PATH_MAX];
533 char buf[20];
534
535 if (s->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
536 s->consistency_policy != CONSISTENCY_POLICY_PPL) {
537 pr_err("Operation not supported for consistency policy %s\n",
538 map_num(consistency_policies, s->consistency_policy));
539 return 1;
540 }
541
542 st = super_by_fd(fd, &subarray);
543 if (!st)
544 return 1;
545
546 sra = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY|GET_LEVEL|
547 GET_DEVS|GET_STATE);
548 if (!sra) {
549 ret = 1;
550 goto free_st;
551 }
552
553 if (s->consistency_policy == CONSISTENCY_POLICY_PPL &&
554 !st->ss->write_init_ppl) {
555 pr_err("%s metadata does not support PPL\n", st->ss->name);
556 ret = 1;
557 goto free_info;
558 }
559
560 if (sra->array.level != 5) {
561 pr_err("Operation not supported for array level %d\n",
562 sra->array.level);
563 ret = 1;
564 goto free_info;
565 }
566
567 if (sra->consistency_policy == (unsigned)s->consistency_policy) {
568 pr_err("Consistency policy is already %s\n",
569 map_num(consistency_policies, s->consistency_policy));
570 ret = 1;
571 goto free_info;
572 } else if (sra->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
573 sra->consistency_policy != CONSISTENCY_POLICY_PPL) {
574 pr_err("Current consistency policy is %s, cannot change to %s\n",
575 map_num(consistency_policies, sra->consistency_policy),
576 map_num(consistency_policies, s->consistency_policy));
577 ret = 1;
578 goto free_info;
579 }
580
581 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
582 if (sysfs_get_str(sra, NULL, "sync_action", buf, 20) <= 0) {
583 ret = 1;
584 goto free_info;
585 } else if (strcmp(buf, "reshape\n") == 0) {
586 pr_err("PPL cannot be enabled when reshape is in progress\n");
587 ret = 1;
588 goto free_info;
589 }
590 }
591
592 if (subarray) {
593 char *update;
594
595 if (s->consistency_policy == CONSISTENCY_POLICY_PPL)
596 update = "ppl";
597 else
598 update = "no-ppl";
599
600 sprintf(container_dev, "/dev/%s", st->container_devnm);
601
602 ret = Update_subarray(container_dev, subarray, update, NULL,
603 c->verbose);
604 if (ret)
605 goto free_info;
606 }
607
608 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
609 struct mdinfo info;
610
611 if (subarray) {
612 struct mdinfo *mdi;
613 int cfd;
614
615 cfd = open(container_dev, O_RDWR|O_EXCL);
616 if (cfd < 0) {
617 pr_err("Failed to open %s\n", container_dev);
618 ret = 1;
619 goto free_info;
620 }
621
622 ret = st->ss->load_container(st, cfd, st->container_devnm);
623 close(cfd);
624
625 if (ret) {
626 pr_err("Cannot read superblock for %s\n",
627 container_dev);
628 goto free_info;
629 }
630
631 mdi = st->ss->container_content(st, subarray);
632 info = *mdi;
633 free(mdi);
634 }
635
636 for (sd = sra->devs; sd; sd = sd->next) {
637 int dfd;
638 char *devpath;
639
640 if ((sd->disk.state & (1 << MD_DISK_SYNC)) == 0)
641 continue;
642
643 devpath = map_dev(sd->disk.major, sd->disk.minor, 0);
644 dfd = dev_open(devpath, O_RDWR);
645 if (dfd < 0) {
646 pr_err("Failed to open %s\n", devpath);
647 ret = 1;
648 goto free_info;
649 }
650
651 if (!subarray) {
652 ret = st->ss->load_super(st, dfd, NULL);
653 if (ret) {
654 pr_err("Failed to load super-block.\n");
655 close(dfd);
656 goto free_info;
657 }
658
659 ret = st->ss->update_super(st, sra, "ppl", devname,
660 c->verbose, 0, NULL);
661 if (ret) {
662 close(dfd);
663 st->ss->free_super(st);
664 goto free_info;
665 }
666 st->ss->getinfo_super(st, &info, NULL);
667 }
668
669 ret |= sysfs_set_num(sra, sd, "ppl_sector", info.ppl_sector);
670 ret |= sysfs_set_num(sra, sd, "ppl_size", info.ppl_size);
671
672 if (ret) {
673 pr_err("Failed to set PPL attributes for %s\n",
674 sd->sys_name);
675 close(dfd);
676 st->ss->free_super(st);
677 goto free_info;
678 }
679
680 ret = st->ss->write_init_ppl(st, &info, dfd);
681 if (ret)
682 pr_err("Failed to write PPL\n");
683
684 close(dfd);
685
686 if (!subarray)
687 st->ss->free_super(st);
688
689 if (ret)
690 goto free_info;
691 }
692 }
693
694 ret = sysfs_set_str(sra, NULL, "consistency_policy",
695 map_num(consistency_policies,
696 s->consistency_policy));
697 if (ret)
698 pr_err("Failed to change array consistency policy\n");
699
700 free_info:
701 sysfs_free(sra);
702 free_st:
703 free(st);
704 free(subarray);
705
706 return ret;
707 }
708
709 /*
710 * When reshaping an array we might need to backup some data.
711 * This is written to all spares with a 'super_block' describing it.
712 * The superblock goes 4K from the end of the used space on the
713 * device.
714 * It is written after the backup is complete.
715 * It has the following structure.
716 */
717
718 static struct mdp_backup_super {
719 char magic[16]; /* md_backup_data-1 or -2 */
720 __u8 set_uuid[16];
721 __u64 mtime;
722 /* start/sizes in 512byte sectors */
723 __u64 devstart; /* address on backup device/file of data */
724 __u64 arraystart;
725 __u64 length;
726 __u32 sb_csum; /* csum of preceding bytes. */
727 __u32 pad1;
728 __u64 devstart2; /* offset in to data of second section */
729 __u64 arraystart2;
730 __u64 length2;
731 __u32 sb_csum2; /* csum of preceding bytes. */
732 __u8 pad[512-68-32];
733 } __attribute__((aligned(512))) bsb, bsb2;
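/* Layout note: the fields above occupy 68 bytes for the first section
 * (magic .. sb_csum) plus 32 bytes for the second (pad1 .. sb_csum2), so
 * pad[512-68-32] brings the structure to exactly one 512-byte sector.
 */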
734
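/* Note: the checksum below folds in buf[0] on every iteration rather than
 * buf[i], so it depends only on the first byte and the length. It must stay
 * that way for compatibility with backup super-blocks already written with
 * this checksum.
 */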
735 static __u32 bsb_csum(char *buf, int len)
736 {
737 int i;
738 int csum = 0;
739 for (i = 0; i < len; i++)
740 csum = (csum<<3) + buf[0];
741 return __cpu_to_le32(csum);
742 }
743
744 static int check_idle(struct supertype *st)
745 {
746 /* Check that all member arrays for this container, or the
747 * container of this array, are idle
748 */
749 char *container = (st->container_devnm[0]
750 ? st->container_devnm : st->devnm);
751 struct mdstat_ent *ent, *e;
752 int is_idle = 1;
753
754 ent = mdstat_read(0, 0);
755 for (e = ent ; e; e = e->next) {
756 if (!is_container_member(e, container))
757 continue;
758 if (e->percent >= 0) {
759 is_idle = 0;
760 break;
761 }
762 }
763 free_mdstat(ent);
764 return is_idle;
765 }
766
767 static int freeze_container(struct supertype *st)
768 {
769 char *container = (st->container_devnm[0]
770 ? st->container_devnm : st->devnm);
771
772 if (!check_idle(st))
773 return -1;
774
775 if (block_monitor(container, 1)) {
776 pr_err("failed to freeze container\n");
777 return -2;
778 }
779
780 return 1;
781 }
782
783 static void unfreeze_container(struct supertype *st)
784 {
785 char *container = (st->container_devnm[0]
786 ? st->container_devnm : st->devnm);
787
788 unblock_monitor(container, 1);
789 }
790
791 static int freeze(struct supertype *st)
792 {
793 /* Try to freeze resync/rebuild on this array/container.
794 * Return -1 if the array is busy,
795 * return -2 if the container cannot be frozen,
796 * return 0 if this kernel doesn't support 'frozen'
797 * return 1 if it worked.
798 */
799 if (st->ss->external)
800 return freeze_container(st);
801 else {
802 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
803 int err;
804 char buf[20];
805
806 if (!sra)
807 return -1;
808 /* Need to clear any 'read-auto' status */
809 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
810 strncmp(buf, "read-auto", 9) == 0)
811 sysfs_set_str(sra, NULL, "array_state", "clean");
812
813 err = sysfs_freeze_array(sra);
814 sysfs_free(sra);
815 return err;
816 }
817 }
818
819 static void unfreeze(struct supertype *st)
820 {
821 if (st->ss->external)
822 return unfreeze_container(st);
823 else {
824 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
825 char buf[20];
826
827 if (sra &&
828 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0 &&
829 strcmp(buf, "frozen\n") == 0)
830 sysfs_set_str(sra, NULL, "sync_action", "idle");
831 sysfs_free(sra);
832 }
833 }
834
835 static void wait_reshape(struct mdinfo *sra)
836 {
837 int fd = sysfs_get_fd(sra, NULL, "sync_action");
838 char action[20];
839
840 if (fd < 0)
841 return;
842
843 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
844 strncmp(action, "reshape", 7) == 0)
845 sysfs_wait(fd, NULL);
846 close(fd);
847 }
848
849 static int reshape_super(struct supertype *st, unsigned long long size,
850 int level, int layout, int chunksize, int raid_disks,
851 int delta_disks, char *backup_file, char *dev,
852 int direction, int verbose)
853 {
854 /* nothing extra to check in the native case */
855 if (!st->ss->external)
856 return 0;
857 if (!st->ss->reshape_super ||
858 !st->ss->manage_reshape) {
859 pr_err("%s metadata does not support reshape\n",
860 st->ss->name);
861 return 1;
862 }
863
864 return st->ss->reshape_super(st, size, level, layout, chunksize,
865 raid_disks, delta_disks, backup_file, dev,
866 direction, verbose);
867 }
868
869 static void sync_metadata(struct supertype *st)
870 {
871 if (st->ss->external) {
872 if (st->update_tail) {
873 flush_metadata_updates(st);
874 st->update_tail = &st->updates;
875 } else
876 st->ss->sync_metadata(st);
877 }
878 }
879
880 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
881 {
882 /* when dealing with external metadata subarrays we need to be
883 * prepared to handle EAGAIN. The kernel may need to wait for
884 * mdmon to mark the array active so the kernel can handle
885 * allocations/writeback when preparing the reshape action
886 * (md_allow_write()). We temporarily disable safe_mode_delay
887 * to close a race with the array_state going clean before the
888 * next write to raid_disks / stripe_cache_size
889 */
890 char safe[50];
891 int rc;
892
893 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
894 if (!container ||
895 (strcmp(name, "raid_disks") != 0 &&
896 strcmp(name, "stripe_cache_size") != 0))
897 return sysfs_set_num(sra, NULL, name, n);
898
899 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
900 if (rc <= 0)
901 return -1;
902 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
903 rc = sysfs_set_num(sra, NULL, name, n);
904 if (rc < 0 && errno == EAGAIN) {
905 ping_monitor(container);
906 /* if we get EAGAIN here then the monitor is not active
907 * so stop trying
908 */
909 rc = sysfs_set_num(sra, NULL, name, n);
910 }
911 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
912 return rc;
913 }
914
915 int start_reshape(struct mdinfo *sra, int already_running,
916 int before_data_disks, int data_disks)
917 {
918 int err;
919 unsigned long long sync_max_to_set;
920
921 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
922 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
923 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
924 sra->reshape_progress);
925 if (before_data_disks <= data_disks)
926 sync_max_to_set = sra->reshape_progress / data_disks;
927 else
928 sync_max_to_set = (sra->component_size * data_disks
929 - sra->reshape_progress) / data_disks;
930 if (!already_running)
931 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
932 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
933 if (!already_running && err == 0) {
934 int cnt = 5;
935 do {
936 err = sysfs_set_str(sra, NULL, "sync_action", "reshape");
937 if (err)
938 sleep(1);
939 } while (err && errno == EBUSY && cnt-- > 0);
940 }
941 return err;
942 }
943
944 void abort_reshape(struct mdinfo *sra)
945 {
946 sysfs_set_str(sra, NULL, "sync_action", "idle");
947 /*
948 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
949 * suspend_hi to decrease as well as increase.")
950 * you could only increase suspend_{lo,hi} unless the region they
951 * covered was empty. So to reset to 0, you need to push suspend_lo
952 * up past suspend_hi first. So to maximize the chance of mdadm
953 * working on all kernels, we want to keep doing that.
954 */
955 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
956 sysfs_set_num(sra, NULL, "suspend_hi", 0);
957 sysfs_set_num(sra, NULL, "suspend_lo", 0);
958 sysfs_set_num(sra, NULL, "sync_min", 0);
959 // It isn't safe to reset sync_max as we aren't monitoring.
960 // Array really should be stopped at this point.
961 }
962
963 int remove_disks_for_takeover(struct supertype *st,
964 struct mdinfo *sra,
965 int layout)
966 {
967 int nr_of_copies;
968 struct mdinfo *remaining;
969 int slot;
970
971 if (st->ss->external) {
972 int rv = 0;
973 struct mdinfo *arrays = st->ss->container_content(st, NULL);
974 /*
975 * container_content returns the list of arrays in the container.
976 * If arrays->next is not NULL there are at least two arrays in the
977 * container and the operation should be blocked.
978 */
979 if (arrays) {
980 if (arrays->next)
981 rv = 1;
982 sysfs_free(arrays);
983 if (rv) {
984 pr_err("Error. Cannot perform operation on /dev/%s\n", st->devnm);
985 pr_err("For this operation there MUST be a single array in the container\n");
986 return rv;
987 }
988 }
989 }
990
991 if (sra->array.level == 10)
992 nr_of_copies = layout & 0xff;
993 else if (sra->array.level == 1)
994 nr_of_copies = sra->array.raid_disks;
995 else
996 return 1;
997
998 remaining = sra->devs;
999 sra->devs = NULL;
1000 /* for each 'copy', select one device and remove from the list. */
1001 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
1002 struct mdinfo **diskp;
1003 int found = 0;
1004
1005 /* Find a working device to keep */
1006 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
1007 struct mdinfo *disk = *diskp;
1008
1009 if (disk->disk.raid_disk < slot)
1010 continue;
1011 if (disk->disk.raid_disk >= slot + nr_of_copies)
1012 continue;
1013 if (disk->disk.state & (1<<MD_DISK_REMOVED))
1014 continue;
1015 if (disk->disk.state & (1<<MD_DISK_FAULTY))
1016 continue;
1017 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
1018 continue;
1019
1020 /* We have found a good disk to use! */
1021 *diskp = disk->next;
1022 disk->next = sra->devs;
1023 sra->devs = disk;
1024 found = 1;
1025 break;
1026 }
1027 if (!found)
1028 break;
1029 }
1030
1031 if (slot < sra->array.raid_disks) {
1032 /* didn't find all slots */
1033 struct mdinfo **e;
1034 e = &remaining;
1035 while (*e)
1036 e = &(*e)->next;
1037 *e = sra->devs;
1038 sra->devs = remaining;
1039 return 1;
1040 }
1041
1042 /* Remove all 'remaining' devices from the array */
1043 while (remaining) {
1044 struct mdinfo *sd = remaining;
1045 remaining = sd->next;
1046
1047 sysfs_set_str(sra, sd, "state", "faulty");
1048 sysfs_set_str(sra, sd, "slot", "none");
1049 /* for external metadata, disks should be removed by mdmon */
1050 if (!st->ss->external)
1051 sysfs_set_str(sra, sd, "state", "remove");
1052 sd->disk.state |= (1<<MD_DISK_REMOVED);
1053 sd->disk.state &= ~(1<<MD_DISK_SYNC);
1054 sd->next = sra->devs;
1055 sra->devs = sd;
1056 }
1057 return 0;
1058 }
1059
1060 void reshape_free_fdlist(int *fdlist,
1061 unsigned long long *offsets,
1062 int size)
1063 {
1064 int i;
1065
1066 for (i = 0; i < size; i++)
1067 if (fdlist[i] >= 0)
1068 close(fdlist[i]);
1069
1070 free(fdlist);
1071 free(offsets);
1072 }
1073
1074 int reshape_prepare_fdlist(char *devname,
1075 struct mdinfo *sra,
1076 int raid_disks,
1077 int nrdisks,
1078 unsigned long blocks,
1079 char *backup_file,
1080 int *fdlist,
1081 unsigned long long *offsets)
1082 {
1083 int d = 0;
1084 struct mdinfo *sd;
1085
1086 enable_fds(nrdisks);
1087 for (d = 0; d <= nrdisks; d++)
1088 fdlist[d] = -1;
1089 d = raid_disks;
1090 for (sd = sra->devs; sd; sd = sd->next) {
1091 if (sd->disk.state & (1<<MD_DISK_FAULTY))
1092 continue;
1093 if (sd->disk.state & (1<<MD_DISK_SYNC) &&
1094 sd->disk.raid_disk < raid_disks) {
1095 char *dn = map_dev(sd->disk.major,
1096 sd->disk.minor, 1);
1097 fdlist[sd->disk.raid_disk]
1098 = dev_open(dn, O_RDONLY);
1099 offsets[sd->disk.raid_disk] = sd->data_offset*512;
1100 if (fdlist[sd->disk.raid_disk] < 0) {
1101 pr_err("%s: cannot open component %s\n",
1102 devname, dn ? dn : "-unknown-");
1103 d = -1;
1104 goto release;
1105 }
1106 } else if (backup_file == NULL) {
1107 /* spare */
1108 char *dn = map_dev(sd->disk.major,
1109 sd->disk.minor, 1);
1110 fdlist[d] = dev_open(dn, O_RDWR);
1111 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
1112 if (fdlist[d] < 0) {
1113 pr_err("%s: cannot open component %s\n",
1114 devname, dn ? dn : "-unknown-");
1115 d = -1;
1116 goto release;
1117 }
1118 d++;
1119 }
1120 }
1121 release:
1122 return d;
1123 }
1124
1125 int reshape_open_backup_file(char *backup_file,
1126 int fd,
1127 char *devname,
1128 long blocks,
1129 int *fdlist,
1130 unsigned long long *offsets,
1131 char *sys_name,
1132 int restart)
1133 {
1134 /* Return 1 on success, 0 on any form of failure */
1135 /* need to check backup file is large enough */
1136 char buf[512];
1137 struct stat stb;
1138 unsigned int dev;
1139 int i;
1140
1141 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
1142 S_IRUSR | S_IWUSR);
1143 *offsets = 8 * 512;
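/* The first 8 sectors (4 KiB) of the backup file are reserved ahead of the
 * saved data, so the data region reported via *offsets starts at byte
 * offset 4096; the file itself is sized to blocks + 8 sectors below.
 */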
1144 if (*fdlist < 0) {
1145 pr_err("%s: cannot create backup file %s: %s\n",
1146 devname, backup_file, strerror(errno));
1147 return 0;
1148 }
1149 /* Guard against backup file being on array device.
1150 * If array is partitioned or if LVM etc is in the
1151 * way this will not notice, but it is better than
1152 * nothing.
1153 */
1154 fstat(*fdlist, &stb);
1155 dev = stb.st_dev;
1156 fstat(fd, &stb);
1157 if (stb.st_rdev == dev) {
1158 pr_err("backup file must NOT be on the array being reshaped.\n");
1159 close(*fdlist);
1160 return 0;
1161 }
1162
1163 memset(buf, 0, 512);
1164 for (i=0; i < blocks + 8 ; i++) {
1165 if (write(*fdlist, buf, 512) != 512) {
1166 pr_err("%s: cannot create backup file %s: %s\n",
1167 devname, backup_file, strerror(errno));
1168 return 0;
1169 }
1170 }
1171 if (fsync(*fdlist) != 0) {
1172 pr_err("%s: cannot create backup file %s: %s\n",
1173 devname, backup_file, strerror(errno));
1174 return 0;
1175 }
1176
1177 if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) {
1178 char *bu = make_backup(sys_name);
1179 if (symlink(backup_file, bu))
1180 pr_err("Recording backup file in " MAP_DIR " failed: %s\n",
1181 strerror(errno));
1182 free(bu);
1183 }
1184
1185 return 1;
1186 }
1187
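/* Worked example (illustrative figures, not from the original source):
 * growing from 3 to 4 data disks with a 64K chunk in both geometries gives
 *   a = (65536/512) * 3 = 384 sectors  (one "old" stripe of data)
 *   b = (65536/512) * 4 = 512 sectors  (one "new" stripe of data)
 *   GCD(384, 512) = 128, so LCM = 384 * 512 / 128 = 1536 sectors,
 * i.e. each reshape step must back up 768 KiB to cover a whole number of
 * both old and new stripes.
 */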
1188 unsigned long compute_backup_blocks(int nchunk, int ochunk,
1189 unsigned int ndata, unsigned int odata)
1190 {
1191 unsigned long a, b, blocks;
1192 /* So how much do we need to back up?
1193 * We need an amount of data which is both a whole number of
1194 * old stripes and a whole number of new stripes.
1195 * So we take the LCM of (chunksize*datadisks) for old and new.
1196 */
1197 a = (ochunk/512) * odata;
1198 b = (nchunk/512) * ndata;
1199 /* Find GCD */
1200 a = GCD(a, b);
1201 /* LCM == product / GCD */
1202 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
1203
1204 return blocks;
1205 }
1206
1207 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
1208 {
1209 /* Based on the current array state in info->array and
1210 * the changes in info->new_* etc, determine:
1211 * - whether the change is possible
1212 * - Intermediate level/raid_disks/layout
1213 * - whether a restriping reshape is needed
1214 * - number of sectors in minimum change unit. This
1215 * will cover a whole number of stripes in 'before' and
1216 * 'after'.
1217 *
1218 * Return message if the change should be rejected
1219 * NULL if the change can be achieved
1220 *
1221 * This can be called as part of starting a reshape, or
1222 * when assembling an array that is undergoing reshape.
1223 */
1224 int near, far, offset, copies;
1225 int new_disks;
1226 int old_chunk, new_chunk;
1227 /* delta_parity records change in number of devices
1228 * caused by level change
1229 */
1230 int delta_parity = 0;
1231
1232 memset(re, 0, sizeof(*re));
1233
1234 /* If a new level not explicitly given, we assume no-change */
1235 if (info->new_level == UnSet)
1236 info->new_level = info->array.level;
1237
1238 if (info->new_chunk)
1239 switch (info->new_level) {
1240 case 0:
1241 case 4:
1242 case 5:
1243 case 6:
1244 case 10:
1245 /* chunk size is meaningful, must divide component_size
1246 * evenly
1247 */
1248 if (info->component_size % (info->new_chunk/512)) {
1249 unsigned long long shrink = info->component_size;
1250 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1251 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1252 info->new_chunk/1024, info->component_size/2);
1253 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1254 devname, shrink/2);
1255 pr_err("will shrink the array so the given chunk size would work.\n");
1256 return "";
1257 }
1258 break;
1259 default:
1260 return "chunk size not meaningful for this level";
1261 }
1262 else
1263 info->new_chunk = info->array.chunk_size;
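/* Illustrative arithmetic for the divisibility check above (hypothetical
 * numbers): a component_size of 1000000 sectors with a new 512K chunk
 * (1024 sectors) leaves 1000000 % 1024 = 576, so the user is told to
 * shrink to 999424 sectors (499712k) before that chunk size can be used.
 */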
1264
1265 switch (info->array.level) {
1266 default:
1267 return "No reshape is possible for this RAID level";
1268 case LEVEL_LINEAR:
1269 if (info->delta_disks != UnSet)
1270 return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
1271 else
1272 return "Only --add is supported for LINEAR, other --grow options are not meaningful";
1273 case 1:
1274 /* RAID1 can convert to RAID1 with different disks, or
1275 * raid5 with 2 disks, or
1276 * raid0 with 1 disk
1277 */
1278 if (info->new_level > 1 && (info->component_size & 7))
1279 return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
1280 if (info->new_level == 0) {
1281 if (info->delta_disks != UnSet &&
1282 info->delta_disks != 0)
1283 return "Cannot change number of disks with RAID1->RAID0 conversion";
1284 re->level = 0;
1285 re->before.data_disks = 1;
1286 re->after.data_disks = 1;
1287 return NULL;
1288 }
1289 if (info->new_level == 1) {
1290 if (info->delta_disks == UnSet)
1291 /* Don't know what to do */
1292 return "no change requested for Growing RAID1";
1293 re->level = 1;
1294 return NULL;
1295 }
1296 if (info->array.raid_disks != 2 && info->new_level == 5)
1297 return "Can only convert a 2-device array to RAID5";
1298 if (info->array.raid_disks == 2 && info->new_level == 5) {
1299 re->level = 5;
1300 re->before.data_disks = 1;
1301 if (info->delta_disks != UnSet &&
1302 info->delta_disks != 0)
1303 re->after.data_disks = 1 + info->delta_disks;
1304 else
1305 re->after.data_disks = 1;
1306 if (re->after.data_disks < 1)
1307 return "Number of disks too small for RAID5";
1308
1309 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1310 info->array.chunk_size = 65536;
1311 break;
1312 }
1313 /* Could do some multi-stage conversions, but leave that to
1314 * later.
1315 */
1316 return "Impossible level change requested for RAID1";
1317
1318 case 10:
1319 /* RAID10 can be converted from near mode to
1320 * RAID0 by removing some devices.
1321 * It can also be reshaped if the kernel supports
1322 * new_data_offset.
1323 */
1324 switch (info->new_level) {
1325 case 0:
1326 if ((info->array.layout & ~0xff) != 0x100)
1327 return "Cannot Grow RAID10 with far/offset layout";
1328 /* number of devices must be multiple of number of copies */
1329 if (info->array.raid_disks % (info->array.layout & 0xff))
1330 return "RAID10 layout too complex for Grow operation";
1331
1332 new_disks = (info->array.raid_disks
1333 / (info->array.layout & 0xff));
1334 if (info->delta_disks == UnSet)
1335 info->delta_disks = (new_disks
1336 - info->array.raid_disks);
1337
1338 if (info->delta_disks != new_disks - info->array.raid_disks)
1339 return "New number of raid-devices impossible for RAID10";
1340 if (info->new_chunk &&
1341 info->new_chunk != info->array.chunk_size)
1342 return "Cannot change chunk-size with RAID10 Grow";
1343
1344 /* looks good */
1345 re->level = 0;
1346 re->before.data_disks = new_disks;
1347 re->after.data_disks = re->before.data_disks;
1348 return NULL;
1349
1350 case 10:
1351 near = info->array.layout & 0xff;
1352 far = (info->array.layout >> 8) & 0xff;
1353 offset = info->array.layout & 0x10000;
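/* Decoding example (illustrative): layout 0x102 is near=2, far=1 ("n2");
 * 0x201 is near=1, far=2 ("f2"); bit 0x10000 marks an offset layout, which
 * is the only "far"-style arrangement this code will reshape.
 */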
1354 if (far > 1 && !offset)
1355 return "Cannot reshape RAID10 in far-mode";
1356 copies = near * far;
1357
1358 old_chunk = info->array.chunk_size * far;
1359
1360 if (info->new_layout == UnSet)
1361 info->new_layout = info->array.layout;
1362 else {
1363 near = info->new_layout & 0xff;
1364 far = (info->new_layout >> 8) & 0xff;
1365 offset = info->new_layout & 0x10000;
1366 if (far > 1 && !offset)
1367 return "Cannot reshape RAID10 to far-mode";
1368 if (near * far != copies)
1369 return "Cannot change number of copies when reshaping RAID10";
1370 }
1371 if (info->delta_disks == UnSet)
1372 info->delta_disks = 0;
1373 new_disks = (info->array.raid_disks +
1374 info->delta_disks);
1375
1376 new_chunk = info->new_chunk * far;
1377
1378 re->level = 10;
1379 re->before.layout = info->array.layout;
1380 re->before.data_disks = info->array.raid_disks;
1381 re->after.layout = info->new_layout;
1382 re->after.data_disks = new_disks;
1383 /* For RAID10 we don't do backup but do allow reshape,
1384 * so set backup_blocks to INVALID_SECTORS rather than
1385 * zero.
1386 * And there is no need to synchronise stripes on both
1387 * 'old' and 'new'. So the important
1388 * number is the minimum data_offset difference
1389 * which is the larger of (offset copies * chunk).
1390 */
1391 re->backup_blocks = INVALID_SECTORS;
1392 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1393 if (new_disks < re->before.data_disks &&
1394 info->space_after < re->min_offset_change)
1395 /* Reduce component size by one chunk */
1396 re->new_size = (info->component_size -
1397 re->min_offset_change);
1398 else
1399 re->new_size = info->component_size;
1400 re->new_size = re->new_size * new_disks / copies;
1401 return NULL;
1402
1403 default:
1404 return "RAID10 can only be changed to RAID0";
1405 }
1406 case 0:
1407 /* RAID0 can be converted to RAID10, or to RAID456 */
1408 if (info->new_level == 10) {
1409 if (info->new_layout == UnSet &&
1410 info->delta_disks == UnSet) {
1411 /* Assume near=2 layout */
1412 info->new_layout = 0x102;
1413 info->delta_disks = info->array.raid_disks;
1414 }
1415 if (info->new_layout == UnSet) {
1416 int copies = 1 + (info->delta_disks
1417 / info->array.raid_disks);
1418 if (info->array.raid_disks * (copies-1) !=
1419 info->delta_disks)
1420 return "Impossible number of devices for RAID0->RAID10";
1421 info->new_layout = 0x100 + copies;
1422 }
1423 if (info->delta_disks == UnSet) {
1424 int copies = info->new_layout & 0xff;
1425 if (info->new_layout != 0x100 + copies)
1426 return "New layout impossible for RAID0->RAID10";
1427 info->delta_disks = (copies - 1) *
1428 info->array.raid_disks;
1429 }
1430 if (info->new_chunk &&
1431 info->new_chunk != info->array.chunk_size)
1432 return "Cannot change chunk-size with RAID0->RAID10";
1433 /* looks good */
1434 re->level = 10;
1435 re->before.data_disks = (info->array.raid_disks +
1436 info->delta_disks);
1437 re->after.data_disks = re->before.data_disks;
1438 re->before.layout = info->new_layout;
1439 return NULL;
1440 }
1441
1442 /* RAID0 can also convert to RAID0/4/5/6 by first converting to
1443 * a raid4 style layout of the final level.
1444 */
1445 switch (info->new_level) {
1446 case 4:
1447 delta_parity = 1;
1448 case 0:
1449 re->level = 4;
1450 re->before.layout = 0;
1451 break;
1452 case 5:
1453 delta_parity = 1;
1454 re->level = 5;
1455 re->before.layout = ALGORITHM_PARITY_N;
1456 if (info->new_layout == UnSet)
1457 info->new_layout = map_name(r5layout, "default");
1458 break;
1459 case 6:
1460 delta_parity = 2;
1461 re->level = 6;
1462 re->before.layout = ALGORITHM_PARITY_N;
1463 if (info->new_layout == UnSet)
1464 info->new_layout = map_name(r6layout, "default");
1465 break;
1466 default:
1467 return "Impossible level change requested";
1468 }
1469 re->before.data_disks = info->array.raid_disks;
1470 /* determining 'after' layout happens outside this 'switch' */
1471 break;
1472
1473 case 4:
1474 info->array.layout = ALGORITHM_PARITY_N;
1475 case 5:
1476 switch (info->new_level) {
1477 case 0:
1478 delta_parity = -1;
1479 case 4:
1480 re->level = info->array.level;
1481 re->before.data_disks = info->array.raid_disks - 1;
1482 re->before.layout = info->array.layout;
1483 break;
1484 case 5:
1485 re->level = 5;
1486 re->before.data_disks = info->array.raid_disks - 1;
1487 re->before.layout = info->array.layout;
1488 break;
1489 case 6:
1490 delta_parity = 1;
1491 re->level = 6;
1492 re->before.data_disks = info->array.raid_disks - 1;
1493 switch (info->array.layout) {
1494 case ALGORITHM_LEFT_ASYMMETRIC:
1495 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1496 break;
1497 case ALGORITHM_RIGHT_ASYMMETRIC:
1498 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1499 break;
1500 case ALGORITHM_LEFT_SYMMETRIC:
1501 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1502 break;
1503 case ALGORITHM_RIGHT_SYMMETRIC:
1504 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1505 break;
1506 case ALGORITHM_PARITY_0:
1507 re->before.layout = ALGORITHM_PARITY_0_6;
1508 break;
1509 case ALGORITHM_PARITY_N:
1510 re->before.layout = ALGORITHM_PARITY_N_6;
1511 break;
1512 default:
1513 return "Cannot convert an array with this layout";
1514 }
1515 break;
1516 case 1:
1517 if (info->array.raid_disks != 2)
1518 return "Can only convert a 2-device array to RAID1";
1519 if (info->delta_disks != UnSet &&
1520 info->delta_disks != 0)
1521 return "Cannot set raid_disk when converting RAID5->RAID1";
1522 re->level = 1;
1523 info->new_chunk = 0;
1524 return NULL;
1525 default:
1526 return "Impossible level change requested";
1527 }
1528 break;
1529 case 6:
1530 switch (info->new_level) {
1531 case 4:
1532 case 5:
1533 delta_parity = -1;
1534 case 6:
1535 re->level = 6;
1536 re->before.data_disks = info->array.raid_disks - 2;
1537 re->before.layout = info->array.layout;
1538 break;
1539 default:
1540 return "Impossible level change requested";
1541 }
1542 break;
1543 }
1544
1545 /* If we reached here then it looks like a re-stripe is
1546 * happening. We have determined the intermediate level
1547 * and initial raid_disks/layout and stored these in 're'.
1548 *
1549 * We need to deduce the final layout that can be atomically
1550 * converted to the end state.
1551 */
1552 switch (info->new_level) {
1553 case 0:
1554 /* We can only get to RAID0 from RAID4 or RAID5
1555 * with appropriate layout and one extra device
1556 */
1557 if (re->level != 4 && re->level != 5)
1558 return "Cannot convert to RAID0 from this level";
1559
1560 switch (re->level) {
1561 case 4:
1562 re->before.layout = 0;
1563 re->after.layout = 0;
1564 break;
1565 case 5:
1566 re->after.layout = ALGORITHM_PARITY_N;
1567 break;
1568 }
1569 break;
1570
1571 case 4:
1572 /* We can only get to RAID4 from RAID5 */
1573 if (re->level != 4 && re->level != 5)
1574 return "Cannot convert to RAID4 from this level";
1575
1576 switch (re->level) {
1577 case 4:
1578 re->after.layout = 0;
1579 break;
1580 case 5:
1581 re->after.layout = ALGORITHM_PARITY_N;
1582 break;
1583 }
1584 break;
1585
1586 case 5:
1587 /* We get to RAID5 from RAID5 or RAID6 */
1588 if (re->level != 5 && re->level != 6)
1589 return "Cannot convert to RAID5 from this level";
1590
1591 switch (re->level) {
1592 case 5:
1593 if (info->new_layout == UnSet)
1594 re->after.layout = re->before.layout;
1595 else
1596 re->after.layout = info->new_layout;
1597 break;
1598 case 6:
1599 if (info->new_layout == UnSet)
1600 info->new_layout = re->before.layout;
1601
1602 /* after.layout needs to be raid6 version of new_layout */
1603 if (info->new_layout == ALGORITHM_PARITY_N)
1604 re->after.layout = ALGORITHM_PARITY_N;
1605 else {
1606 char layout[40];
1607 char *ls = map_num(r5layout, info->new_layout);
1608 int l;
1609 if (ls) {
1610 /* Current RAID6 layout has a RAID5
1611 * equivalent - good
1612 */
1613 strcat(strcpy(layout, ls), "-6");
1614 l = map_name(r6layout, layout);
1615 if (l == UnSet)
1616 return "Cannot find RAID6 layout to convert to";
1617 } else {
1618 /* Current RAID6 has no equivalent.
1619 * If it is already a '-6' layout we
1620 * can leave it unchanged, else we must
1621 * fail
1622 */
1623 ls = map_num(r6layout, info->new_layout);
1624 if (!ls ||
1625 strcmp(ls+strlen(ls)-2, "-6") != 0)
1626 return "Please specify new layout";
1627 l = info->new_layout;
1628 }
1629 re->after.layout = l;
1630 }
1631 }
1632 break;
1633
1634 case 6:
1635 /* We must already be at level 6 */
1636 if (re->level != 6)
1637 return "Impossible level change";
1638 if (info->new_layout == UnSet)
1639 re->after.layout = info->array.layout;
1640 else
1641 re->after.layout = info->new_layout;
1642 break;
1643 default:
1644 return "Impossible level change requested";
1645 }
1646 if (info->delta_disks == UnSet)
1647 info->delta_disks = delta_parity;
1648
1649 re->after.data_disks =
1650 (re->before.data_disks + info->delta_disks - delta_parity);
1651
1652 switch (re->level) {
1653 case 6:
1654 re->parity = 2;
1655 break;
1656 case 4:
1657 case 5:
1658 re->parity = 1;
1659 break;
1660 default:
1661 re->parity = 0;
1662 break;
1663 }
1664 /* So we have a restripe operation, we need to calculate the number
1665 * of blocks per reshape operation.
1666 */
1667 re->new_size = info->component_size * re->before.data_disks;
1668 if (info->new_chunk == 0)
1669 info->new_chunk = info->array.chunk_size;
1670 if (re->after.data_disks == re->before.data_disks &&
1671 re->after.layout == re->before.layout &&
1672 info->new_chunk == info->array.chunk_size) {
1673 /* Nothing to change, can change level immediately. */
1674 re->level = info->new_level;
1675 re->backup_blocks = 0;
1676 return NULL;
1677 }
1678 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1679 /* chunk and layout changes make no difference */
1680 re->level = info->new_level;
1681 re->backup_blocks = 0;
1682 return NULL;
1683 }
1684
1685 if (re->after.data_disks == re->before.data_disks &&
1686 get_linux_version() < 2006032)
1687 return "in-place reshape is not safe before 2.6.32 - sorry.";
1688
1689 if (re->after.data_disks < re->before.data_disks &&
1690 get_linux_version() < 2006030)
1691 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1692
1693 re->backup_blocks = compute_backup_blocks(
1694 info->new_chunk, info->array.chunk_size,
1695 re->after.data_disks,
1696 re->before.data_disks);
1697 re->min_offset_change = re->backup_blocks / re->before.data_disks;
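/* Illustrative continuation of the compute_backup_blocks() example above:
 * 1536 backup sectors across 3 "before" data disks gives a minimum
 * data_offset change of 512 sectors per device.
 */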
1698
1699 re->new_size = info->component_size * re->after.data_disks;
1700 return NULL;
1701 }
1702
1703 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1704 char *text_version)
1705 {
1706 struct mdinfo *info;
1707 char *subarray;
1708 int ret_val = -1;
1709
1710 if ((st == NULL) || (sra == NULL))
1711 return ret_val;
1712
1713 if (text_version == NULL)
1714 text_version = sra->text_version;
1715 subarray = strchr(text_version + 1, '/')+1;
1716 info = st->ss->container_content(st, subarray);
1717 if (info) {
1718 unsigned long long current_size = 0;
1719 unsigned long long new_size =
1720 info->custom_array_size/2;
1721
1722 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1723 new_size > current_size) {
1724 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1725 < 0)
1726 dprintf("Error: Cannot set array size");
1727 else {
1728 ret_val = 0;
1729 dprintf("Array size changed");
1730 }
1731 dprintf_cont(" from %llu to %llu.\n",
1732 current_size, new_size);
1733 }
1734 sysfs_free(info);
1735 } else
1736 dprintf("Error: set_array_size(): info pointer is NULL\n");
1737
1738 return ret_val;
1739 }
1740
1741 static int reshape_array(char *container, int fd, char *devname,
1742 struct supertype *st, struct mdinfo *info,
1743 int force, struct mddev_dev *devlist,
1744 unsigned long long data_offset,
1745 char *backup_file, int verbose, int forked,
1746 int restart, int freeze_reshape);
1747 static int reshape_container(char *container, char *devname,
1748 int mdfd,
1749 struct supertype *st,
1750 struct mdinfo *info,
1751 int force,
1752 char *backup_file, int verbose,
1753 int forked, int restart, int freeze_reshape);
1754
1755 int Grow_reshape(char *devname, int fd,
1756 struct mddev_dev *devlist,
1757 unsigned long long data_offset,
1758 struct context *c, struct shape *s)
1759 {
1760 /* Make some changes in the shape of an array.
1761 * The kernel must support the change.
1762 *
1763 * There are three different changes. Each can trigger
1764 * a resync or recovery so we freeze that until we have
1765 * requested everything (if kernel supports freezing - 2.6.30).
1766 * The steps are:
1767 * - change size (i.e. component_size)
1768 * - change level
1769 * - change layout/chunksize/ndisks
1770 *
1771 * The last can require a reshape. It is different on different
1772 * levels so we need to check the level before actioning it.
1773 * Sometimes the level change needs to be requested after the
1774 * reshape (e.g. raid6->raid5, raid5->raid0)
1775 *
1776 */
1777 struct mdu_array_info_s array;
1778 int rv = 0;
1779 struct supertype *st;
1780 char *subarray = NULL;
1781
1782 int frozen;
1783 int changed = 0;
1784 char *container = NULL;
1785 int cfd = -1;
1786
1787 struct mddev_dev *dv;
1788 int added_disks;
1789
1790 struct mdinfo info;
1791 struct mdinfo *sra;
1792
1793 if (md_get_array_info(fd, &array) < 0) {
1794 pr_err("%s is not an active md array - aborting\n",
1795 devname);
1796 return 1;
1797 }
1798 if (data_offset != INVALID_SECTORS && array.level != 10 &&
1799 (array.level < 4 || array.level > 6)) {
1800 pr_err("--grow --data-offset not yet supported\n");
1801 return 1;
1802 }
1803
1804 if (s->size > 0 &&
1805 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1806 pr_err("cannot change component size at the same time as other changes.\n"
1807 " Change size first, then check data is intact before making other changes.\n");
1808 return 1;
1809 }
1810
1811 if (s->raiddisks && s->raiddisks < array.raid_disks &&
1812 array.level > 1 && get_linux_version() < 2006032 &&
1813 !check_env("MDADM_FORCE_FEWER")) {
1814 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1815 " Please use a newer kernel\n");
1816 return 1;
1817 }
1818
1819 st = super_by_fd(fd, &subarray);
1820 if (!st) {
1821 pr_err("Unable to determine metadata format for %s\n", devname);
1822 return 1;
1823 }
1824 if (s->raiddisks > st->max_devs) {
1825 pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs);
1826 return 1;
1827 }
1828 if (s->level == 0 &&
1829 (array.state & (1<<MD_SB_BITMAP_PRESENT)) &&
1830 !(array.state & (1<<MD_SB_CLUSTERED))) {
1831 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
1832 if (md_set_array_info(fd, &array)!= 0) {
1833 pr_err("failed to remove internal bitmap.\n");
1834 return 1;
1835 }
1836 }
1837
1838 /* in the external case we need to check that the requested reshape is
1839 * supported, and perform an initial check that the container holds the
1840 * pre-requisite spare devices (mdmon owns final validation)
1841 */
1842 if (st->ss->external) {
1843 int retval;
1844
1845 if (subarray) {
1846 container = st->container_devnm;
1847 cfd = open_dev_excl(st->container_devnm);
1848 } else {
1849 container = st->devnm;
1850 close(fd);
1851 cfd = open_dev_excl(st->devnm);
1852 fd = cfd;
1853 }
1854 if (cfd < 0) {
1855 pr_err("Unable to open container for %s\n",
1856 devname);
1857 free(subarray);
1858 return 1;
1859 }
1860
1861 retval = st->ss->load_container(st, cfd, NULL);
1862
1863 if (retval) {
1864 pr_err("Cannot read superblock for %s\n",
1865 devname);
1866 free(subarray);
1867 return 1;
1868 }
1869
1870 /* check if operation is supported for metadata handler */
1871 if (st->ss->container_content) {
1872 struct mdinfo *cc = NULL;
1873 struct mdinfo *content = NULL;
1874
1875 cc = st->ss->container_content(st, subarray);
1876 for (content = cc; content ; content = content->next) {
1877 int allow_reshape = 1;
1878
1879 /* check if reshape is allowed based on metadata
1880 * indications stored in content->array.state
1881 */
1882 if (content->array.state &
1883 (1 << MD_SB_BLOCK_VOLUME))
1884 allow_reshape = 0;
1885 if (content->array.state &
1886 (1 << MD_SB_BLOCK_CONTAINER_RESHAPE))
1887 allow_reshape = 0;
1888 if (!allow_reshape) {
1889 pr_err("cannot reshape arrays in container with unsupported metadata: %s(%s)\n",
1890 devname, container);
1891 sysfs_free(cc);
1892 free(subarray);
1893 return 1;
1894 }
1895 if (content->consistency_policy ==
1896 CONSISTENCY_POLICY_PPL) {
1897 pr_err("Operation not supported when ppl consistency policy is enabled\n");
1898 sysfs_free(cc);
1899 free(subarray);
1900 return 1;
1901 }
1902 }
1903 sysfs_free(cc);
1904 }
1905 if (mdmon_running(container))
1906 st->update_tail = &st->updates;
1907 }
1908
1909 added_disks = 0;
1910 for (dv = devlist; dv; dv = dv->next)
1911 added_disks++;
1912 if (s->raiddisks > array.raid_disks &&
1913 array.spare_disks + added_disks < (s->raiddisks - array.raid_disks) &&
1914 !c->force) {
1915 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
1916 " Use --force to over-ride this check.\n",
1917 s->raiddisks - array.raid_disks,
1918 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1919 array.spare_disks + added_disks);
1920 return 1;
1921 }
1922
1923 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS |
1924 GET_STATE | GET_VERSION);
1925 if (sra) {
1926 if (st->ss->external && subarray == NULL) {
1927 array.level = LEVEL_CONTAINER;
1928 sra->array.level = LEVEL_CONTAINER;
1929 }
1930 } else {
1931 pr_err("failed to read sysfs parameters for %s\n",
1932 devname);
1933 return 1;
1934 }
1935 frozen = freeze(st);
1936 if (frozen < -1) {
1937 /* freeze() already spewed the reason */
1938 sysfs_free(sra);
1939 return 1;
1940 } else if (frozen < 0) {
1941 pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname);
1942 sysfs_free(sra);
1943 return 1;
1944 }
1945
1946 /* ========= set size =============== */
1947 if (s->size > 0 &&
1948 (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1949 unsigned long long orig_size = get_component_size(fd)/2;
1950 unsigned long long min_csize;
1951 struct mdinfo *mdi;
1952 int raid0_takeover = 0;
1953
1954 if (orig_size == 0)
1955 orig_size = (unsigned) array.size;
1956
1957 if (orig_size == 0) {
1958 pr_err("Cannot set device size in this type of array.\n");
1959 rv = 1;
1960 goto release;
1961 }
1962
1963 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1964 devname, APPLY_METADATA_CHANGES,
1965 c->verbose > 0)) {
1966 rv = 1;
1967 goto release;
1968 }
1969 sync_metadata(st);
1970 if (st->ss->external) {
1971 /* metadata can have a size limitation;
1972 * update the size value according to metadata information
1973 */
1974 struct mdinfo *sizeinfo =
1975 st->ss->container_content(st, subarray);
1976 if (sizeinfo) {
1977 unsigned long long new_size =
1978 sizeinfo->custom_array_size/2;
1979 int data_disks = get_data_disks(
1980 sizeinfo->array.level,
1981 sizeinfo->array.layout,
1982 sizeinfo->array.raid_disks);
1983 new_size /= data_disks;
1984 dprintf("Metadata size correction from %llu to %llu (%llu)\n",
1985 orig_size, new_size,
1986 new_size * data_disks);
1987 s->size = new_size;
1988 sysfs_free(sizeinfo);
1989 }
1990 }
1991
1992 /* Update the size of each member device in case
1993 * they have been resized. This will never reduce
1994 * below the current used-size. The "size" attribute
1995 * understands '0' to mean 'max'.
1996 */
1997 min_csize = 0;
1998 for (mdi = sra->devs; mdi; mdi = mdi->next) {
1999 sysfs_set_num(sra, mdi, "size", s->size == MAX_SIZE ? 0
2000 : s->size);
2001 if (array.not_persistent == 0 &&
2002 array.major_version == 0 &&
2003 get_linux_version() < 3001000) {
2004 /* Dangerous to allow size to exceed 2TB */
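/* The sysfs "size" attribute is in KiB, so the 2ULL*1024*1024*1024
 * clamp below corresponds to 2TiB per device.
 */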
2005 unsigned long long csize;
2006 if (sysfs_get_ll(sra, mdi, "size", &csize) == 0) {
2007 if (csize >= 2ULL*1024*1024*1024)
2008 csize = 2ULL*1024*1024*1024;
2009 if ((min_csize == 0 || (min_csize
2010 > csize)))
2011 min_csize = csize;
2012 }
2013 }
2014 }
2015 if (min_csize && s->size > min_csize) {
2016 pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n");
2017 rv = 1;
2018 goto size_change_error;
2019 }
2020 if (min_csize && s->size == MAX_SIZE) {
2021 /* Don't let the kernel choose a size - it will get
2022 * it wrong
2023 */
2024 pr_err("Limited v0.90 array to 2TB per device\n");
2025 s->size = min_csize;
2026 }
2027 if (st->ss->external) {
2028 if (sra->array.level == 0) {
2029 rv = sysfs_set_str(sra, NULL, "level",
2030 "raid5");
2031 if (!rv) {
2032 raid0_takeover = 1;
2033 /* get array parameters after takeover
2034 * to change one parameter at a time only
2035 */
2036 rv = md_get_array_info(fd, &array);
2037 }
2038 }
2039 /* make sure mdmon is
2040 * aware of the new level */
2041 if (!mdmon_running(st->container_devnm))
2042 start_mdmon(st->container_devnm);
2043 ping_monitor(container);
2044 if (mdmon_running(st->container_devnm) &&
2045 st->update_tail == NULL)
2046 st->update_tail = &st->updates;
2047 }
2048
2049 if (s->size == MAX_SIZE)
2050 s->size = 0;
2051 array.size = s->size;
2052 if (s->size & ~INT32_MAX) {
2053 /* got truncated to 32bit, write to
2054 * component_size instead
2055 */
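/* array.size in mdu_array_info_s is a 32-bit int in KiB, so
 * component sizes of 2TiB and above cannot be passed through the
 * ioctl and go through the sysfs "component_size" attribute instead.
 */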
2056 if (sra)
2057 rv = sysfs_set_num(sra, NULL,
2058 "component_size", s->size);
2059 else
2060 rv = -1;
2061 } else {
2062 rv = md_set_array_info(fd, &array);
2063
2064 /* manage array size when it is managed externally
2065 */
2066 if ((rv == 0) && st->ss->external)
2067 rv = set_array_size(st, sra, sra->text_version);
2068 }
2069
2070 if (raid0_takeover) {
2071 /* do not resync the non-existent parity,
2072 * we will drop it anyway
2073 */
2074 sysfs_set_str(sra, NULL, "sync_action", "frozen");
2075 /* go back to raid0, drop parity disk
2076 */
2077 sysfs_set_str(sra, NULL, "level", "raid0");
2078 md_get_array_info(fd, &array);
2079 }
2080
2081 size_change_error:
2082 if (rv != 0) {
2083 int err = errno;
2084
2085 /* restore metadata */
2086 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
2087 UnSet, NULL, devname,
2088 ROLLBACK_METADATA_CHANGES,
2089 c->verbose) == 0)
2090 sync_metadata(st);
2091 pr_err("Cannot set device size for %s: %s\n",
2092 devname, strerror(err));
2093 if (err == EBUSY &&
2094 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2095 cont_err("Bitmap must be removed before size can be changed\n");
2096 rv = 1;
2097 goto release;
2098 }
2099 if (s->assume_clean) {
2100 /* This will fail on kernels older than 3.0 unless
2101 * a backport has been arranged.
2102 */
2103 if (sra == NULL ||
2104 sysfs_set_str(sra, NULL, "resync_start", "none") < 0)
2105 pr_err("--assume-clean not supported with --grow on this kernel\n");
2106 }
2107 md_get_array_info(fd, &array);
2108 s->size = get_component_size(fd)/2;
2109 if (s->size == 0)
2110 s->size = array.size;
2111 if (c->verbose >= 0) {
2112 if (s->size == orig_size)
2113 pr_err("component size of %s unchanged at %lluK\n",
2114 devname, s->size);
2115 else
2116 pr_err("component size of %s has been set to %lluK\n",
2117 devname, s->size);
2118 }
2119 changed = 1;
2120 } else if (array.level != LEVEL_CONTAINER) {
2121 s->size = get_component_size(fd)/2;
2122 if (s->size == 0)
2123 s->size = array.size;
2124 }
2125
2126 /* See if there is anything else to do */
2127 if ((s->level == UnSet || s->level == array.level) &&
2128 (s->layout_str == NULL) &&
2129 (s->chunk == 0 || s->chunk == array.chunk_size) &&
2130 data_offset == INVALID_SECTORS &&
2131 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
2132 /* Nothing more to do */
2133 if (!changed && c->verbose >= 0)
2134 pr_err("%s: no change requested\n",
2135 devname);
2136 goto release;
2137 }
2138
2139 /* ========= check for Raid10/Raid1 -> Raid0 conversion ===============
2140 * current implementation assumes that the following conditions must be met:
2141 * - RAID10:
2142 * - far_copies == 1
2143 * - near_copies == 2
2144 */
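/* In the md RAID10 layout word the low byte holds near_copies and the
 * next byte far_copies, so ((1 << 8) + 2) below matches far_copies == 1,
 * near_copies == 2.
 */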
2145 if ((s->level == 0 && array.level == 10 && sra &&
2146 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
2147 (s->level == 0 && array.level == 1 && sra)) {
2148 int err;
2149 err = remove_disks_for_takeover(st, sra, array.layout);
2150 if (err) {
2151 dprintf("Array cannot be reshaped\n");
2152 if (cfd > -1)
2153 close(cfd);
2154 rv = 1;
2155 goto release;
2156 }
2157 /* Make sure mdmon has seen the device removal
2158 * and updated metadata before we continue with
2159 * level change
2160 */
2161 if (container)
2162 ping_monitor(container);
2163 }
2164
2165 memset(&info, 0, sizeof(info));
2166 info.array = array;
2167 if (sysfs_init(&info, fd, NULL)) {
2168 pr_err("failed to intialize sysfs.\n");
2169 rv = 1;
2170 goto release;
2171 }
2172 strcpy(info.text_version, sra->text_version);
2173 info.component_size = s->size*2;
2174 info.new_level = s->level;
2175 info.new_chunk = s->chunk * 1024;
2176 if (info.array.level == LEVEL_CONTAINER) {
2177 info.delta_disks = UnSet;
2178 info.array.raid_disks = s->raiddisks;
2179 } else if (s->raiddisks)
2180 info.delta_disks = s->raiddisks - info.array.raid_disks;
2181 else
2182 info.delta_disks = UnSet;
2183 if (s->layout_str == NULL) {
2184 info.new_layout = UnSet;
2185 if (info.array.level == 6 &&
2186 (info.new_level == 6 || info.new_level == UnSet) &&
2187 info.array.layout >= 16) {
2188 pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname);
2189 cont_err("during the reshape, please specify --layout=preserve\n");
2190 cont_err("If you want to change it, specify a layout or use --layout=normalise\n");
2191 rv = 1;
2192 goto release;
2193 }
2194 } else if (strcmp(s->layout_str, "normalise") == 0 ||
2195 strcmp(s->layout_str, "normalize") == 0) {
2196 /* If we have a -6 RAID6 layout, remove the '-6'. */
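/* e.g. a "left-symmetric-6" layout becomes "left-symmetric". */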
2197 info.new_layout = UnSet;
2198 if (info.array.level == 6 && info.new_level == UnSet) {
2199 char l[40], *h;
2200 strcpy(l, map_num(r6layout, info.array.layout));
2201 h = strrchr(l, '-');
2202 if (h && strcmp(h, "-6") == 0) {
2203 *h = 0;
2204 info.new_layout = map_name(r6layout, l);
2205 }
2206 } else {
2207 pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str);
2208 rv = 1;
2209 goto release;
2210 }
2211 } else if (strcmp(s->layout_str, "preserve") == 0) {
2212 /* This means that a non-standard RAID6 layout
2213 * is OK.
2214 * In particular:
2215 * - When reshaping a RAID6 (e.g. adding a device)
2216 * which is in a non-standard layout, it is OK
2217 * to preserve that layout.
2218 * - When converting a RAID5 to RAID6, leave it in
2219 * the XXX-6 layout, don't re-layout.
2220 */
2221 if (info.array.level == 6 && info.new_level == UnSet)
2222 info.new_layout = info.array.layout;
2223 else if (info.array.level == 5 && info.new_level == 6) {
2224 char l[40];
2225 strcpy(l, map_num(r5layout, info.array.layout));
2226 strcat(l, "-6");
2227 info.new_layout = map_name(r6layout, l);
2228 } else {
2229 pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str);
2230 rv = 1;
2231 goto release;
2232 }
2233 } else {
2234 int l = info.new_level;
2235 if (l == UnSet)
2236 l = info.array.level;
2237 switch (l) {
2238 case 5:
2239 info.new_layout = map_name(r5layout, s->layout_str);
2240 break;
2241 case 6:
2242 info.new_layout = map_name(r6layout, s->layout_str);
2243 break;
2244 case 10:
2245 info.new_layout = parse_layout_10(s->layout_str);
2246 break;
2247 case LEVEL_FAULTY:
2248 info.new_layout = parse_layout_faulty(s->layout_str);
2249 break;
2250 default:
2251 pr_err("layout not meaningful with this level\n");
2252 rv = 1;
2253 goto release;
2254 }
2255 if (info.new_layout == UnSet) {
2256 pr_err("layout %s not understood for this level\n",
2257 s->layout_str);
2258 rv = 1;
2259 goto release;
2260 }
2261 }
2262
2263 if (array.level == LEVEL_FAULTY) {
2264 if (s->level != UnSet && s->level != array.level) {
2265 pr_err("cannot change level of Faulty device\n");
2266 rv = 1;
2267 }
2268 if (s->chunk) {
2269 pr_err("cannot set chunksize of Faulty device\n");
2270 rv = 1;
2271 }
2272 if (s->raiddisks && s->raiddisks != 1) {
2273 pr_err("cannot set raid_disks of Faulty device\n");
2274 rv = 1;
2275 }
2276 if (s->layout_str) {
2277 if (md_get_array_info(fd, &array) != 0) {
2278 dprintf("Cannot get array information.\n");
2279 goto release;
2280 }
2281 array.layout = info.new_layout;
2282 if (md_set_array_info(fd, &array) != 0) {
2283 pr_err("failed to set new layout\n");
2284 rv = 1;
2285 } else if (c->verbose >= 0)
2286 printf("layout for %s set to %d\n",
2287 devname, array.layout);
2288 }
2289 } else if (array.level == LEVEL_CONTAINER) {
2290 /* This change is to be applied to every array in the
2291 * container. This is only needed when the metadata imposes
2292 * constraints on the various arrays in the container.
2293 * Currently we only know that IMSM requires all arrays
2294 * to have the same number of devices, so changing the
2295 * number of devices (On-Line Capacity Expansion) must be
2296 * performed at the level of the container.
2297 */
2298 if (fd > 0) {
2299 close(fd);
2300 fd = -1;
2301 }
2302 rv = reshape_container(container, devname, -1, st, &info,
2303 c->force, c->backup_file, c->verbose, 0, 0, 0);
2304 frozen = 0;
2305 } else {
2306 /* get spare devices from external metadata
2307 */
2308 if (st->ss->external) {
2309 struct mdinfo *info2;
2310
2311 info2 = st->ss->container_content(st, subarray);
2312 if (info2) {
2313 info.array.spare_disks =
2314 info2->array.spare_disks;
2315 sysfs_free(info2);
2316 }
2317 }
2318
2319 /* Impose these changes on a single array. First
2320 * check that the metadata is OK with the change. */
2321
2322 if (reshape_super(st, 0, info.new_level,
2323 info.new_layout, info.new_chunk,
2324 info.array.raid_disks, info.delta_disks,
2325 c->backup_file, devname, APPLY_METADATA_CHANGES,
2326 c->verbose)) {
2327 rv = 1;
2328 goto release;
2329 }
2330 sync_metadata(st);
2331 rv = reshape_array(container, fd, devname, st, &info, c->force,
2332 devlist, data_offset, c->backup_file, c->verbose,
2333 0, 0, 0);
2334 frozen = 0;
2335 }
2336 release:
2337 sysfs_free(sra);
2338 if (frozen > 0)
2339 unfreeze(st);
2340 return rv;
2341 }
2342
2343 /* verify_reshape_position()
2344 * Checks that the reshape position recorded in the metadata is not
2345 * farther along than the position reported by md.
2346 * Return value:
2347 * 0 : no valid sysfs entry;
2348 * this can happen when the reshape has not started yet (it should
2349 * be started by reshape_array()) or before a raid0 array is taken over
2350 * -1 : error, reshape position is obviously wrong
2351 * 1 : success, reshape progress correct or updated
2352 */
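/* Illustration: if sync_max reads 1000 (per-device sectors) and the
 * level has 4 data disks, the md position is 4000 array sectors; a
 * smaller metadata reshape_progress is bumped up to that value, while
 * a larger one is reported as a fatal inconsistency.
 */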
2353 static int verify_reshape_position(struct mdinfo *info, int level)
2354 {
2355 int ret_val = 0;
2356 char buf[40];
2357 int rv;
2358
2359 /* read sync_max, failure can mean raid0 array */
2360 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2361
2362 if (rv > 0) {
2363 char *ep;
2364 unsigned long long position = strtoull(buf, &ep, 0);
2365
2366 dprintf("Read sync_max sysfs entry is: %s\n", buf);
2367 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2368 position *= get_data_disks(level,
2369 info->new_layout,
2370 info->array.raid_disks);
2371 if (info->reshape_progress < position) {
2372 dprintf("Corrected reshape progress (%llu) to md position (%llu)\n",
2373 info->reshape_progress, position);
2374 info->reshape_progress = position;
2375 ret_val = 1;
2376 } else if (info->reshape_progress > position) {
2377 pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n",
2378 position, info->reshape_progress);
2379 ret_val = -1;
2380 } else {
2381 dprintf("Reshape position in md and metadata are the same;");
2382 ret_val = 1;
2383 }
2384 }
2385 } else if (rv == 0) {
2386 /* for a valid sysfs entry, zero-length content
2387 * should be reported as an error
2388 */
2389 ret_val = -1;
2390 }
2391
2392 return ret_val;
2393 }
2394
2395 static unsigned long long choose_offset(unsigned long long lo,
2396 unsigned long long hi,
2397 unsigned long long min,
2398 unsigned long long max)
2399 {
2400 /* Choose a new offset between hi and lo.
2401 * It must be between min and max, but
2402 * we would prefer something near the middle of hi/lo, and also
2403 * prefer to be aligned to a big power of 2.
2404 *
2405 * So we start with the middle, then for each bit,
2406 * starting at '1' and increasing, if it is set, we either
2407 * add it or subtract it if possible, preferring the option
2408 * which is furthest from the boundary.
2409 *
2410 * We stop once we get a 1MB alignment. As units are in sectors,
2411 * 1MB = 2*1024 sectors.
2412 */
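/* For illustration, with lo=0, hi=10000, min=0, max=10000 the loop
 * walks the midpoint 5000 -> 4992 -> 5120 -> 4096 and stops on a
 * value aligned to 2048 sectors (1MB).
 */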
2413 unsigned long long choice = (lo + hi) / 2;
2414 unsigned long long bit = 1;
2415
2416 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2417 unsigned long long bigger, smaller;
2418 if (! (bit & choice))
2419 continue;
2420 bigger = choice + bit;
2421 smaller = choice - bit;
2422 if (bigger > max && smaller < min)
2423 break;
2424 if (bigger > max)
2425 choice = smaller;
2426 else if (smaller < min)
2427 choice = bigger;
2428 else if (hi - bigger > smaller - lo)
2429 choice = bigger;
2430 else
2431 choice = smaller;
2432 }
2433 return choice;
2434 }
2435
2436 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2437 char *devname, int delta_disks,
2438 unsigned long long data_offset,
2439 unsigned long long min,
2440 int can_fallback)
2441 {
2442 struct mdinfo *sd;
2443 int dir = 0;
2444 int err = 0;
2445 unsigned long long before, after;
2446
2447 /* Need to find min space before and after so the same is used
2448 * on all devices
2449 */
2450 before = UINT64_MAX;
2451 after = UINT64_MAX;
2452 for (sd = sra->devs; sd; sd = sd->next) {
2453 char *dn;
2454 int dfd;
2455 int rv;
2456 struct supertype *st2;
2457 struct mdinfo info2;
2458
2459 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2460 continue;
2461 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2462 dfd = dev_open(dn, O_RDONLY);
2463 if (dfd < 0) {
2464 pr_err("%s: cannot open component %s\n",
2465 devname, dn ? dn : "-unknown-");
2466 goto release;
2467 }
2468 st2 = dup_super(st);
2469 rv = st2->ss->load_super(st2, dfd, NULL);
2470 close(dfd);
2471 if (rv) {
2472 free(st2);
2473 pr_err("%s: cannot get superblock from %s\n",
2474 devname, dn);
2475 goto release;
2476 }
2477 st2->ss->getinfo_super(st2, &info2, NULL);
2478 st2->ss->free_super(st2);
2479 free(st2);
2480 if (info2.space_before == 0 &&
2481 info2.space_after == 0) {
2482 /* Metadata doesn't support data_offset changes */
2483 if (!can_fallback)
2484 pr_err("%s: Metadata version doesn't support data_offset changes\n",
2485 devname);
2486 goto fallback;
2487 }
2488 if (before > info2.space_before)
2489 before = info2.space_before;
2490 if (after > info2.space_after)
2491 after = info2.space_after;
2492
2493 if (data_offset != INVALID_SECTORS) {
2494 if (dir == 0) {
2495 if (info2.data_offset == data_offset) {
2496 pr_err("%s: already has that data_offset\n",
2497 dn);
2498 goto release;
2499 }
2500 if (data_offset < info2.data_offset)
2501 dir = -1;
2502 else
2503 dir = 1;
2504 } else if ((data_offset <= info2.data_offset && dir == 1) ||
2505 (data_offset >= info2.data_offset && dir == -1)) {
2506 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2507 dn);
2508 goto release;
2509 }
2510 }
2511 }
2512 if (before == UINT64_MAX)
2513 /* impossible really, there must be no devices */
2514 return 1;
2515
2516 for (sd = sra->devs; sd; sd = sd->next) {
2517 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2518 unsigned long long new_data_offset;
2519
2520 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2521 continue;
2522 if (delta_disks < 0) {
2523 /* Don't need any space as the array is shrinking;
2524 * just move data_offset up by min
2525 */
2526 if (data_offset == INVALID_SECTORS)
2527 new_data_offset = sd->data_offset + min;
2528 else {
2529 if (data_offset < sd->data_offset + min) {
2530 pr_err("--data-offset too small for %s\n",
2531 dn);
2532 goto release;
2533 }
2534 new_data_offset = data_offset;
2535 }
2536 } else if (delta_disks > 0) {
2537 /* need space before */
2538 if (before < min) {
2539 if (can_fallback)
2540 goto fallback;
2541 pr_err("Insufficient head-space for reshape on %s\n",
2542 dn);
2543 goto release;
2544 }
2545 if (data_offset == INVALID_SECTORS)
2546 new_data_offset = sd->data_offset - min;
2547 else {
2548 if (data_offset > sd->data_offset - min) {
2549 pr_err("--data-offset too large for %s\n",
2550 dn);
2551 goto release;
2552 }
2553 new_data_offset = data_offset;
2554 }
2555 } else {
2556 if (dir == 0) {
2557 /* can move up or down. If 'data_offset'
2558 * was set we would have already decided,
2559 * so just choose direction with most space.
2560 */
2561 if (before > after)
2562 dir = -1;
2563 else
2564 dir = 1;
2565 }
2566 sysfs_set_str(sra, NULL, "reshape_direction",
2567 dir == 1 ? "backwards" : "forwards");
2568 if (dir > 0) {
2569 /* Increase data offset */
2570 if (after < min) {
2571 if (can_fallback)
2572 goto fallback;
2573 pr_err("Insufficient tail-space for reshape on %s\n",
2574 dn);
2575 goto release;
2576 }
2577 if (data_offset != INVALID_SECTORS &&
2578 data_offset < sd->data_offset + min) {
2579 pr_err("--data-offset too small on %s\n",
2580 dn);
2581 goto release;
2582 }
2583 if (data_offset != INVALID_SECTORS)
2584 new_data_offset = data_offset;
2585 else
2586 new_data_offset = choose_offset(sd->data_offset,
2587 sd->data_offset + after,
2588 sd->data_offset + min,
2589 sd->data_offset + after);
2590 } else {
2591 /* Decrease data offset */
2592 if (before < min) {
2593 if (can_fallback)
2594 goto fallback;
2595 pr_err("insufficient head-room on %s\n",
2596 dn);
2597 goto release;
2598 }
2599 if (data_offset != INVALID_SECTORS &&
2600 data_offset < sd->data_offset - min) {
2601 pr_err("--data-offset too small on %s\n",
2602 dn);
2603 goto release;
2604 }
2605 if (data_offset != INVALID_SECTORS)
2606 new_data_offset = data_offset;
2607 else
2608 new_data_offset = choose_offset(sd->data_offset - before,
2609 sd->data_offset,
2610 sd->data_offset - before,
2611 sd->data_offset - min);
2612 }
2613 }
2614 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2615 if (err < 0 && errno == E2BIG) {
2616 /* try again after increasing data size to max */
2617 err = sysfs_set_num(sra, sd, "size", 0);
2618 if (err < 0 && errno == EINVAL &&
2619 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2620 /* some kernels have a bug where you cannot
2621 * use '0' on spare devices. */
2622 sysfs_set_num(sra, sd, "size",
2623 (sra->component_size + after)/2);
2624 }
2625 err = sysfs_set_num(sra, sd, "new_offset",
2626 new_data_offset);
2627 }
2628 if (err < 0) {
2629 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2630 pr_err("data-offset is too big for %s\n",
2631 dn);
2632 goto release;
2633 }
2634 if (sd == sra->devs &&
2635 (errno == ENOENT || errno == E2BIG))
2636 /* Early kernel, no 'new_offset' file,
2637 * or kernel doesn't like us.
2638 * For RAID5/6 this is not fatal
2639 */
2640 return 1;
2641 pr_err("Cannot set new_offset for %s\n",
2642 dn);
2643 break;
2644 }
2645 }
2646 return err;
2647 release:
2648 return -1;
2649 fallback:
2650 /* Just use a backup file */
2651 return 1;
2652 }
2653
2654 static int raid10_reshape(char *container, int fd, char *devname,
2655 struct supertype *st, struct mdinfo *info,
2656 struct reshape *reshape,
2657 unsigned long long data_offset,
2658 int force, int verbose)
2659 {
2660 /* Changing raid_disks, layout, chunksize or possibly
2661 * just data_offset for a RAID10.
2662 * We must always change data_offset. We change by at least
2663 * ->min_offset_change which is the largest of the old and new
2664 * chunk sizes.
2665 * If raid_disks is increasing, then data_offset must decrease
2666 * by at least this copy size.
2667 * If raid_disks is unchanged, data_offset must increase or
2668 * decrease by at least min_offset_change but preferably by much more.
2669 * We choose half of the available space.
2670 * If raid_disks is decreasing, data_offset must increase by
2671 * at least min_offset_change. To allow for this, component_size
2672 * must be decreased by the same amount.
2673 *
2674 * So we calculate the required minimum and direction, possibly
2675 * reduce the component_size, then iterate through the devices
2676 * and set the new_data_offset.
2677 * If that all works, we set chunk_size, layout, raid_disks, and start
2678 * 'reshape'
2679 */
2680 struct mdinfo *sra;
2681 unsigned long long min;
2682 int err = 0;
2683
2684 sra = sysfs_read(fd, NULL,
2685 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2686 );
2687 if (!sra) {
2688 pr_err("%s: Cannot get array details from sysfs\n",
2689 devname);
2690 goto release;
2691 }
2692 min = reshape->min_offset_change;
2693
2694 if (info->delta_disks)
2695 sysfs_set_str(sra, NULL, "reshape_direction",
2696 info->delta_disks < 0 ? "backwards" : "forwards");
2697 if (info->delta_disks < 0 &&
2698 info->space_after < min) {
2699 int rv = sysfs_set_num(sra, NULL, "component_size",
2700 (sra->component_size -
2701 min)/2);
2702 if (rv) {
2703 pr_err("cannot reduce component size\n");
2704 goto release;
2705 }
2706 }
2707 err = set_new_data_offset(sra, st, devname, info->delta_disks, data_offset,
2708 min, 0);
2709 if (err == 1) {
2710 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2711 cont_err("supported on this kernel\n");
2712 err = -1;
2713 }
2714 if (err < 0)
2715 goto release;
2716
2717 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2718 err = errno;
2719 if (!err && sysfs_set_num(sra, NULL, "layout", reshape->after.layout) < 0)
2720 err = errno;
2721 if (!err && sysfs_set_num(sra, NULL, "raid_disks",
2722 info->array.raid_disks + info->delta_disks) < 0)
2723 err = errno;
2724 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2725 err = errno;
2726 if (err) {
2727 pr_err("Cannot set array shape for %s\n",
2728 devname);
2729 if (err == EBUSY &&
2730 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2731 cont_err(" Bitmap must be removed before shape can be changed\n");
2732 goto release;
2733 }
2734 sysfs_free(sra);
2735 return 0;
2736 release:
2737 sysfs_free(sra);
2738 return 1;
2739 }
2740
2741 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2742 {
2743 struct mdinfo *sra, *sd;
2744 /* Initialisation to silence compiler warning */
2745 unsigned long long min_space_before = 0, min_space_after = 0;
2746 int first = 1;
2747
2748 sra = sysfs_read(fd, NULL, GET_DEVS);
2749 if (!sra)
2750 return;
2751 for (sd = sra->devs; sd; sd = sd->next) {
2752 char *dn;
2753 int dfd;
2754 struct supertype *st2;
2755 struct mdinfo info2;
2756
2757 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2758 continue;
2759 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2760 dfd = dev_open(dn, O_RDONLY);
2761 if (dfd < 0)
2762 break;
2763 st2 = dup_super(st);
2764 if (st2->ss->load_super(st2, dfd, NULL)) {
2765 close(dfd);
2766 free(st2);
2767 break;
2768 }
2769 close(dfd);
2770 st2->ss->getinfo_super(st2, &info2, NULL);
2771 st2->ss->free_super(st2);
2772 free(st2);
2773 if (first ||
2774 min_space_before > info2.space_before)
2775 min_space_before = info2.space_before;
2776 if (first ||
2777 min_space_after > info2.space_after)
2778 min_space_after = info2.space_after;
2779 first = 0;
2780 }
2781 if (sd == NULL && !first) {
2782 info->space_after = min_space_after;
2783 info->space_before = min_space_before;
2784 }
2785 sysfs_free(sra);
2786 }
2787
2788 static void update_cache_size(char *container, struct mdinfo *sra,
2789 struct mdinfo *info,
2790 int disks, unsigned long long blocks)
2791 {
2792 /* Check that the internal stripe cache is
2793 * large enough, or it won't work.
2794 * It must hold at least 4 stripes of the larger
2795 * chunk size
2796 */
2797 unsigned long cache;
2798 cache = max(info->array.chunk_size, info->new_chunk);
2799 cache *= 4; /* 4 stripes minimum */
2800 cache /= 512; /* convert to sectors */
2801 /* make sure there is room for 'blocks' with a bit to spare */
2802 if (cache < 16 + blocks / disks)
2803 cache = 16 + blocks / disks;
2804 cache /= (4096/512); /* Convert from sectors to pages */
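/* Worked example with illustrative numbers: chunks of 512KiB (old) and
 * 128KiB (new), blocks=2048, disks=4 give cache = 512KiB*4/512 = 4096
 * sectors; 16 + 2048/4 = 528 is smaller, so cache stays 4096 sectors,
 * i.e. 512 pages.
 */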
2805
2806 if (sra->cache_size < cache)
2807 subarray_set_num(container, sra, "stripe_cache_size",
2808 cache+1);
2809 }
2810
2811 static int impose_reshape(struct mdinfo *sra,
2812 struct mdinfo *info,
2813 struct supertype *st,
2814 int fd,
2815 int restart,
2816 char *devname, char *container,
2817 struct reshape *reshape)
2818 {
2819 struct mdu_array_info_s array;
2820
2821 sra->new_chunk = info->new_chunk;
2822
2823 if (restart) {
2824 /* For external metadata the checkpoint saved by mdmon can be lost
2825 * or missed (due to e.g. a crash). Check whether md has progressed
2826 * farther through the restart than the metadata indicates.
2827 * If so, the metadata information is obsolete.
2828 */
2829 if (st->ss->external)
2830 verify_reshape_position(info, reshape->level);
2831 sra->reshape_progress = info->reshape_progress;
2832 } else {
2833 sra->reshape_progress = 0;
2834 if (reshape->after.data_disks < reshape->before.data_disks)
2835 /* start from the end of the new array */
2836 sra->reshape_progress = (sra->component_size
2837 * reshape->after.data_disks);
2838 }
2839
2840 md_get_array_info(fd, &array);
2841 if (info->array.chunk_size == info->new_chunk &&
2842 reshape->before.layout == reshape->after.layout &&
2843 st->ss->external == 0) {
2844 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2845 array.raid_disks = reshape->after.data_disks + reshape->parity;
2846 if (!restart && md_set_array_info(fd, &array) != 0) {
2847 int err = errno;
2848
2849 pr_err("Cannot set device shape for %s: %s\n",
2850 devname, strerror(errno));
2851
2852 if (err == EBUSY &&
2853 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2854 cont_err("Bitmap must be removed before shape can be changed\n");
2855
2856 goto release;
2857 }
2858 } else if (!restart) {
2859 /* set them all just in case some old 'new_*' value
2860 * persists from some earlier problem.
2861 */
2862 int err = 0;
2863 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2864 err = errno;
2865 if (!err && sysfs_set_num(sra, NULL, "layout",
2866 reshape->after.layout) < 0)
2867 err = errno;
2868 if (!err && subarray_set_num(container, sra, "raid_disks",
2869 reshape->after.data_disks +
2870 reshape->parity) < 0)
2871 err = errno;
2872 if (err) {
2873 pr_err("Cannot set device shape for %s\n",
2874 devname);
2875
2876 if (err == EBUSY &&
2877 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2878 cont_err("Bitmap must be removed before shape can be changed\n");
2879 goto release;
2880 }
2881 }
2882 return 0;
2883 release:
2884 return -1;
2885 }
2886
2887 static int impose_level(int fd, int level, char *devname, int verbose)
2888 {
2889 char *c;
2890 struct mdu_array_info_s array;
2891 struct mdinfo info;
2892
2893 if (sysfs_init(&info, fd, NULL)) {
2894 pr_err("failed to intialize sysfs.\n");
2895 return 1;
2896 }
2897
2898 md_get_array_info(fd, &array);
2899 if (level == 0 &&
2900 (array.level >= 4 && array.level <= 6)) {
2901 /* To convert to RAID0 we need to fail and
2902 * remove any non-data devices. */
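/* Only the layouts with parity at the end (ALGORITHM_PARITY_N for
 * RAID5, ALGORITHM_PARITY_N_6 for RAID6) leave plain striped data on
 * the first data_disks devices, which is why other layouts are
 * rejected below.
 */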
2903 int found = 0;
2904 int d;
2905 int data_disks = array.raid_disks - 1;
2906 if (array.level == 6)
2907 data_disks -= 1;
2908 if (array.level == 5 &&
2909 array.layout != ALGORITHM_PARITY_N)
2910 return -1;
2911 if (array.level == 6 &&
2912 array.layout != ALGORITHM_PARITY_N_6)
2913 return -1;
2914 sysfs_set_str(&info, NULL,"sync_action", "idle");
2915 /* First remove any spares so no recovery starts */
2916 for (d = 0, found = 0;
2917 d < MAX_DISKS && found < array.nr_disks;
2918 d++) {
2919 mdu_disk_info_t disk;
2920 disk.number = d;
2921 if (md_get_disk_info(fd, &disk) < 0)
2922 continue;
2923 if (disk.major == 0 && disk.minor == 0)
2924 continue;
2925 found++;
2926 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2927 disk.raid_disk < data_disks)
2928 /* keep this */
2929 continue;
2930 ioctl(fd, HOT_REMOVE_DISK,
2931 makedev(disk.major, disk.minor));
2932 }
2933 /* Now fail anything left */
2934 md_get_array_info(fd, &array);
2935 for (d = 0, found = 0;
2936 d < MAX_DISKS && found < array.nr_disks;
2937 d++) {
2938 mdu_disk_info_t disk;
2939 disk.number = d;
2940 if (md_get_disk_info(fd, &disk) < 0)
2941 continue;
2942 if (disk.major == 0 && disk.minor == 0)
2943 continue;
2944 found++;
2945 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2946 disk.raid_disk < data_disks)
2947 /* keep this */
2948 continue;
2949 ioctl(fd, SET_DISK_FAULTY,
2950 makedev(disk.major, disk.minor));
2951 hot_remove_disk(fd, makedev(disk.major, disk.minor), 1);
2952 }
2953 }
2954 c = map_num(pers, level);
2955 if (c) {
2956 int err = sysfs_set_str(&info, NULL, "level", c);
2957 if (err) {
2958 err = errno;
2959 pr_err("%s: could not set level to %s\n",
2960 devname, c);
2961 if (err == EBUSY &&
2962 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2963 cont_err("Bitmap must be removed before level can be changed\n");
2964 return err;
2965 }
2966 if (verbose >= 0)
2967 pr_err("level of %s changed to %s\n",
2968 devname, c);
2969 }
2970 return 0;
2971 }
2972
2973 int sigterm = 0;
2974 static void catch_term(int sig)
2975 {
2976 sigterm = 1;
2977 }
2978
2979 static int continue_via_systemd(char *devnm)
2980 {
2981 int skipped, i, pid, status;
2982 char pathbuf[1024];
2983 /* In a systemd/udev world, it is best to get systemd to
2984 * run "mdadm --grow --continue" rather than running in the
2985 * background.
2986 */
2987 switch(fork()) {
2988 case 0:
2989 /* FIXME yuk. CLOSE_EXEC?? */
2990 skipped = 0;
2991 for (i = 3; skipped < 20; i++)
2992 if (close(i) < 0)
2993 skipped++;
2994 else
2995 skipped = 0;
2996
2997 /* Don't want to see error messages from
2998 * systemctl. If the service doesn't exist,
2999 * we fork ourselves.
3000 */
3001 close(2);
3002 open("/dev/null", O_WRONLY);
3003 snprintf(pathbuf, sizeof(pathbuf), "mdadm-grow-continue@%s.service",
3004 devnm);
3005 status = execl("/usr/bin/systemctl", "systemctl",
3006 "start",
3007 pathbuf, NULL);
3008 status = execl("/bin/systemctl", "systemctl", "start",
3009 pathbuf, NULL);
3010 exit(1);
3011 case -1: /* Just do it ourselves. */
3012 break;
3013 default: /* parent - good */
3014 pid = wait(&status);
3015 if (pid >= 0 && status == 0)
3016 return 1;
3017 }
3018 return 0;
3019 }
3020
3021 static int reshape_array(char *container, int fd, char *devname,
3022 struct supertype *st, struct mdinfo *info,
3023 int force, struct mddev_dev *devlist,
3024 unsigned long long data_offset,
3025 char *backup_file, int verbose, int forked,
3026 int restart, int freeze_reshape)
3027 {
3028 struct reshape reshape;
3029 int spares_needed;
3030 char *msg;
3031 int orig_level = UnSet;
3032 int odisks;
3033 int delayed;
3034
3035 struct mdu_array_info_s array;
3036 char *c;
3037
3038 struct mddev_dev *dv;
3039 int added_disks;
3040
3041 int *fdlist = NULL;
3042 unsigned long long *offsets = NULL;
3043 int d;
3044 int nrdisks;
3045 int err;
3046 unsigned long blocks;
3047 unsigned long long array_size;
3048 int done;
3049 struct mdinfo *sra = NULL;
3050 char buf[20];
3051
3052 /* when reshaping a RAID0, the component_size might be zero.
3053 * So try to fix that up.
3054 */
3055 if (md_get_array_info(fd, &array) != 0) {
3056 dprintf("Cannot get array information.\n");
3057 goto release;
3058 }
3059 if (array.level == 0 && info->component_size == 0) {
3060 get_dev_size(fd, NULL, &array_size);
3061 info->component_size = array_size / array.raid_disks;
3062 }
3063
3064 if (array.level == 10)
3065 /* Need space_after info */
3066 get_space_after(fd, st, info);
3067
3068 if (info->reshape_active) {
3069 int new_level = info->new_level;
3070 info->new_level = UnSet;
3071 if (info->delta_disks > 0)
3072 info->array.raid_disks -= info->delta_disks;
3073 msg = analyse_change(devname, info, &reshape);
3074 info->new_level = new_level;
3075 if (info->delta_disks > 0)
3076 info->array.raid_disks += info->delta_disks;
3077 if (!restart)
3078 /* Make sure the array isn't read-only */
3079 ioctl(fd, RESTART_ARRAY_RW, 0);
3080 } else
3081 msg = analyse_change(devname, info, &reshape);
3082 if (msg) {
3083 /* if msg == "", error has already been printed */
3084 if (msg[0])
3085 pr_err("%s\n", msg);
3086 goto release;
3087 }
3088 if (restart &&
3089 (reshape.level != info->array.level ||
3090 reshape.before.layout != info->array.layout ||
3091 reshape.before.data_disks + reshape.parity !=
3092 info->array.raid_disks - max(0, info->delta_disks))) {
3093 pr_err("reshape info is not in native format - cannot continue.\n");
3094 goto release;
3095 }
3096
3097 if (st->ss->external && restart && (info->reshape_progress == 0) &&
3098 !((sysfs_get_str(info, NULL, "sync_action", buf, sizeof(buf)) > 0) &&
3099 (strncmp(buf, "reshape", 7) == 0))) {
3100 /* When a reshape is restarted from '0', the very beginning of the
3101 * array, it is possible that for external metadata the reshape and
3102 * array configuration have not happened yet.
3103 * Check whether md agrees that the reshape is restarted
3104 * from 0. If so, this is a regular reshape start after the reshape
3105 * switch in the metadata to the next array only.
3106 */
3107 if ((verify_reshape_position(info, reshape.level) >= 0) &&
3108 (info->reshape_progress == 0))
3109 restart = 0;
3110 }
3111 if (restart) {
3112 /* reshape already started. just skip to monitoring the reshape */
3113 if (reshape.backup_blocks == 0)
3114 return 0;
3115 if (restart & RESHAPE_NO_BACKUP)
3116 return 0;
3117
3118 /* Need 'sra' down at 'started:' */
3119 sra = sysfs_read(fd, NULL,
3120 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3121 GET_CACHE);
3122 if (!sra) {
3123 pr_err("%s: Cannot get array details from sysfs\n",
3124 devname);
3125 goto release;
3126 }
3127
3128 if (!backup_file)
3129 backup_file = locate_backup(sra->sys_name);
3130
3131 goto started;
3132 }
3133 /* The container is frozen but the array may not be.
3134 * So freeze the array so spares don't get put to the wrong use
3135 * FIXME there should probably be a cleaner separation between
3136 * freeze_array and freeze_container.
3137 */
3138 sysfs_freeze_array(info);
3139 /* Check we have enough spares to not be degraded */
3140 added_disks = 0;
3141 for (dv = devlist; dv ; dv=dv->next)
3142 added_disks++;
3143 spares_needed = max(reshape.before.data_disks,
3144 reshape.after.data_disks)
3145 + reshape.parity - array.raid_disks;
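/* e.g. growing a 4-device RAID5 (3 data + 1 parity) to 5 devices:
 * max(3, 4) + 1 - 4 = 1 spare is needed to avoid starting degraded.
 */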
3146
3147 if (!force &&
3148 info->new_level > 1 && info->array.level > 1 &&
3149 spares_needed > info->array.spare_disks + added_disks) {
3150 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
3151 " Use --force to over-ride this check.\n",
3152 spares_needed,
3153 spares_needed == 1 ? "" : "s",
3154 info->array.spare_disks + added_disks);
3155 goto release;
3156 }
3157 /* Check we have enough spares to not fail */
3158 spares_needed = max(reshape.before.data_disks,
3159 reshape.after.data_disks)
3160 - array.raid_disks;
3161 if ((info->new_level > 1 || info->new_level == 0) &&
3162 spares_needed > info->array.spare_disks +added_disks) {
3163 pr_err("Need %d spare%s to create working array, and only have %d.\n",
3164 spares_needed,
3165 spares_needed == 1 ? "" : "s",
3166 info->array.spare_disks + added_disks);
3167 goto release;
3168 }
3169
3170 if (reshape.level != array.level) {
3171 int err = impose_level(fd, reshape.level, devname, verbose);
3172 if (err)
3173 goto release;
3174 info->new_layout = UnSet; /* after level change,
3175 * layout is meaningless */
3176 orig_level = array.level;
3177 sysfs_freeze_array(info);
3178
3179 if (reshape.level > 0 && st->ss->external) {
3180 /* make sure mdmon is aware of the new level */
3181 if (mdmon_running(container))
3182 flush_mdmon(container);
3183
3184 if (!mdmon_running(container))
3185 start_mdmon(container);
3186 ping_monitor(container);
3187 if (mdmon_running(container) &&
3188 st->update_tail == NULL)
3189 st->update_tail = &st->updates;
3190 }
3191 }
3192 /* ->reshape_super might have chosen some spares from the
3193 * container that it wants to be part of the new array.
3194 * We can collect them with ->container_content and give
3195 * them to the kernel.
3196 */
3197 if (st->ss->reshape_super && st->ss->container_content) {
3198 char *subarray = strchr(info->text_version+1, '/')+1;
3199 struct mdinfo *info2 =
3200 st->ss->container_content(st, subarray);
3201 struct mdinfo *d;
3202
3203 if (info2) {
3204 if (sysfs_init(info2, fd, st->devnm)) {
3205 pr_err("unable to initialize sysfs for %s\n",
3206 st->devnm);
3207 free(info2);
3208 goto release;
3209 }
3210 /* When increasing number of devices, we need to set
3211 * new raid_disks before adding these, or they might
3212 * be rejected.
3213 */
3214 if (reshape.backup_blocks &&
3215 reshape.after.data_disks > reshape.before.data_disks)
3216 subarray_set_num(container, info2, "raid_disks",
3217 reshape.after.data_disks +
3218 reshape.parity);
3219 for (d = info2->devs; d; d = d->next) {
3220 if (d->disk.state == 0 &&
3221 d->disk.raid_disk >= 0) {
3222 /* This is a spare that wants to
3223 * be part of the array.
3224 */
3225 add_disk(fd, st, info2, d);
3226 }
3227 }
3228 sysfs_free(info2);
3229 }
3230 }
3231 /* We might have been given some devices to add to the
3232 * array. Now that the array has been changed to the right
3233 * level and frozen, we can safely add them.
3234 */
3235 if (devlist) {
3236 if (Manage_subdevs(devname, fd, devlist, verbose,
3237 0, NULL, 0))
3238 goto release;
3239 }
3240
3241 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
3242 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
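/* backup_blocks is in sectors: one old data stripe, i.e.
 * before.data_disks chunks of chunk_size bytes divided by 512.
 */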
3243 if (reshape.backup_blocks == 0) {
3244 /* No restriping needed, but we might need to impose
3245 * some more changes: layout, raid_disks, chunk_size
3246 */
3247 /* read current array info */
3248 if (md_get_array_info(fd, &array) != 0) {
3249 dprintf("Cannot get array information.\n");
3250 goto release;
3251 }
3252 /* compare current array info with new values and if
3253 * it is different update them to new */
3254 if (info->new_layout != UnSet &&
3255 info->new_layout != array.layout) {
3256 array.layout = info->new_layout;
3257 if (md_set_array_info(fd, &array) != 0) {
3258 pr_err("failed to set new layout\n");
3259 goto release;
3260 } else if (verbose >= 0)
3261 printf("layout for %s set to %d\n",
3262 devname, array.layout);
3263 }
3264 if (info->delta_disks != UnSet &&
3265 info->delta_disks != 0 &&
3266 array.raid_disks != (info->array.raid_disks + info->delta_disks)) {
3267 array.raid_disks += info->delta_disks;
3268 if (md_set_array_info(fd, &array) != 0) {
3269 pr_err("failed to set raid disks\n");
3270 goto release;
3271 } else if (verbose >= 0) {
3272 printf("raid_disks for %s set to %d\n",
3273 devname, array.raid_disks);
3274 }
3275 }
3276 if (info->new_chunk != 0 &&
3277 info->new_chunk != array.chunk_size) {
3278 if (sysfs_set_num(info, NULL,
3279 "chunk_size", info->new_chunk) != 0) {
3280 pr_err("failed to set chunk size\n");
3281 goto release;
3282 } else if (verbose >= 0)
3283 printf("chunk size for %s set to %d\n",
3284 devname, array.chunk_size);
3285 }
3286 unfreeze(st);
3287 return 0;
3288 }
3289
3290 /*
3291 * There are three possibilities.
3292 * 1/ The array will shrink.
3293 * We need to ensure the reshape will pause before reaching
3294 * the 'critical section'. We also need to fork and wait for
3295 * that to happen. When it does we
3296 * suspend/backup/complete/unfreeze
3297 *
3298 * 2/ The array will not change size.
3299 * This requires that we keep a backup of a sliding window
3300 * so that we can restore data after a crash. So we need
3301 * to fork and monitor progress.
3302 * In future we will allow the data_offset to change, so
3303 * a sliding backup becomes unnecessary.
3304 *
3305 * 3/ The array will grow. This is relatively easy.
3306 * However the kernel's restripe routines will cheerfully
3307 * overwrite some early data before it is safe. So we
3308 * need to make a backup of the early parts of the array
3309 * and be ready to restore it if rebuild aborts very early.
3310 * For externally managed metadata, we still need a forked
3311 * child to monitor the reshape and suspend IO over the region
3312 * that is being reshaped.
3313 *
3314 * We backup data by writing it to one spare, or to a
3315 * file which was given on command line.
3316 *
3317 * In each case, we first make sure that storage is available
3318 * for the required backup.
3319 * Then we:
3320 * - request the shape change.
3321 * - fork to handle backup etc.
3322 */
3323 /* Check that we can hold all the data */
3324 get_dev_size(fd, NULL, &array_size);
3325 if (reshape.new_size < (array_size/512)) {
3326 pr_err("this change will reduce the size of the array.\n"
3327 " use --grow --array-size first to truncate array.\n"
3328 " e.g. mdadm --grow %s --array-size %llu\n",
3329 devname, reshape.new_size/2);
3330 goto release;
3331 }
3332
3333 if (array.level == 10) {
3334 /* Reshaping RAID10 does not require any data backup by
3335 * user-space. Instead it requires that the data_offset
3336 * is changed to avoid the need for backup.
3337 * So this is handled very separately
3338 */
3339 if (restart)
3340 /* Nothing to do. */
3341 return 0;
3342 return raid10_reshape(container, fd, devname, st, info,
3343 &reshape, data_offset,
3344 force, verbose);
3345 }
3346 sra = sysfs_read(fd, NULL,
3347 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3348 GET_CACHE);
3349 if (!sra) {
3350 pr_err("%s: Cannot get array details from sysfs\n",
3351 devname);
3352 goto release;
3353 }
3354
3355 if (!backup_file)
3356 switch(set_new_data_offset(sra, st, devname,
3357 reshape.after.data_disks - reshape.before.data_disks,
3358 data_offset,
3359 reshape.min_offset_change, 1)) {
3360 case -1:
3361 goto release;
3362 case 0:
3363 /* Updated data_offset, so it's easy now */
3364 update_cache_size(container, sra, info,
3365 min(reshape.before.data_disks,
3366 reshape.after.data_disks),
3367 reshape.backup_blocks);
3368
3369 /* Right, everything seems fine. Let's kick things off.
3370 */
3371 sync_metadata(st);
3372
3373 if (impose_reshape(sra, info, st, fd, restart,
3374 devname, container, &reshape) < 0)
3375 goto release;
3376 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3377 struct mdinfo *sd;
3378 if (errno != EINVAL) {
3379 pr_err("Failed to initiate reshape!\n");
3380 goto release;
3381 }
3382 /* revert data_offset and try the old way */
3383 for (sd = sra->devs; sd; sd = sd->next) {
3384 sysfs_set_num(sra, sd, "new_offset",
3385 sd->data_offset);
3386 sysfs_set_str(sra, NULL, "reshape_direction",
3387 "forwards");
3388 }
3389 break;
3390 }
3391 if (info->new_level == reshape.level)
3392 return 0;
3393 /* need to adjust level when reshape completes */
3394 switch(fork()) {
3395 case -1: /* ignore error, but don't wait */
3396 return 0;
3397 default: /* parent */
3398 return 0;
3399 case 0:
3400 map_fork();
3401 break;
3402 }
3403 close(fd);
3404 wait_reshape(sra);
3405 fd = open_dev(sra->sys_name);
3406 if (fd >= 0)
3407 impose_level(fd, info->new_level, devname, verbose);
3408 return 0;
3409 case 1: /* Couldn't set data_offset, try the old way */
3410 if (data_offset != INVALID_SECTORS) {
3411 pr_err("Cannot update data_offset on this array\n");
3412 goto release;
3413 }
3414 break;
3415 }
3416
3417 started:
3418 /* Decide how many blocks (sectors) for a reshape
3419 * unit. The number we have so far is just a minimum
3420 */
3421 blocks = reshape.backup_blocks;
3422 if (reshape.before.data_disks ==
3423 reshape.after.data_disks) {
3424 /* Make 'blocks' bigger for better throughput, but
3425 * not so big that we reject it below.
3426 * Try for 16 megabytes
3427 */
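/* 16*1024*2 sectors == 16MiB; the blocks*32 bound also keeps the
 * unit well below component_size/2, so the sanity check below
 * still passes.
 */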
3428 while (blocks * 32 < sra->component_size &&
3429 blocks < 16*1024*2)
3430 blocks *= 2;
3431 } else
3432 pr_err("Need to backup %luK of critical section..\n", blocks/2);
3433
3434 if (blocks >= sra->component_size/2) {
3435 pr_err("%s: Something wrong - reshape aborted\n",
3436 devname);
3437 goto release;
3438 }
3439
3440 /* Now we need to open all these devices so we can read/write.
3441 */
3442 nrdisks = max(reshape.before.data_disks,
3443 reshape.after.data_disks) + reshape.parity
3444 + sra->array.spare_disks;
3445 fdlist = xcalloc((1+nrdisks), sizeof(int));
3446 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3447
3448 odisks = reshape.before.data_disks + reshape.parity;
3449 d = reshape_prepare_fdlist(devname, sra, odisks,
3450 nrdisks, blocks, backup_file,
3451 fdlist, offsets);
3452 if (d < odisks) {
3453 goto release;
3454 }
3455 if ((st->ss->manage_reshape == NULL) ||
3456 (st->ss->recover_backup == NULL)) {
3457 if (backup_file == NULL) {
3458 if (reshape.after.data_disks <=
3459 reshape.before.data_disks) {
3460 pr_err("%s: Cannot grow - need backup-file\n",
3461 devname);
3462 pr_err(" Please provide one with \"--backup=...\"\n");
3463 goto release;
3464 } else if (d == odisks) {
3465 pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname);
3466 goto release;
3467 }
3468 } else {
3469 if (!reshape_open_backup_file(backup_file, fd, devname,
3470 (signed)blocks,
3471 fdlist+d, offsets+d,
3472 sra->sys_name,
3473 restart)) {
3474 goto release;
3475 }
3476 d++;
3477 }
3478 }
3479
3480 update_cache_size(container, sra, info,
3481 min(reshape.before.data_disks, reshape.after.data_disks),
3482 blocks);
3483
3484 /* Right, everything seems fine. Let's kick things off.
3485 * If only changing raid_disks, use ioctl, else use
3486 * sysfs.
3487 */
3488 sync_metadata(st);
3489
3490 if (impose_reshape(sra, info, st, fd, restart,
3491 devname, container, &reshape) < 0)
3492 goto release;
3493
3494 err = start_reshape(sra, restart, reshape.before.data_disks,
3495 reshape.after.data_disks);
3496 if (err) {
3497 pr_err("Cannot %s reshape for %s\n",
3498 restart ? "continue" : "start",
3499 devname);
3500 goto release;
3501 }
3502 if (restart)
3503 sysfs_set_str(sra, NULL, "array_state", "active");
3504 if (freeze_reshape) {
3505 free(fdlist);
3506 free(offsets);
3507 sysfs_free(sra);
3508 pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n",
3509 sra->reshape_progress);
3510 return 1;
3511 }
3512
3513 if (!forked && !check_env("MDADM_NO_SYSTEMCTL"))
3514 if (continue_via_systemd(container ?: sra->sys_name)) {
3515 free(fdlist);
3516 free(offsets);
3517 sysfs_free(sra);
3518 return 0;
3519 }
3520
3521 /* Now we just need to kick off the reshape and watch, while
3522 * handling backups of the data...
3523 * This is all done by a forked background process.
3524 */
3525 switch(forked ? 0 : fork()) {
3526 case -1:
3527 pr_err("Cannot run child to monitor reshape: %s\n",
3528 strerror(errno));
3529 abort_reshape(sra);
3530 goto release;
3531 default:
3532 free(fdlist);
3533 free(offsets);
3534 sysfs_free(sra);
3535 return 0;
3536 case 0:
3537 map_fork();
3538 break;
3539 }
3540
3541 /* If another array on the same devices is busy, the
3542 * reshape will wait for them. This would mean that
3543 * the first section that we suspend will stay suspended
3544 * for a long time. So check on that possibility
3545 * by looking for "DELAYED" in /proc/mdstat, and if found,
3546 * wait a while
3547 */
3548 do {
3549 struct mdstat_ent *mds, *m;
3550 delayed = 0;
3551 mds = mdstat_read(1, 0);
3552 for (m = mds; m; m = m->next)
3553 if (strcmp(m->devnm, sra->sys_name) == 0) {
3554 if (m->resync &&
3555 m->percent == RESYNC_DELAYED)
3556 delayed = 1;
3557 if (m->resync == 0)
3558 /* Haven't started the reshape thread
3559 * yet, wait a bit
3560 */
3561 delayed = 2;
3562 break;
3563 }
3564 free_mdstat(mds);
3565 if (delayed == 1 && get_linux_version() < 3007000) {
3566 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3567 " You might experience problems until other reshapes complete.\n");
3568 delayed = 0;
3569 }
3570 if (delayed)
3571 mdstat_wait(30 - (delayed-1) * 25);
3572 } while (delayed);
3573 mdstat_close();
3574 close(fd);
3575 if (check_env("MDADM_GROW_VERIFY"))
3576 fd = open(devname, O_RDONLY | O_DIRECT);
3577 else
3578 fd = -1;
3579 mlockall(MCL_FUTURE);
3580
3581 signal(SIGTERM, catch_term);
3582
3583 if (st->ss->external) {
3584 /* metadata handler takes it from here */
3585 done = st->ss->manage_reshape(
3586 fd, sra, &reshape, st, blocks,
3587 fdlist, offsets,
3588 d - odisks, fdlist+odisks,
3589 offsets+odisks);
3590 } else
3591 done = child_monitor(
3592 fd, sra, &reshape, st, blocks,
3593 fdlist, offsets,
3594 d - odisks, fdlist+odisks,
3595 offsets+odisks);
3596
3597 free(fdlist);
3598 free(offsets);
3599
3600 if (backup_file && done) {
3601 char *bul;
3602 bul = make_backup(sra->sys_name);
3603 if (bul) {
3604 char buf[1024];
3605 int l = readlink(bul, buf, sizeof(buf) - 1);
3606 if (l > 0) {
3607 buf[l]=0;
3608 unlink(buf);
3609 }
3610 unlink(bul);
3611 free(bul);
3612 }
3613 unlink(backup_file);
3614 }
3615 if (!done) {
3616 abort_reshape(sra);
3617 goto out;
3618 }
3619
3620 if (!st->ss->external &&
3621 !(reshape.before.data_disks != reshape.after.data_disks &&
3622 info->custom_array_size) && info->new_level == reshape.level &&
3623 !forked) {
3624 /* no need to wait for the reshape to finish as
3625 * there is nothing more to do.
3626 */
3627 sysfs_free(sra);
3628 exit(0);
3629 }
3630 wait_reshape(sra);
3631
3632 if (st->ss->external) {
3633 /* Re-load the metadata as much could have changed */
3634 int cfd = open_dev(st->container_devnm);
3635 if (cfd >= 0) {
3636 flush_mdmon(container);
3637 st->ss->free_super(st);
3638 st->ss->load_container(st, cfd, container);
3639 close(cfd);
3640 }
3641 }
3642
3643 /* set new array size if required; custom_array_size is used
3644 * by this metadata.
3645 */
3646 if (reshape.before.data_disks !=
3647 reshape.after.data_disks &&
3648 info->custom_array_size)
3649 set_array_size(st, info, info->text_version);
3650
3651 if (info->new_level != reshape.level) {
3652 if (fd < 0)
3653 fd = open(devname, O_RDONLY);
3654 impose_level(fd, info->new_level, devname, verbose);
3655 close(fd);
3656 if (info->new_level == 0)
3657 st->update_tail = NULL;
3658 }
3659 out:
3660 sysfs_free(sra);
3661 if (forked)
3662 return 0;
3663 unfreeze(st);
3664 exit(0);
3665
3666 release:
3667 free(fdlist);
3668 free(offsets);
3669 if (orig_level != UnSet && sra) {
3670 c = map_num(pers, orig_level);
3671 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3672 pr_err("aborting level change\n");
3673 }
3674 sysfs_free(sra);
3675 if (!forked)
3676 unfreeze(st);
3677 return 1;
3678 }
3679
3680 /* mdfd handle is passed to be closed in child process (after fork).
3681 */
3682 int reshape_container(char *container, char *devname,
3683 int mdfd,
3684 struct supertype *st,
3685 struct mdinfo *info,
3686 int force,
3687 char *backup_file, int verbose,
3688 int forked, int restart, int freeze_reshape)
3689 {
3690 struct mdinfo *cc = NULL;
3691 int rv = restart;
3692 char last_devnm[32] = "";
3693
3694 /* component_size is not meaningful for a container,
3695 * so pass '0' meaning 'no change'
3696 */
3697 if (!restart &&
3698 reshape_super(st, 0, info->new_level,
3699 info->new_layout, info->new_chunk,
3700 info->array.raid_disks, info->delta_disks,
3701 backup_file, devname, APPLY_METADATA_CHANGES,
3702 verbose)) {
3703 unfreeze(st);
3704 return 1;
3705 }
3706
3707 sync_metadata(st);
3708
3709 /* ping monitor to be sure that update is on disk
3710 */
3711 ping_monitor(container);
3712
3713 if (!forked && !freeze_reshape && !check_env("MDADM_NO_SYSTEMCTL"))
3714 if (continue_via_systemd(container))
3715 return 0;
3716
3717 switch (forked ? 0 : fork()) {
3718 case -1: /* error */
3719 perror("Cannot fork to complete reshape\n");
3720 unfreeze(st);
3721 return 1;
3722 default: /* parent */
3723 if (!freeze_reshape)
3724 printf("%s: multi-array reshape continues in background\n", Name);
3725 return 0;
3726 case 0: /* child */
3727 map_fork();
3728 break;
3729 }
3730
3731 /* close unused handle in child process
3732 */
3733 if (mdfd > -1)
3734 close(mdfd);
3735
3736 while(1) {
3737 /* For each member array with reshape_active,
3738 * we need to perform the reshape.
3739 * We pick the first array that needs reshaping and
3740 * reshape it. reshape_array() will re-read the metadata
3741 * so the next time through a different array should be
3742 * ready for reshape.
3743 * It is possible that the 'different' array will not
3744 * be assembled yet. In that case we simply exit.
3745 * When it is assembled, the mdadm which assembles it
3746 * will take over the reshape.
3747 */
3748 struct mdinfo *content;
3749 int fd;
3750 struct mdstat_ent *mdstat;
3751 char *adev;
3752 dev_t devid;
3753
3754 sysfs_free(cc);
3755
3756 cc = st->ss->container_content(st, NULL);
3757
3758 for (content = cc; content ; content = content->next) {
3759 char *subarray;
3760 if (!content->reshape_active)
3761 continue;
3762
3763 subarray = strchr(content->text_version+1, '/')+1;
3764 mdstat = mdstat_by_subdev(subarray, container);
3765 if (!mdstat)
3766 continue;
3767 if (mdstat->active == 0) {
3768 pr_err("Skipping inactive array %s.\n",
3769 mdstat->devnm);
3770 free_mdstat(mdstat);
3771 mdstat = NULL;
3772 continue;
3773 }
3774 break;
3775 }
3776 if (!content)
3777 break;
3778
3779 devid = devnm2devid(mdstat->devnm);
3780 adev = map_dev(major(devid), minor(devid), 0);
3781 if (!adev)
3782 adev = content->text_version;
3783
3784 fd = open_dev(mdstat->devnm);
3785 if (fd < 0) {
3786 pr_err("Device %s cannot be opened for reshape.\n", adev);
3787 break;
3788 }
3789
3790 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3791 /* Do not allow for multiple reshape_array() calls for
3792 * the same array.
3793 * It can happen when reshape_array() returns without
3794 * error, when reshape is not finished (wrong reshape
3795 * starting/continuation conditions). Mdmon doesn't
3796 * switch to next array in container and reentry
3797 * conditions for the same array occur.
3798 * This is possibly interim until the behaviour of
3799 * reshape_array() is resolved.
3800 */
3801 printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev);
3802 close(fd);
3803 break;
3804 }
3805 strcpy(last_devnm, mdstat->devnm);
3806
3807 if (sysfs_init(content, fd, mdstat->devnm)) {
3808 pr_err("Unable to initialize sysfs for %s\n",
3809 mdstat->devnm);
3810 rv = 1;
3811 break;
3812 }
3813
3814 if (mdmon_running(container))
3815 flush_mdmon(container);
3816
3817 rv = reshape_array(container, fd, adev, st,
3818 content, force, NULL, INVALID_SECTORS,
3819 backup_file, verbose, 1, restart,
3820 freeze_reshape);
3821 close(fd);
3822
3823 if (freeze_reshape) {
3824 sysfs_free(cc);
3825 exit(0);
3826 }
3827
3828 restart = 0;
3829 if (rv)
3830 break;
3831
3832 if (mdmon_running(container))
3833 flush_mdmon(container);
3834 }
3835 if (!rv)
3836 unfreeze(st);
3837 sysfs_free(cc);
3838 exit(0);
3839 }
3840
3841 /*
3842 * We run a child process in the background which performs the following
3843 * steps:
3844 * - wait for resync to reach a certain point
3845 * - suspend io to the following section
3846 * - backup that section
3847 * - allow resync to proceed further
3848 * - resume io
3849 * - discard the backup.
3850 *
3851 * These are combined in slightly different ways in the three cases.
3852 * Grow:
3853 * - suspend/backup/allow/wait/resume/discard
3854 * Shrink:
3855 * - allow/wait/suspend/backup/allow/wait/resume/discard
3856 * same-size:
3857 * - wait/resume/discard/suspend/backup/allow
3858 *
3859 * suspend/backup/allow always come together
3860 * wait/resume/discard do too.
3861 * For the same-size case we have two backups to improve flow.
3862 *
3863 */
3864
3865 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3866 unsigned long long backup_point,
3867 unsigned long long wait_point,
3868 unsigned long long *suspend_point,
3869 unsigned long long *reshape_completed, int *frozen)
3870 {
3871 /* This function is called repeatedly by the reshape manager.
3872 * It determines how much progress can safely be made and allows
3873 * that progress.
3874 * - 'info' identifies the array and particularly records in
3875 * ->reshape_progress the metadata's knowledge of progress
3876 * This is a sector offset from the start of the array
3877 * of the next array block to be relocated. This number
3878 * may increase from 0 or decrease from array_size, depending
3879 * on the type of reshape that is happening.
3880 * Note that in contrast, 'sync_completed' is a block count of the
3881 * reshape so far. It gives the distance between the start point
3882 * (head or tail of device) and the next place that data will be
3883 * written. It always increases.
3884 * - 'reshape' is the structure created by analyse_change
3885 * - 'backup_point' shows how much the metadata manager has backed-up
3886 * data. For reshapes with increasing progress, it is the next address
3887 * to be backed up, previous addresses have been backed-up. For
3888 * decreasing progress, it is the earliest address that has been
3889 * backed up - later addresses are also backed up.
3890 * So addresses between reshape_progress and backup_point are
3891 * backed up providing those are in the 'correct' order.
3892 * - 'wait_point' is an array address. When reshape_completed
3893 * passes this point, progress_reshape should return. It might
3894 * return earlier if it determines that ->reshape_progress needs
3895 * to be updated or further backup is needed.
3896 * - suspend_point is maintained by progress_reshape and the caller
3897 * should not touch it except to initialise to zero.
3898 * It is an array address and it only increases in 2.6.37 and earlier.
3899 * This makes it difficult to handle reducing reshapes with
3900 * external metadata.
3901 * However: it is similar to backup_point in that it records the
3902 * other end of a suspended region from reshape_progress.
3903 * It is moved to extend the region that is safe to back up and/or
3904 * reshape.
3905 * - reshape_completed is read from sysfs and returned. The caller
3906 * should copy this into ->reshape_progress when it has reason to
3907 * believe that the metadata knows this, and any backup outside this
3908 * has been erased.
3909 *
3910 * Return value is:
3911 * 1 if more data, from backup_point but only as far as suspend_point,
3912 * should be backed up
3913 * 0 if things are progressing smoothly
3914 * -1 if the reshape is finished because it is all done,
3915 * -2 if the reshape is finished due to an error.
3916 */
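/*
 * Orientation example (hypothetical numbers): reshape_progress is an
 * offset in array sectors while sync_completed counts sectors per
 * device, so for an advancing reshape the two are related by roughly
 *   reshape_progress ~= sync_completed * after.data_disks
 * e.g. sync_completed = 200000 with 5 data disks after the reshape
 * corresponds to about 1000000 array sectors; this is exactly the
 * conversion applied to 'completed' near the end of this function.
 */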
3917
3918 int advancing = (reshape->after.data_disks
3919 >= reshape->before.data_disks);
3920 unsigned long long need_backup; /* All data between start of array and
3921 * here will at some point need to
3922 * be backed up.
3923 */
3924 unsigned long long read_offset, write_offset;
3925 unsigned long long write_range;
3926 unsigned long long max_progress, target, completed;
3927 unsigned long long array_size = (info->component_size
3928 * reshape->before.data_disks);
3929 int fd;
3930 char buf[20];
3931
3932 /* First, we unsuspend any region that is now known to be safe.
3933 * If suspend_point is on the 'wrong' side of reshape_progress, then
3934 * we don't have or need suspension at the moment. This is true for
3935 * native metadata when we don't need to back-up.
3936 */
3937 if (advancing) {
3938 if (info->reshape_progress <= *suspend_point)
3939 sysfs_set_num(info, NULL, "suspend_lo",
3940 info->reshape_progress);
3941 } else {
3942 /* Note: this won't work in 2.6.37 and before.
3943 * Something somewhere should make sure we don't need it!
3944 */
3945 if (info->reshape_progress >= *suspend_point)
3946 sysfs_set_num(info, NULL, "suspend_hi",
3947 info->reshape_progress);
3948 }
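/*
 * Example (made-up numbers): for an advancing reshape with
 * reshape_progress = 1000000 and *suspend_point = 1500000, the write
 * above raises suspend_lo to 1000000, releasing everything that has
 * already been relocated while [1000000, 1500000) stays suspended
 * (suspend_hi is pushed forward further below).
 */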
3949
3950 /* Now work out how far it is safe to progress.
3951 * If the read_offset for ->reshape_progress is less than
3952 * 'blocks' beyond the write_offset, we can only progress as far
3953 * as a backup.
3954 * Otherwise we can progress until the write_offset for the new location
3955 * reaches (within 'blocks' of) the read_offset at the current location.
3956 * However that region must be suspended unless we are using native
3957 * metadata.
3958 * If we need to suspend more, we limit it to 128M per device, which is
3959 * rather arbitrary and should be some time-based calculation.
3960 */
3961 read_offset = info->reshape_progress / reshape->before.data_disks;
3962 write_offset = info->reshape_progress / reshape->after.data_disks;
3963 write_range = info->new_chunk/512;
3964 if (reshape->before.data_disks == reshape->after.data_disks)
3965 need_backup = array_size;
3966 else
3967 need_backup = reshape->backup_blocks;
3968 if (advancing) {
3969 if (read_offset < write_offset + write_range)
3970 max_progress = backup_point;
3971 else
3972 max_progress =
3973 read_offset *
3974 reshape->after.data_disks;
3975 } else {
3976 if (read_offset > write_offset - write_range)
3977 /* Can only progress as far as has been backed up,
3978 * which must be suspended */
3979 max_progress = backup_point;
3980 else if (info->reshape_progress <= need_backup)
3981 max_progress = backup_point;
3982 else {
3983 if (info->array.major_version >= 0)
3984 /* Can progress until backup is needed */
3985 max_progress = need_backup;
3986 else {
3987 /* Can progress until metadata update is required */
3988 max_progress =
3989 read_offset *
3990 reshape->after.data_disks;
3991 /* but data must be suspended */
3992 if (max_progress < *suspend_point)
3993 max_progress = *suspend_point;
3994 }
3995 }
3996 }
3997
3998 /* We know it is safe to progress to 'max_progress' providing
3999 * it is suspended or we are using native metadata.
4000 * Consider extending suspend_point 128M per device if it
4001 * is less than 64M per device beyond reshape_progress.
4002 * But always do a multiple of 'blocks'
4003 * FIXME this is too big - it takes too long to complete
4004 * this much.
4005 */
4006 target = 64*1024*2 * min(reshape->before.data_disks,
4007 reshape->after.data_disks);
4008 target /= reshape->backup_blocks;
4009 if (target < 2)
4010 target = 2;
4011 target *= reshape->backup_blocks;
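/* Worked example of the sizing above (hypothetical geometry): with
 * min(before, after) = 4 data disks and backup_blocks = 2048 sectors,
 * target = 64*1024*2 * 4 = 524288, 524288/2048 = 256, *2048 = 524288
 * array sectors again, i.e. 64MiB per device; suspend_point is then
 * advanced by up to 2 * target below, matching the "128M per device"
 * mentioned in the comment above.
 */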
4012
4013 /* For externally managed metadata we always need to suspend IO to
4014 * the area being reshaped so we regularly push suspend_point forward.
4015 * For native metadata we only need the suspend if we are going to do
4016 * a backup.
4017 */
4018 if (advancing) {
4019 if ((need_backup > info->reshape_progress ||
4020 info->array.major_version < 0) &&
4021 *suspend_point < info->reshape_progress + target) {
4022 if (need_backup < *suspend_point + 2 * target)
4023 *suspend_point = need_backup;
4024 else if (*suspend_point + 2 * target < array_size)
4025 *suspend_point += 2 * target;
4026 else
4027 *suspend_point = array_size;
4028 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
4029 if (max_progress > *suspend_point)
4030 max_progress = *suspend_point;
4031 }
4032 } else {
4033 if (info->array.major_version >= 0) {
4034 /* Only need to suspend when about to backup */
4035 if (info->reshape_progress < need_backup * 2 &&
4036 *suspend_point > 0) {
4037 *suspend_point = 0;
4038 sysfs_set_num(info, NULL, "suspend_lo", 0);
4039 sysfs_set_num(info, NULL, "suspend_hi", need_backup);
4040 }
4041 } else {
4042 /* Need to suspend continually */
4043 if (info->reshape_progress < *suspend_point)
4044 *suspend_point = info->reshape_progress;
4045 if (*suspend_point + target < info->reshape_progress)
4046 /* No need to move suspend region yet */;
4047 else {
4048 if (*suspend_point >= 2 * target)
4049 *suspend_point -= 2 * target;
4050 else
4051 *suspend_point = 0;
4052 sysfs_set_num(info, NULL, "suspend_lo",
4053 *suspend_point);
4054 }
4055 if (max_progress < *suspend_point)
4056 max_progress = *suspend_point;
4057 }
4058 }
4059
4060 /* now set sync_max to allow that progress. sync_max, like
4061 * sync_completed is a count of sectors written per device, so
4062 * we find the difference between max_progress and the start point,
4063 * and divide that by after.data_disks to get a sync_max
4064 * number.
4065 * At the same time we convert wait_point to a similar number
4066 * for comparing against sync_completed.
4067 */
4068 /* scale down max_progress to per_disk */
4069 max_progress /= reshape->after.data_disks;
4070 /* Round to chunk size as some kernels give an erroneously high number */
4071 max_progress /= info->new_chunk/512;
4072 max_progress *= info->new_chunk/512;
4073 /* And round to old chunk size as the kernel wants that */
4074 max_progress /= info->array.chunk_size/512;
4075 max_progress *= info->array.chunk_size/512;
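/* Example of the rounding above (hypothetical values): max_progress =
 * 1000000 array sectors with 4 data disks after the reshape gives
 * 250000 per-device sectors; with a new chunk of 1024 sectors this
 * rounds down to 249856, and rounding to an old chunk of 128 sectors
 * leaves it at 249856, which is what gets written to sync_max below.
 */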
4076 /* Limit progress to the whole device */
4077 if (max_progress > info->component_size)
4078 max_progress = info->component_size;
4079 wait_point /= reshape->after.data_disks;
4080 if (!advancing) {
4081 /* switch from 'device offset' to 'processed block count' */
4082 max_progress = info->component_size - max_progress;
4083 wait_point = info->component_size - wait_point;
4084 }
4085
4086 if (!*frozen)
4087 sysfs_set_num(info, NULL, "sync_max", max_progress);
4088
4089 /* Now wait. If we have already reached the point that we were
4090 * asked to wait to, don't wait at all, else wait for any change.
4091 * We need to select on 'sync_completed' as that is the place where
4092 * notifications happen, but we are really interested in
4093 * 'reshape_position'
4094 */
4095 fd = sysfs_get_fd(info, NULL, "sync_completed");
4096 if (fd < 0)
4097 goto check_progress;
4098
4099 if (sysfs_fd_get_ll(fd, &completed) < 0)
4100 goto check_progress;
4101
4102 while (completed < max_progress && completed < wait_point) {
4103 /* Check that sync_action is still 'reshape' to avoid
4104 * waiting forever on a dead array
4105 */
4106 char action[20];
4107 if (sysfs_get_str(info, NULL, "sync_action",
4108 action, 20) <= 0 ||
4109 strncmp(action, "reshape", 7) != 0)
4110 break;
4111 /* Some kernels reset 'sync_completed' to zero
4112 * before setting 'sync_action' to 'idle'.
4113 * So we need these extra tests.
4114 */
4115 if (completed == 0 && advancing &&
4116 strncmp(action, "idle", 4) == 0 &&
4117 info->reshape_progress > 0)
4118 break;
4119 if (completed == 0 && !advancing &&
4120 strncmp(action, "idle", 4) == 0 &&
4121 info->reshape_progress < (info->component_size
4122 * reshape->after.data_disks))
4123 break;
4124 sysfs_wait(fd, NULL);
4125 if (sysfs_fd_get_ll(fd, &completed) < 0)
4126 goto check_progress;
4127 }
4128 /* Some kernels reset 'sync_completed' to zero;
4129 * we need the real point we have reached in md.
4130 * So in that case, read 'reshape_position' from sysfs.
4131 */
4132 if (completed == 0) {
4133 unsigned long long reshapep;
4134 char action[20];
4135 if (sysfs_get_str(info, NULL, "sync_action",
4136 action, 20) > 0 &&
4137 strncmp(action, "idle", 4) == 0 &&
4138 sysfs_get_ll(info, NULL,
4139 "reshape_position", &reshapep) == 0)
4140 *reshape_completed = reshapep;
4141 } else {
4142 /* some kernels can give an incorrectly high
4143 * 'completed' number, so round down */
4144 completed /= (info->new_chunk/512);
4145 completed *= (info->new_chunk/512);
4146 /* Convert 'completed' back in to a 'progress' number */
4147 completed *= reshape->after.data_disks;
4148 if (!advancing)
4149 completed = (info->component_size
4150 * reshape->after.data_disks
4151 - completed);
4152 *reshape_completed = completed;
4153 }
4154
4155 close(fd);
4156
4157 /* We return the need_backup flag. Caller will decide
4158 * how much - a multiple of ->backup_blocks up to *suspend_point
4159 */
4160 if (advancing)
4161 return need_backup > info->reshape_progress;
4162 else
4163 return need_backup >= info->reshape_progress;
4164
4165 check_progress:
4166 /* if we couldn't read a number from sync_completed, then
4167 * either the reshape did complete, or it aborted.
4168 * We can tell which by checking for 'none' in reshape_position.
4169 * If it did abort, then it might immediately restart if
4170 * it was just a device failure that leaves us degraded but
4171 * functioning.
4172 */
4173 if (sysfs_get_str(info, NULL, "reshape_position", buf,
4174 sizeof(buf)) < 0 ||
4175 strncmp(buf, "none", 4) != 0) {
4176 /* The abort might only be temporary. Wait up to 10
4177 * seconds for fd to contain a valid number again.
4178 */
4179 int wait = 10000;
4180 int rv = -2;
4181 unsigned long long new_sync_max;
4182 while (fd >= 0 && rv < 0 && wait > 0) {
4183 if (sysfs_wait(fd, &wait) != 1)
4184 break;
4185 switch (sysfs_fd_get_ll(fd, &completed)) {
4186 case 0:
4187 /* all good again */
4188 rv = 1;
4189 /* If "sync_max" is no longer max_progress
4190 * we need to freeze things
4191 */
4192 sysfs_get_ll(info, NULL, "sync_max", &new_sync_max);
4193 *frozen = (new_sync_max != max_progress);
4194 break;
4195 case -2: /* read error - abort */
4196 wait = 0;
4197 break;
4198 }
4199 }
4200 if (fd >= 0)
4201 close(fd);
4202 return rv; /* abort */
4203 } else {
4204 /* Maybe racing with array shutdown - check state */
4205 if (fd >= 0)
4206 close(fd);
4207 if (sysfs_get_str(info, NULL, "array_state", buf,
4208 sizeof(buf)) < 0 ||
4209 strncmp(buf, "inactive", 8) == 0 ||
4210 strncmp(buf, "clear",5) == 0)
4211 return -2; /* abort */
4212 return -1; /* complete */
4213 }
4214 }
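/*
 * Minimal caller sketch (assumption for illustration only; the real
 * driver is child_monitor() below):
 *
 *     while (!done) {
 *             rv = progress_reshape(sra, reshape, backup_point, wait_point,
 *                                   &suspend_point, &reshape_completed,
 *                                   &frozen);
 *             if (rv < 0)
 *                     break;      rv == -1: all done, rv == -2: error
 *             if (rv > 0)
 *                     back up more data from backup_point, but no further
 *                     than suspend_point, then advance backup_point
 *     }
 */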
4215
4216 /* FIXME return status is never checked */
4217 static int grow_backup(struct mdinfo *sra,
4218 unsigned long long offset, /* per device */
4219 unsigned long stripes, /* per device, in old chunks */
4220 int *sources, unsigned long long *offsets,
4221 int disks, int chunk, int level, int layout,
4222 int dests, int *destfd, unsigned long long *destoffsets,
4223 int part, int *degraded,
4224 char *buf)
4225 {
4226 /* Backup 'blocks' sectors at 'offset' on each device of the array,
4227 * to storage 'destfd' (offset 'destoffsets'), after first
4228 * suspending IO. Then allow resync to continue
4229 * over the suspended section.
4230 * Use part 'part' of the backup-super-block.
4231 */
4232 int odata = disks;
4233 int rv = 0;
4234 int i;
4235 unsigned long long ll;
4236 int new_degraded;
4237 //printf("offset %llu\n", offset);
4238 if (level >= 4)
4239 odata--;
4240 if (level == 6)
4241 odata--;
4242
4243 /* Check that the array hasn't become degraded, else we might back up the wrong data */
4244 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
4245 return -1; /* FIXME this error is ignored */
4246 new_degraded = (int)ll;
4247 if (new_degraded != *degraded) {
4248 /* check each device to ensure it is still working */
4249 struct mdinfo *sd;
4250 for (sd = sra->devs ; sd ; sd = sd->next) {
4251 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4252 continue;
4253 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
4254 char sbuf[100];
4255
4256 if (sysfs_get_str(sra, sd, "state",
4257 sbuf, sizeof(sbuf)) < 0 ||
4258 strstr(sbuf, "faulty") ||
4259 strstr(sbuf, "in_sync") == NULL) {
4260 /* this device is dead */
4261 sd->disk.state = (1<<MD_DISK_FAULTY);
4262 if (sd->disk.raid_disk >= 0 &&
4263 sources[sd->disk.raid_disk] >= 0) {
4264 close(sources[sd->disk.raid_disk]);
4265 sources[sd->disk.raid_disk] = -1;
4266 }
4267 }
4268 }
4269 }
4270 *degraded = new_degraded;
4271 }
4272 if (part) {
4273 bsb.arraystart2 = __cpu_to_le64(offset * odata);
4274 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
4275 } else {
4276 bsb.arraystart = __cpu_to_le64(offset * odata);
4277 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
4278 }
4279 if (part)
4280 bsb.magic[15] = '2';
4281 for (i = 0; i < dests; i++)
4282 if (part)
4283 lseek64(destfd[i], destoffsets[i] + __le64_to_cpu(bsb.devstart2)*512, 0);
4284 else
4285 lseek64(destfd[i], destoffsets[i], 0);
4286
4287 rv = save_stripes(sources, offsets,
4288 disks, chunk, level, layout,
4289 dests, destfd,
4290 offset*512*odata, stripes * chunk * odata,
4291 buf);
4292
4293 if (rv)
4294 return rv;
4295 bsb.mtime = __cpu_to_le64(time(0));
4296 for (i = 0; i < dests; i++) {
4297 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4298
4299 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4300 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4301 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4302 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4303
4304 rv = -1;
4305 if ((unsigned long long)lseek64(destfd[i],
4306 destoffsets[i] - 4096, 0) !=
4307 destoffsets[i] - 4096)
4308 break;
4309 if (write(destfd[i], &bsb, 512) != 512)
4310 break;
4311 if (destoffsets[i] > 4096) {
4312 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
4313 destoffsets[i]+stripes*chunk*odata)
4314 break;
4315 if (write(destfd[i], &bsb, 512) != 512)
4316 break;
4317 }
4318 fsync(destfd[i]);
4319 rv = 0;
4320 }
4321
4322 return rv;
4323 }
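/*
 * Sketch of the backup-destination layout implied by the seeks and writes
 * above (offsets in bytes relative to destoffsets[i]; descriptive only,
 * not a normative format description):
 *
 *     -4096                 512-byte backup superblock (bsb)
 *      0                    data for part 0 (bsb.length sectors)
 *      devstart2 * 512      data for part 1 (bsb.length2 sectors)
 *      stripes*chunk*odata  second copy of bsb, written only when
 *                           destoffsets[i] > 4096
 */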
4324
4325 /* in 2.6.30, the value reported by sync_completed can be
4326 * less than it should be by one stripe.
4327 * This only happens when reshape hits sync_max and pauses.
4328 * So allow wait_backup to either extend sync_max further
4329 * than strictly necessary, or return before the
4330 * sync has got quite as far as we would really like.
4331 * This is what 'blocks2' is for.
4332 * The various callers give appropriate values so that
4333 * everything works.
4334 */
4335 /* FIXME return value is often ignored */
4336 static int forget_backup(int dests, int *destfd,
4337 unsigned long long *destoffsets,
4338 int part)
4339 {
4340 /*
4341 * Erase backup 'part' (which is 0 or 1)
4342 */
4343 int i;
4344 int rv;
4345
4346 if (part) {
4347 bsb.arraystart2 = __cpu_to_le64(0);
4348 bsb.length2 = __cpu_to_le64(0);
4349 } else {
4350 bsb.arraystart = __cpu_to_le64(0);
4351 bsb.length = __cpu_to_le64(0);
4352 }
4353 bsb.mtime = __cpu_to_le64(time(0));
4354 rv = 0;
4355 for (i = 0; i < dests; i++) {
4356 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4357 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4358 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4359 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4360 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4361 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4362 destoffsets[i]-4096)
4363 rv = -1;
4364 if (rv == 0 &&
4365 write(destfd[i], &bsb, 512) != 512)
4366 rv = -1;
4367 fsync(destfd[i]);
4368 }
4369 return rv;
4370 }
4371
4372 static void fail(char *msg)
4373 {
4374 int rv;
4375 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4376 rv |= (write(2, "\n", 1) != 1);
4377 exit(rv ? 1 : 2);
4378 }
4379
4380 static char *abuf, *bbuf;
4381 static unsigned long long abuflen;
4382 static void validate(int afd, int bfd, unsigned long long offset)
4383 {
4384 /* Check the data in the backup against the array.
4385 * This is only used for regression testing and should not
4386 * be used while the array is active.
4387 */
4388 if (afd < 0)
4389 return;
4390 lseek64(bfd, offset - 4096, 0);
4391 if (read(bfd, &bsb2, 512) != 512)
4392 fail("cannot read bsb");
4393 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4394 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4395 fail("first csum bad");
4396 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4397 fail("magic is bad");
4398 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4399 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4400 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4401 fail("second csum bad");
4402
4403 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4404 fail("devstart is wrong");
4405
4406 if (bsb2.length) {
4407 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4408
4409 if (abuflen < len) {
4410 free(abuf);
4411 free(bbuf);
4412 abuflen = len;
4413 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4414 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4415 abuflen = 0;
4416 /* just stop validating on mem-alloc failure */
4417 return;
4418 }
4419 }
4420
4421 lseek64(bfd, offset, 0);
4422 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4423 //printf("len %llu\n", len);
4424 fail("read first backup failed");
4425 }
4426 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4427 if ((unsigned long long)read(afd, abuf, len) != len)
4428 fail("read first from array failed");
4429 if (memcmp(bbuf, abuf, len) != 0) {
4430 #if 0
4431 int i;
4432 printf("offset=%llu len=%llu\n",
4433 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4434 for (i=0; i<len; i++)
4435 if (bbuf[i] != abuf[i]) {
4436 printf("first diff byte %d\n", i);
4437 break;
4438 }
4439 #endif
4440 fail("data1 compare failed");
4441 }
4442 }
4443 if (bsb2.length2) {
4444 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4445
4446 if (abuflen < len) {
4447 free(abuf);
4448 free(bbuf);
4449 abuflen = len;
4450 abuf = xmalloc(abuflen);
4451 bbuf = xmalloc(abuflen);
4452 }
4453
4454 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4455 if ((unsigned long long)read(bfd, bbuf, len) != len)
4456 fail("read second backup failed");
4457 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4458 if ((unsigned long long)read(afd, abuf, len) != len)
4459 fail("read second from array failed");
4460 if (memcmp(bbuf, abuf, len) != 0)
4461 fail("data2 compare failed");
4462 }
4463 }
4464
4465 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4466 struct supertype *st, unsigned long blocks,
4467 int *fds, unsigned long long *offsets,
4468 int dests, int *destfd, unsigned long long *destoffsets)
4469 {
4470 /* Monitor a reshape where backup is being performed using
4471 * 'native' mechanism - either to a backup file, or
4472 * to some space in a spare.
4473 */
4474 char *buf;
4475 int degraded = -1;
4476 unsigned long long speed;
4477 unsigned long long suspend_point, array_size;
4478 unsigned long long backup_point, wait_point;
4479 unsigned long long reshape_completed;
4480 int done = 0;
4481 int increasing = reshape->after.data_disks >= reshape->before.data_disks;
4482 int part = 0; /* The next part of the backup area to fill. It may already
4483 * be full, so we need to check */
4484 int level = reshape->level;
4485 int layout = reshape->before.layout;
4486 int data = reshape->before.data_disks;
4487 int disks = reshape->before.data_disks + reshape->parity;
4488 int chunk = sra->array.chunk_size;
4489 struct mdinfo *sd;
4490 unsigned long stripes;
4491 int uuid[4];
4492 int frozen = 0;
4493
4494 /* set up the backup-super-block. This requires the
4495 * uuid from the array.
4496 */
4497 /* Find a superblock */
4498 for (sd = sra->devs; sd; sd = sd->next) {
4499 char *dn;
4500 int devfd;
4501 int ok;
4502 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4503 continue;
4504 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4505 devfd = dev_open(dn, O_RDONLY);
4506 if (devfd < 0)
4507 continue;
4508 ok = st->ss->load_super(st, devfd, NULL);
4509 close(devfd);
4510 if (ok == 0)
4511 break;
4512 }
4513 if (!sd) {
4514 pr_err("Cannot find a superblock\n");
4515 return 0;
4516 }
4517
4518 memset(&bsb, 0, 512);
4519 memcpy(bsb.magic, "md_backup_data-1", 16);
4520 st->ss->uuid_from_super(st, uuid);
4521 memcpy(bsb.set_uuid, uuid, 16);
4522 bsb.mtime = __cpu_to_le64(time(0));
4523 bsb.devstart2 = blocks;
4524
4525 stripes = blocks / (sra->array.chunk_size/512) /
4526 reshape->before.data_disks;
4527
4528 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4529 /* Don't start the 'reshape' */
4530 return 0;
4531 if (reshape->before.data_disks == reshape->after.data_disks) {
4532 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4533 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4534 }
4535
4536 if (increasing) {
4537 array_size = sra->component_size * reshape->after.data_disks;
4538 backup_point = sra->reshape_progress;
4539 suspend_point = 0;
4540 } else {
4541 array_size = sra->component_size * reshape->before.data_disks;
4542 backup_point = reshape->backup_blocks;
4543 suspend_point = array_size;
4544 }
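/* Example of the initial bounds above (hypothetical array): growing to
 * 5 data disks with component_size = 1000000 gives array_size = 5000000
 * array sectors, backup_point starts at the current reshape_progress and
 * suspend_point at 0; when shrinking, the loop instead works downwards
 * from array_size.
 */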
4545
4546 while (!done) {
4547 int rv;
4548
4549 /* Want to return as soon as the oldest backup slot can
4550 * be released as that allows us to start backing up
4551 * some more, providing suspend_point has been
4552 * advanced, which it should have been.
4553 */
4554 if (increasing) {
4555 wait_point = array_size;
4556 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4557 wait_point = (__le64_to_cpu(bsb.arraystart) +
4558 __le64_to_cpu(bsb.length));
4559 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4560 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4561 __le64_to_cpu(bsb.length2));
4562 } else {
4563 wait_point = 0;
4564 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4565 wait_point = __le64_to_cpu(bsb.arraystart);
4566 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4567 wait_point = __le64_to_cpu(bsb.arraystart2);
4568 }
4569
4570 reshape_completed = sra->reshape_progress;
4571 rv = progress_reshape(sra, reshape,
4572 backup_point, wait_point,
4573 &suspend_point, &reshape_completed,
4574 &frozen);
4575 /* external metadata would need to ping_monitor here */
4576 sra->reshape_progress = reshape_completed;
4577
4578 /* Clear any backup region that is before 'here' */
4579 if (increasing) {
4580 if (__le64_to_cpu(bsb.length) > 0 &&
4581 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4582 __le64_to_cpu(bsb.length)))
4583 forget_backup(dests, destfd,
4584 destoffsets, 0);
4585 if (__le64_to_cpu(bsb.length2) > 0 &&
4586 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4587 __le64_to_cpu(bsb.length2)))
4588 forget_backup(dests, destfd,
4589 destoffsets, 1);
4590 } else {
4591 if (__le64_to_cpu(bsb.length) > 0 &&
4592 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4593 forget_backup(dests, destfd,
4594 destoffsets, 0);
4595 if (__le64_to_cpu(bsb.length2) > 0 &&
4596 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4597 forget_backup(dests, destfd,
4598 destoffsets, 1);
4599 }
4600 if (sigterm)
4601 rv = -2;
4602 if (rv < 0) {
4603 if (rv == -1)
4604 done = 1;
4605 break;
4606 }
4607 if (rv == 0 && increasing && !st->ss->external) {
4608 /* No longer need to monitor this reshape */
4609 sysfs_set_str(sra, NULL, "sync_max", "max");
4610 done = 1;
4611 break;
4612 }
4613
4614 while (rv) {
4615 unsigned long long offset;
4616 unsigned long actual_stripes;
4617 /* Need to back up some data.
4618 * If 'part' is not used and the desired
4619 * backup size is suspended, do a backup,
4620 * then consider the next part.
4621 */
4622 /* Check that 'part' is unused */
4623 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4624 break;
4625 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4626 break;
4627
4628 offset = backup_point / data;
4629 actual_stripes = stripes;
4630 if (increasing) {
4631 if (offset + actual_stripes * (chunk/512) >
4632 sra->component_size)
4633 actual_stripes = ((sra->component_size - offset)
4634 / (chunk/512));
4635 if (offset + actual_stripes * (chunk/512) >
4636 suspend_point/data)
4637 break;
4638 } else {
4639 if (offset < actual_stripes * (chunk/512))
4640 actual_stripes = offset / (chunk/512);
4641 offset -= actual_stripes * (chunk/512);
4642 if (offset < suspend_point/data)
4643 break;
4644 }
4645 if (actual_stripes == 0)
4646 break;
4647 grow_backup(sra, offset, actual_stripes,
4648 fds, offsets,
4649 disks, chunk, level, layout,
4650 dests, destfd, destoffsets,
4651 part, &degraded, buf);
4652 validate(afd, destfd[0], destoffsets[0]);
4653 /* record where 'part' is up to */
4654 part = !part;
4655 if (increasing)
4656 backup_point += actual_stripes * (chunk/512) * data;
4657 else
4658 backup_point -= actual_stripes * (chunk/512) * data;
4659 }
4660 }
4661
4662 /* FIXME maybe call progress_reshape one more time instead */
4663 /* remove any remaining suspension */
4664 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4665 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4666 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4667 sysfs_set_num(sra, NULL, "sync_min", 0);
4668
4669 if (reshape->before.data_disks == reshape->after.data_disks)
4670 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4671 free(buf);
4672 return done;
4673 }
4674
4675 /*
4676 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4677 * write that data into the array and update the superblocks with
4678 * the new reshape_progress.
4679 */
4680 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, int cnt,
4681 char *backup_file, int verbose)
4682 {
4683 int i, j;
4684 int old_disks;
4685 unsigned long long *offsets;
4686 unsigned long long nstripe, ostripe;
4687 int ndata, odata;
4688
4689 odata = info->array.raid_disks - info->delta_disks - 1;
4690 if (info->array.level == 6) odata--; /* number of data disks */
4691 ndata = info->array.raid_disks - 1;
4692 if (info->new_level == 6) ndata--;
4693
4694 old_disks = info->array.raid_disks - info->delta_disks;
4695
4696 if (info->delta_disks <= 0)
4697 /* Didn't grow, so the backup file must have
4698 * been used
4699 */
4700 old_disks = cnt;
4701 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4702 struct mdinfo dinfo;
4703 int fd;
4704 int bsbsize;
4705 char *devname, namebuf[20];
4706 unsigned long long lo, hi;
4707
4708 /* This was a spare and may have some saved data on it.
4709 * Load the superblock, find and load the
4710 * backup_super_block.
4711 * If either fails, go on to the next device.
4712 * If the backup contains no new info, just return;
4713 * else restore data and update all superblocks.
4714 */
4715 if (i == old_disks-1) {
4716 fd = open(backup_file, O_RDONLY);
4717 if (fd<0) {
4718 pr_err("backup file %s inaccessible: %s\n",
4719 backup_file, strerror(errno));
4720 continue;
4721 }
4722 devname = backup_file;
4723 } else {
4724 fd = fdlist[i];
4725 if (fd < 0)
4726 continue;
4727 if (st->ss->load_super(st, fd, NULL))
4728 continue;
4729
4730 st->ss->getinfo_super(st, &dinfo, NULL);
4731 st->ss->free_super(st);
4732
4733 if (lseek64(fd,
4734 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4735 0) < 0) {
4736 pr_err("Cannot seek on device %d\n", i);
4737 continue; /* Cannot seek */
4738 }
4739 sprintf(namebuf, "device-%d", i);
4740 devname = namebuf;
4741 }
4742 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4743 if (verbose)
4744 pr_err("Cannot read from %s\n", devname);
4745 continue; /* Cannot read */
4746 }
4747 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4748 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4749 if (verbose)
4750 pr_err("No backup metadata on %s\n", devname);
4751 continue;
4752 }
4753 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4754 if (verbose)
4755 pr_err("Bad backup-metadata checksum on %s\n", devname);
4756 continue; /* bad checksum */
4757 }
4758 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4759 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4760 if (verbose)
4761 pr_err("Bad backup-metadata checksum2 on %s\n", devname);
4762 continue; /* Bad second checksum */
4763 }
4764 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4765 if (verbose)
4766 pr_err("Wrong uuid on backup-metadata on %s\n", devname);
4767 continue; /* Wrong uuid */
4768 }
4769
4770 /* array utime and backup-mtime should be updated at much the same time, but it seems that
4771 * sometimes they aren't... So allow considerable flexibility in matching, and allow
4772 * this test to be overridden by an environment variable.
4773 */
4774 if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
4775 time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
4776 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4777 pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
4778 (unsigned long)__le64_to_cpu(bsb.mtime),
4779 (unsigned long)info->array.utime);
4780 } else {
4781 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4782 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4783 continue; /* timestamp is too far off */
4784 }
4785 }
4786
4787 if (bsb.magic[15] == '1') {
4788 if (bsb.length == 0)
4789 continue;
4790 if (info->delta_disks >= 0) {
4791 /* reshape_progress is increasing */
4792 if (__le64_to_cpu(bsb.arraystart)
4793 + __le64_to_cpu(bsb.length)
4794 < info->reshape_progress) {
4795 nonew:
4796 if (verbose)
4797 pr_err("backup-metadata found on %s but is not needed\n", devname);
4798 continue; /* No new data here */
4799 }
4800 } else {
4801 /* reshape_progress is decreasing */
4802 if (__le64_to_cpu(bsb.arraystart) >=
4803 info->reshape_progress)
4804 goto nonew; /* No new data here */
4805 }
4806 } else {
4807 if (bsb.length == 0 && bsb.length2 == 0)
4808 continue;
4809 if (info->delta_disks >= 0) {
4810 /* reshape_progress is increasing */
4811 if ((__le64_to_cpu(bsb.arraystart)
4812 + __le64_to_cpu(bsb.length)
4813 < info->reshape_progress) &&
4814 (__le64_to_cpu(bsb.arraystart2)
4815 + __le64_to_cpu(bsb.length2)
4816 < info->reshape_progress))
4817 goto nonew; /* No new data here */
4818 } else {
4819 /* reshape_progress is decreasing */
4820 if (__le64_to_cpu(bsb.arraystart) >=
4821 info->reshape_progress &&
4822 __le64_to_cpu(bsb.arraystart2) >=
4823 info->reshape_progress)
4824 goto nonew; /* No new data here */
4825 }
4826 }
4827 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4828 second_fail:
4829 if (verbose)
4830 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4831 devname);
4832 continue; /* Cannot seek */
4833 }
4834 /* There should be a duplicate backup superblock 4k before here */
4835 if (lseek64(fd, -4096, 1) < 0 ||
4836 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4837 goto second_fail; /* Cannot find leading superblock */
4838 if (bsb.magic[15] == '1')
4839 bsbsize = offsetof(struct mdp_backup_super, pad1);
4840 else
4841 bsbsize = offsetof(struct mdp_backup_super, pad);
4842 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4843 goto second_fail; /* Cannot find leading superblock */
4844
4845 /* Now need the data offsets for all devices. */
4846 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4847 for(j=0; j<info->array.raid_disks; j++) {
4848 if (fdlist[j] < 0)
4849 continue;
4850 if (st->ss->load_super(st, fdlist[j], NULL))
4851 /* FIXME should this be an error? */
4852 continue;
4853 st->ss->getinfo_super(st, &dinfo, NULL);
4854 st->ss->free_super(st);
4855 offsets[j] = dinfo.data_offset * 512;
4856 }
4857 printf("%s: restoring critical section\n", Name);
4858
4859 if (restore_stripes(fdlist, offsets,
4860 info->array.raid_disks,
4861 info->new_chunk,
4862 info->new_level,
4863 info->new_layout,
4864 fd, __le64_to_cpu(bsb.devstart)*512,
4865 __le64_to_cpu(bsb.arraystart)*512,
4866 __le64_to_cpu(bsb.length)*512, NULL)) {
4867 /* didn't succeed, so give up */
4868 if (verbose)
4869 pr_err("Error restoring backup from %s\n",
4870 devname);
4871 free(offsets);
4872 return 1;
4873 }
4874
4875 if (bsb.magic[15] == '2' &&
4876 restore_stripes(fdlist, offsets,
4877 info->array.raid_disks,
4878 info->new_chunk,
4879 info->new_level,
4880 info->new_layout,
4881 fd, __le64_to_cpu(bsb.devstart)*512 +
4882 __le64_to_cpu(bsb.devstart2)*512,
4883 __le64_to_cpu(bsb.arraystart2)*512,
4884 __le64_to_cpu(bsb.length2)*512, NULL)) {
4885 /* didn't succeed, so give up */
4886 if (verbose)
4887 pr_err("Error restoring second backup from %s\n",
4888 devname);
4889 free(offsets);
4890 return 1;
4891 }
4892
4893 free(offsets);
4894
4895 /* Ok, so the data is restored. Let's update those superblocks. */
4896
4897 lo = hi = 0;
4898 if (bsb.length) {
4899 lo = __le64_to_cpu(bsb.arraystart);
4900 hi = lo + __le64_to_cpu(bsb.length);
4901 }
4902 if (bsb.magic[15] == '2' && bsb.length2) {
4903 unsigned long long lo1, hi1;
4904 lo1 = __le64_to_cpu(bsb.arraystart2);
4905 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4906 if (lo == hi) {
4907 lo = lo1;
4908 hi = hi1;
4909 } else if (lo < lo1)
4910 hi = hi1;
4911 else
4912 lo = lo1;
4913 }
4914 if (lo < hi &&
4915 (info->reshape_progress < lo ||
4916 info->reshape_progress > hi))
4917 /* backup does not affect reshape_progress */ ;
4918 else if (info->delta_disks >= 0) {
4919 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4920 __le64_to_cpu(bsb.length);
4921 if (bsb.magic[15] == '2') {
4922 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2) +
4923 __le64_to_cpu(bsb.length2);
4924 if (p2 > info->reshape_progress)
4925 info->reshape_progress = p2;
4926 }
4927 } else {
4928 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4929 if (bsb.magic[15] == '2') {
4930 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2);
4931 if (p2 < info->reshape_progress)
4932 info->reshape_progress = p2;
4933 }
4934 }
4935 for (j=0; j<info->array.raid_disks; j++) {
4936 if (fdlist[j] < 0)
4937 continue;
4938 if (st->ss->load_super(st, fdlist[j], NULL))
4939 continue;
4940 st->ss->getinfo_super(st, &dinfo, NULL);
4941 dinfo.reshape_progress = info->reshape_progress;
4942 st->ss->update_super(st, &dinfo,
4943 "_reshape_progress",
4944 NULL,0, 0, NULL);
4945 st->ss->store_super(st, fdlist[j]);
4946 st->ss->free_super(st);
4947 }
4948 return 0;
4949 }
4950 /* Didn't find any backup data, try to see if any
4951 * was needed.
4952 */
4953 if (info->delta_disks < 0) {
4954 /* When shrinking, the critical section is at the end.
4955 * So see if we are before the critical section.
4956 */
4957 unsigned long long first_block;
4958 nstripe = ostripe = 0;
4959 first_block = 0;
4960 while (ostripe >= nstripe) {
4961 ostripe += info->array.chunk_size / 512;
4962 first_block = ostripe * odata;
4963 nstripe = first_block / ndata / (info->new_chunk/512) *
4964 (info->new_chunk/512);
4965 }
4966
4967 if (info->reshape_progress >= first_block)
4968 return 0;
4969 }
4970 if (info->delta_disks > 0) {
4971 /* See if we are beyond the critical section. */
4972 unsigned long long last_block;
4973 nstripe = ostripe = 0;
4974 last_block = 0;
4975 while (nstripe >= ostripe) {
4976 nstripe += info->new_chunk / 512;
4977 last_block = nstripe * ndata;
4978 ostripe = last_block / odata / (info->array.chunk_size/512) *
4979 (info->array.chunk_size/512);
4980 }
4981
4982 if (info->reshape_progress >= last_block)
4983 return 0;
4984 }
4985 /* needed to recover critical section! */
4986 if (verbose)
4987 pr_err("Failed to find backup of critical section\n");
4988 return 1;
4989 }
4990
4991 int Grow_continue_command(char *devname, int fd,
4992 char *backup_file, int verbose)
4993 {
4994 int ret_val = 0;
4995 struct supertype *st = NULL;
4996 struct mdinfo *content = NULL;
4997 struct mdinfo array;
4998 char *subarray = NULL;
4999 struct mdinfo *cc = NULL;
5000 struct mdstat_ent *mdstat = NULL;
5001 int cfd = -1;
5002 int fd2;
5003
5004 dprintf("Grow continue from command line called for %s\n",
5005 devname);
5006
5007 st = super_by_fd(fd, &subarray);
5008 if (!st || !st->ss) {
5009 pr_err("Unable to determine metadata format for %s\n",
5010 devname);
5011 return 1;
5012 }
5013 dprintf("Grow continue is run for ");
5014 if (st->ss->external == 0) {
5015 int d;
5016 int cnt = 5;
5017 dprintf_cont("native array (%s)\n", devname);
5018 if (md_get_array_info(fd, &array.array) < 0) {
5019 pr_err("%s is not an active md array - aborting\n",
5020 devname);
5021 ret_val = 1;
5022 goto Grow_continue_command_exit;
5023 }
5024 content = &array;
5025 sysfs_init(content, fd, NULL);
5026 /* Need to load a superblock.
5027 * FIXME we should really get what we need from
5028 * sysfs
5029 */
5030 do {
5031 for (d = 0; d < MAX_DISKS; d++) {
5032 mdu_disk_info_t disk;
5033 char *dv;
5034 int err;
5035 disk.number = d;
5036 if (md_get_disk_info(fd, &disk) < 0)
5037 continue;
5038 if (disk.major == 0 && disk.minor == 0)
5039 continue;
5040 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
5041 continue;
5042 dv = map_dev(disk.major, disk.minor, 1);
5043 if (!dv)
5044 continue;
5045 fd2 = dev_open(dv, O_RDONLY);
5046 if (fd2 < 0)
5047 continue;
5048 err = st->ss->load_super(st, fd2, NULL);
5049 close(fd2);
5050 if (err)
5051 continue;
5052 break;
5053 }
5054 if (d == MAX_DISKS) {
5055 pr_err("Unable to load metadata for %s\n",
5056 devname);
5057 ret_val = 1;
5058 goto Grow_continue_command_exit;
5059 }
5060 st->ss->getinfo_super(st, content, NULL);
5061 if (!content->reshape_active)
5062 sleep(3);
5063 else
5064 break;
5065 } while (cnt-- > 0);
5066 } else {
5067 char *container;
5068
5069 if (subarray) {
5070 dprintf_cont("subarray (%s)\n", subarray);
5071 container = st->container_devnm;
5072 cfd = open_dev_excl(st->container_devnm);
5073 } else {
5074 container = st->devnm;
5075 close(fd);
5076 cfd = open_dev_excl(st->devnm);
5077 dprintf_cont("container (%s)\n", container);
5078 fd = cfd;
5079 }
5080 if (cfd < 0) {
5081 pr_err("Unable to open container for %s\n", devname);
5082 ret_val = 1;
5083 goto Grow_continue_command_exit;
5084 }
5085
5086 /* find the array under reshape in the container
5087 */
5088 ret_val = st->ss->load_container(st, cfd, NULL);
5089 if (ret_val) {
5090 pr_err("Cannot read superblock for %s\n",
5091 devname);
5092 ret_val = 1;
5093 goto Grow_continue_command_exit;
5094 }
5095
5096 cc = st->ss->container_content(st, subarray);
5097 for (content = cc; content ; content = content->next) {
5098 char *array_name;
5099 int allow_reshape = 1;
5100
5101 if (content->reshape_active == 0)
5102 continue;
5103 /* The decision about array- or container-wide
5104 * reshape is taken in Grow_continue based on the
5105 * content->reshape_active state, therefore we
5106 * need to check_reshape based on
5107 * reshape_active and the subarray name.
5108 */
5109 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
5110 allow_reshape = 0;
5111 if (content->reshape_active == CONTAINER_RESHAPE &&
5112 (content->array.state
5113 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
5114 allow_reshape = 0;
5115
5116 if (!allow_reshape) {
5117 pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
5118 devname, container);
5119 ret_val = 1;
5120 goto Grow_continue_command_exit;
5121 }
5122
5123 array_name = strchr(content->text_version+1, '/')+1;
5124 mdstat = mdstat_by_subdev(array_name, container);
5125 if (!mdstat)
5126 continue;
5127 if (mdstat->active == 0) {
5128 pr_err("Skipping inactive array %s.\n",
5129 mdstat->devnm);
5130 free_mdstat(mdstat);
5131 mdstat = NULL;
5132 continue;
5133 }
5134 break;
5135 }
5136 if (!content) {
5137 pr_err("Unable to determine reshaped array for %s\n", devname);
5138 ret_val = 1;
5139 goto Grow_continue_command_exit;
5140 }
5141 fd2 = open_dev(mdstat->devnm);
5142 if (fd2 < 0) {
5143 pr_err("cannot open (%s)\n", mdstat->devnm);
5144 ret_val = 1;
5145 goto Grow_continue_command_exit;
5146 }
5147
5148 if (sysfs_init(content, fd2, mdstat->devnm)) {
5149 pr_err("Unable to initialize sysfs for %s, Grow cannot continue.\n",
5150 mdstat->devnm);
5151 ret_val = 1;
5152 close(fd2);
5153 goto Grow_continue_command_exit;
5154 }
5155
5156 close(fd2);
5157
5158 /* start mdmon in case it is not running
5159 */
5160 if (!mdmon_running(container))
5161 start_mdmon(container);
5162 ping_monitor(container);
5163
5164 if (mdmon_running(container))
5165 st->update_tail = &st->updates;
5166 else {
5167 pr_err("No mdmon found. Grow cannot continue.\n");
5168 ret_val = 1;
5169 goto Grow_continue_command_exit;
5170 }
5171 }
5172
5173 /* verify that the array under reshape is started from
5174 * the correct position
5175 */
5176 if (verify_reshape_position(content, content->array.level) < 0) {
5177 ret_val = 1;
5178 goto Grow_continue_command_exit;
5179 }
5180
5181 /* continue reshape
5182 */
5183 ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);
5184
5185 Grow_continue_command_exit:
5186 if (cfd > -1)
5187 close(cfd);
5188 st->ss->free_super(st);
5189 free_mdstat(mdstat);
5190 sysfs_free(cc);
5191 free(subarray);
5192
5193 return ret_val;
5194 }
5195
5196 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
5197 char *backup_file, int forked, int freeze_reshape)
5198 {
5199 int ret_val = 2;
5200
5201 if (!info->reshape_active)
5202 return ret_val;
5203
5204 if (st->ss->external) {
5205 int cfd = open_dev(st->container_devnm);
5206
5207 if (cfd < 0)
5208 return 1;
5209
5210 st->ss->load_container(st, cfd, st->container_devnm);
5211 close(cfd);
5212 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
5213 st, info, 0, backup_file,
5214 0, forked,
5215 1 | info->reshape_active,
5216 freeze_reshape);
5217 } else
5218 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
5219 NULL, INVALID_SECTORS,
5220 backup_file, 0, forked,
5221 1 | info->reshape_active,
5222 freeze_reshape);
5223
5224 return ret_val;
5225 }
5226
5227 char *make_backup(char *name)
5228 {
5229 char *base = "backup_file-";
5230 int len;
5231 char *fname;
5232
5233 len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1;
5234 fname = xmalloc(len);
5235 sprintf(fname, "%s/%s%s", MAP_DIR, base, name);
5236 return fname;
5237 }
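/*
 * Example (illustrative only): make_backup("md127") returns
 * MAP_DIR "/backup_file-md127"; locate_backup() below hands that path
 * back only if a regular file already exists there.
 */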
5238
5239 char *locate_backup(char *name)
5240 {
5241 char *fl = make_backup(name);
5242 struct stat stb;
5243
5244 if (stat(fl, &stb) == 0 &&
5245 S_ISREG(stb.st_mode))
5246 return fl;
5247
5248 free(fl);
5249 return NULL;
5250 }