Grow.c (thirdparty/mdadm.git)
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stddef.h>
28 #include <stdint.h>
29 #include <signal.h>
30 #include <sys/wait.h>
31
32 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
33 #error no endian defined
34 #endif
35 #include "md_u.h"
36 #include "md_p.h"
37
38 int restore_backup(struct supertype *st,
39 struct mdinfo *content,
40 int working_disks,
41 int next_spare,
42 char **backup_filep,
43 int verbose)
44 {
45 int i;
46 int *fdlist;
47 struct mdinfo *dev;
48 int err;
49 int disk_count = next_spare + working_disks;
50 char *backup_file = *backup_filep;
51
52 dprintf("Called restore_backup()\n");
53 fdlist = xmalloc(sizeof(int) * disk_count);
54
55 enable_fds(next_spare);
56 for (i = 0; i < next_spare; i++)
57 fdlist[i] = -1;
58 for (dev = content->devs; dev; dev = dev->next) {
59 char buf[22];
60 int fd;
61
62 sprintf(buf, "%d:%d", dev->disk.major, dev->disk.minor);
63 fd = dev_open(buf, O_RDWR);
64
65 if (dev->disk.raid_disk >= 0)
66 fdlist[dev->disk.raid_disk] = fd;
67 else
68 fdlist[next_spare++] = fd;
69 }
70
71 if (!backup_file) {
72 backup_file = locate_backup(content->sys_name);
73 *backup_filep = backup_file;
74 }
75
76 if (st->ss->external && st->ss->recover_backup)
77 err = st->ss->recover_backup(st, content);
78 else
79 err = Grow_restart(st, content, fdlist, next_spare,
80 backup_file, verbose > 0);
81
82 while (next_spare > 0) {
83 next_spare--;
84 if (fdlist[next_spare] >= 0)
85 close(fdlist[next_spare]);
86 }
87 free(fdlist);
88 if (err) {
89 pr_err("Failed to restore critical section for reshape - sorry.\n");
90 if (!backup_file)
91 pr_err("Possibly you need to specify a --backup-file\n");
92 return 1;
93 }
94
95 dprintf("restore_backup() returns status OK.\n");
96 return 0;
97 }
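/*
 * Illustrative sketch (not part of the original file): how an
 * assembly-time caller might use restore_backup() to replay the
 * critical section of an interrupted reshape.  The variables st,
 * content, working_disks, next_spare and verbose are assumed to be
 * set up by the caller.
 */
#if 0
	char *backup = NULL;	/* let restore_backup() locate the file */

	if (restore_backup(st, content, working_disks, next_spare,
			   &backup, verbose) != 0)
		return 1;	/* critical section could not be restored */
#endif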
98
99 int Grow_Add_device(char *devname, int fd, char *newdev)
100 {
101 /* Add a device to an active array.
102 * Currently, just extend a linear array.
103 * This requires writing a new superblock on the
104 * new device, calling the kernel to add the device,
105 * and if that succeeds, update the superblock on
106 * all other devices.
107 * This means that we need to *find* all other devices.
108 */
109 struct mdinfo info;
110
111 dev_t rdev;
112 int nfd, fd2;
113 int d, nd;
114 struct supertype *st = NULL;
115 char *subarray = NULL;
116
117 if (md_get_array_info(fd, &info.array) < 0) {
118 pr_err("cannot get array info for %s\n", devname);
119 return 1;
120 }
121
122 if (info.array.level != -1) {
123 pr_err("can only add devices to linear arrays\n");
124 return 1;
125 }
126
127 st = super_by_fd(fd, &subarray);
128 if (!st) {
129 pr_err("cannot handle arrays with superblock version %d\n",
130 info.array.major_version);
131 return 1;
132 }
133
134 if (subarray) {
135 pr_err("Cannot grow linear sub-arrays yet\n");
136 free(subarray);
137 free(st);
138 return 1;
139 }
140
141 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
142 if (nfd < 0) {
143 pr_err("cannot open %s\n", newdev);
144 free(st);
145 return 1;
146 }
147 if (!fstat_is_blkdev(nfd, newdev, &rdev)) {
148 close(nfd);
149 free(st);
150 return 1;
151 }
152 /* now check out all the devices and make sure we can read the
153 * superblock */
154 for (d=0 ; d < info.array.raid_disks ; d++) {
155 mdu_disk_info_t disk;
156 char *dv;
157
158 st->ss->free_super(st);
159
160 disk.number = d;
161 if (md_get_disk_info(fd, &disk) < 0) {
162 pr_err("cannot get device detail for device %d\n", d);
163 close(nfd);
164 free(st);
165 return 1;
166 }
167 dv = map_dev(disk.major, disk.minor, 1);
168 if (!dv) {
169 pr_err("cannot find device file for device %d\n", d);
170 close(nfd);
171 free(st);
172 return 1;
173 }
174 fd2 = dev_open(dv, O_RDWR);
175 if (fd2 < 0) {
176 pr_err("cannot open device file %s\n", dv);
177 close(nfd);
178 free(st);
179 return 1;
180 }
181
182 if (st->ss->load_super(st, fd2, NULL)) {
183 pr_err("cannot find super block on %s\n", dv);
184 close(nfd);
185 close(fd2);
186 free(st);
187 return 1;
188 }
189 close(fd2);
190 }
191 /* Ok, looks good. Let's update the superblock and write it out to
192 * newdev.
193 */
194
195 info.disk.number = d;
196 info.disk.major = major(rdev);
197 info.disk.minor = minor(rdev);
198 info.disk.raid_disk = d;
199 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
200 st->ss->update_super(st, &info, "linear-grow-new", newdev, 0, 0, NULL);
201
202 if (st->ss->store_super(st, nfd)) {
203 pr_err("Cannot store new superblock on %s\n", newdev);
204 close(nfd);
205 return 1;
206 }
207 close(nfd);
208
209 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
210 pr_err("Cannot add new disk to this array\n");
211 return 1;
212 }
213 /* Well, that seems to have worked.
214 * Now go through and update all superblocks
215 */
216
217 if (md_get_array_info(fd, &info.array) < 0) {
218 pr_err("cannot get array info for %s\n", devname);
219 return 1;
220 }
221
222 nd = d;
223 for (d=0 ; d < info.array.raid_disks ; d++) {
224 mdu_disk_info_t disk;
225 char *dv;
226
227 disk.number = d;
228 if (md_get_disk_info(fd, &disk) < 0) {
229 pr_err("cannot get device detail for device %d\n", d);
230 return 1;
231 }
232 dv = map_dev(disk.major, disk.minor, 1);
233 if (!dv) {
234 pr_err("cannot find device file for device %d\n", d);
235 return 1;
236 }
237 fd2 = dev_open(dv, O_RDWR);
238 if (fd2 < 0) {
239 pr_err("cannot open device file %s\n", dv);
240 return 1;
241 }
242 if (st->ss->load_super(st, fd2, NULL)) {
243 pr_err("cannot find super block on %s\n", dv);
244 close(fd);
245 close(fd2);
246 return 1;
247 }
248 info.array.raid_disks = nd+1;
249 info.array.nr_disks = nd+1;
250 info.array.active_disks = nd+1;
251 info.array.working_disks = nd+1;
252
253 st->ss->update_super(st, &info, "linear-grow-update", dv,
254 0, 0, NULL);
255
256 if (st->ss->store_super(st, fd2)) {
257 pr_err("Cannot store new superblock on %s\n", dv);
258 close(fd2);
259 return 1;
260 }
261 close(fd2);
262 }
263
264 return 0;
265 }
266
267 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
268 {
269 /*
270 * First check that array doesn't have a bitmap
271 * Then create the bitmap
272 * Then add it
273 *
274 * For internal bitmaps, we need to check the version,
275 * find all the active devices, and write the bitmap block
276 * to all devices
277 */
278 mdu_bitmap_file_t bmf;
279 mdu_array_info_t array;
280 struct supertype *st;
281 char *subarray = NULL;
282 int major = BITMAP_MAJOR_HI;
283 unsigned long long bitmapsize, array_size;
284 struct mdinfo *mdi;
285
286 /*
287 * We only ever get called if s->bitmap_file is != NULL, so this check
288 * is just here to quiet down static code checkers.
289 */
290 if (!s->bitmap_file)
291 return 1;
292
293 if (strcmp(s->bitmap_file, "clustered") == 0)
294 major = BITMAP_MAJOR_CLUSTERED;
295
296 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
297 if (errno == ENOMEM)
298 pr_err("Memory allocation failure.\n");
299 else
300 pr_err("bitmaps not supported by this kernel.\n");
301 return 1;
302 }
303 if (bmf.pathname[0]) {
304 if (strcmp(s->bitmap_file,"none") == 0) {
305 if (ioctl(fd, SET_BITMAP_FILE, -1) != 0) {
306 pr_err("failed to remove bitmap %s\n",
307 bmf.pathname);
308 return 1;
309 }
310 return 0;
311 }
312 pr_err("%s already has a bitmap (%s)\n", devname, bmf.pathname);
313 return 1;
314 }
315 if (md_get_array_info(fd, &array) != 0) {
316 pr_err("cannot get array status for %s\n", devname);
317 return 1;
318 }
319 if (array.state & (1 << MD_SB_BITMAP_PRESENT)) {
320 if (strcmp(s->bitmap_file, "none")==0) {
321 array.state &= ~(1 << MD_SB_BITMAP_PRESENT);
322 if (md_set_array_info(fd, &array) != 0) {
323 if (array.state & (1 << MD_SB_CLUSTERED))
324 pr_err("failed to remove clustered bitmap.\n");
325 else
326 pr_err("failed to remove internal bitmap.\n");
327 return 1;
328 }
329 return 0;
330 }
331 pr_err("bitmap already present on %s\n", devname);
332 return 1;
333 }
334
335 if (strcmp(s->bitmap_file, "none") == 0) {
336 pr_err("no bitmap found on %s\n", devname);
337 return 1;
338 }
339 if (array.level <= 0) {
340 pr_err("Bitmaps not meaningful with level %s\n",
341 map_num(pers, array.level)?:"of this array");
342 return 1;
343 }
344 bitmapsize = array.size;
345 bitmapsize <<= 1;
346 if (get_dev_size(fd, NULL, &array_size) &&
347 array_size > (0x7fffffffULL << 9)) {
348 /* Array is big enough that we cannot trust array.size
349 * try other approaches
350 */
351 bitmapsize = get_component_size(fd);
352 }
353 if (bitmapsize == 0) {
354 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
355 return 1;
356 }
357
358 if (array.level == 10) {
359 int ncopies;
360
361 ncopies = (array.layout & 255) * ((array.layout >> 8) & 255);
362 bitmapsize = bitmapsize * array.raid_disks / ncopies;
363
364 if (strcmp(s->bitmap_file, "clustered") == 0 &&
365 !is_near_layout_10(array.layout)) {
366 pr_err("only near layout is supported with clustered raid10\n");
367 return 1;
368 }
369 }
370
371 st = super_by_fd(fd, &subarray);
372 if (!st) {
373 pr_err("Cannot understand version %d.%d\n",
374 array.major_version, array.minor_version);
375 return 1;
376 }
377 if (subarray) {
378 pr_err("Cannot add bitmaps to sub-arrays yet\n");
379 free(subarray);
380 free(st);
381 return 1;
382 }
383
384 mdi = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY);
385 if (mdi) {
386 if (mdi->consistency_policy == CONSISTENCY_POLICY_PPL) {
387 pr_err("Cannot add bitmap to array with PPL\n");
388 free(mdi);
389 free(st);
390 return 1;
391 }
392 free(mdi);
393 }
394
395 if (strcmp(s->bitmap_file, "internal") == 0 ||
396 strcmp(s->bitmap_file, "clustered") == 0) {
397 int rv;
398 int d;
399 int offset_setable = 0;
400 if (st->ss->add_internal_bitmap == NULL) {
401 pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name);
402 return 1;
403 }
404 st->nodes = c->nodes;
405 st->cluster_name = c->homecluster;
406 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
407 if (mdi)
408 offset_setable = 1;
409 for (d = 0; d < st->max_devs; d++) {
410 mdu_disk_info_t disk;
411 char *dv;
412 int fd2;
413
414 disk.number = d;
415 if (md_get_disk_info(fd, &disk) < 0)
416 continue;
417 if (disk.major == 0 && disk.minor == 0)
418 continue;
419 if ((disk.state & (1 << MD_DISK_SYNC)) == 0)
420 continue;
421 dv = map_dev(disk.major, disk.minor, 1);
422 if (!dv)
423 continue;
424 fd2 = dev_open(dv, O_RDWR);
425 if (fd2 < 0)
426 continue;
427 rv = st->ss->load_super(st, fd2, NULL);
428 if (!rv) {
429 rv = st->ss->add_internal_bitmap(
430 st, &s->bitmap_chunk, c->delay,
431 s->write_behind, bitmapsize,
432 offset_setable, major);
433 if (!rv) {
434 st->ss->write_bitmap(st, fd2,
435 NodeNumUpdate);
436 } else {
437 pr_err("failed to create internal bitmap - chunksize problem.\n");
438 }
439 } else {
440 pr_err("failed to load super-block.\n");
441 }
442 close(fd2);
443 if (rv)
444 return 1;
445 }
446 if (offset_setable) {
447 st->ss->getinfo_super(st, mdi, NULL);
448 if (sysfs_init(mdi, fd, NULL)) {
449 pr_err("failed to initialize sysfs.\n");
450 free(mdi);
451 }
452 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
453 mdi->bitmap_offset);
454 free(mdi);
455 } else {
456 if (strcmp(s->bitmap_file, "clustered") == 0)
457 array.state |= (1 << MD_SB_CLUSTERED);
458 array.state |= (1 << MD_SB_BITMAP_PRESENT);
459 rv = md_set_array_info(fd, &array);
460 }
461 if (rv < 0) {
462 if (errno == EBUSY)
463 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
464 pr_err("failed to set internal bitmap.\n");
465 return 1;
466 }
467 } else {
468 int uuid[4];
469 int bitmap_fd;
470 int d;
471 int max_devs = st->max_devs;
472
473 /* try to load a superblock */
474 for (d = 0; d < max_devs; d++) {
475 mdu_disk_info_t disk;
476 char *dv;
477 int fd2;
478 disk.number = d;
479 if (md_get_disk_info(fd, &disk) < 0)
480 continue;
481 if ((disk.major==0 && disk.minor == 0) ||
482 (disk.state & (1 << MD_DISK_REMOVED)))
483 continue;
484 dv = map_dev(disk.major, disk.minor, 1);
485 if (!dv)
486 continue;
487 fd2 = dev_open(dv, O_RDONLY);
488 if (fd2 >= 0) {
489 if (st->ss->load_super(st, fd2, NULL) == 0) {
490 close(fd2);
491 st->ss->uuid_from_super(st, uuid);
492 break;
493 }
494 close(fd2);
495 }
496 }
497 if (d == max_devs) {
498 pr_err("cannot find UUID for array!\n");
499 return 1;
500 }
501 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid,
502 s->bitmap_chunk, c->delay, s->write_behind,
503 bitmapsize, major)) {
504 return 1;
505 }
506 bitmap_fd = open(s->bitmap_file, O_RDWR);
507 if (bitmap_fd < 0) {
508 pr_err("weird: %s cannot be opened\n", s->bitmap_file);
509 return 1;
510 }
511 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
512 int err = errno;
513 if (errno == EBUSY)
514 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
515 pr_err("Cannot set bitmap file for %s: %s\n",
516 devname, strerror(err));
517 return 1;
518 }
519 }
520
521 return 0;
522 }
523
524 int Grow_consistency_policy(char *devname, int fd, struct context *c, struct shape *s)
525 {
526 struct supertype *st;
527 struct mdinfo *sra;
528 struct mdinfo *sd;
529 char *subarray = NULL;
530 int ret = 0;
531 char container_dev[PATH_MAX];
532 char buf[20];
533
534 if (s->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
535 s->consistency_policy != CONSISTENCY_POLICY_PPL) {
536 pr_err("Operation not supported for consistency policy %s\n",
537 map_num(consistency_policies, s->consistency_policy));
538 return 1;
539 }
540
541 st = super_by_fd(fd, &subarray);
542 if (!st)
543 return 1;
544
545 sra = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY|GET_LEVEL|
546 GET_DEVS|GET_STATE);
547 if (!sra) {
548 ret = 1;
549 goto free_st;
550 }
551
552 if (s->consistency_policy == CONSISTENCY_POLICY_PPL &&
553 !st->ss->write_init_ppl) {
554 pr_err("%s metadata does not support PPL\n", st->ss->name);
555 ret = 1;
556 goto free_info;
557 }
558
559 if (sra->array.level != 5) {
560 pr_err("Operation not supported for array level %d\n",
561 sra->array.level);
562 ret = 1;
563 goto free_info;
564 }
565
566 if (sra->consistency_policy == (unsigned)s->consistency_policy) {
567 pr_err("Consistency policy is already %s\n",
568 map_num(consistency_policies, s->consistency_policy));
569 ret = 1;
570 goto free_info;
571 } else if (sra->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
572 sra->consistency_policy != CONSISTENCY_POLICY_PPL) {
573 pr_err("Current consistency policy is %s, cannot change to %s\n",
574 map_num(consistency_policies, sra->consistency_policy),
575 map_num(consistency_policies, s->consistency_policy));
576 ret = 1;
577 goto free_info;
578 }
579
580 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
581 if (sysfs_get_str(sra, NULL, "sync_action", buf, 20) <= 0) {
582 ret = 1;
583 goto free_info;
584 } else if (strcmp(buf, "reshape\n") == 0) {
585 pr_err("PPL cannot be enabled when reshape is in progress\n");
586 ret = 1;
587 goto free_info;
588 }
589 }
590
591 if (subarray) {
592 char *update;
593
594 if (s->consistency_policy == CONSISTENCY_POLICY_PPL)
595 update = "ppl";
596 else
597 update = "no-ppl";
598
599 sprintf(container_dev, "/dev/%s", st->container_devnm);
600
601 ret = Update_subarray(container_dev, subarray, update, NULL,
602 c->verbose);
603 if (ret)
604 goto free_info;
605 }
606
607 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
608 struct mdinfo info;
609
610 if (subarray) {
611 struct mdinfo *mdi;
612 int cfd;
613
614 cfd = open(container_dev, O_RDWR|O_EXCL);
615 if (cfd < 0) {
616 pr_err("Failed to open %s\n", container_dev);
617 ret = 1;
618 goto free_info;
619 }
620
621 ret = st->ss->load_container(st, cfd, st->container_devnm);
622 close(cfd);
623
624 if (ret) {
625 pr_err("Cannot read superblock for %s\n",
626 container_dev);
627 goto free_info;
628 }
629
630 mdi = st->ss->container_content(st, subarray);
631 info = *mdi;
632 free(mdi);
633 }
634
635 for (sd = sra->devs; sd; sd = sd->next) {
636 int dfd;
637 char *devpath;
638
639 devpath = map_dev(sd->disk.major, sd->disk.minor, 0);
640 dfd = dev_open(devpath, O_RDWR);
641 if (dfd < 0) {
642 pr_err("Failed to open %s\n", devpath);
643 ret = 1;
644 goto free_info;
645 }
646
647 if (!subarray) {
648 ret = st->ss->load_super(st, dfd, NULL);
649 if (ret) {
650 pr_err("Failed to load super-block.\n");
651 close(dfd);
652 goto free_info;
653 }
654
655 ret = st->ss->update_super(st, sra, "ppl",
656 devname,
657 c->verbose, 0, NULL);
658 if (ret) {
659 close(dfd);
660 st->ss->free_super(st);
661 goto free_info;
662 }
663 st->ss->getinfo_super(st, &info, NULL);
664 }
665
666 ret |= sysfs_set_num(sra, sd, "ppl_sector",
667 info.ppl_sector);
668 ret |= sysfs_set_num(sra, sd, "ppl_size",
669 info.ppl_size);
670
671 if (ret) {
672 pr_err("Failed to set PPL attributes for %s\n",
673 sd->sys_name);
674 close(dfd);
675 st->ss->free_super(st);
676 goto free_info;
677 }
678
679 ret = st->ss->write_init_ppl(st, &info, dfd);
680 if (ret)
681 pr_err("Failed to write PPL\n");
682
683 close(dfd);
684
685 if (!subarray)
686 st->ss->free_super(st);
687
688 if (ret)
689 goto free_info;
690 }
691 }
692
693 ret = sysfs_set_str(sra, NULL, "consistency_policy",
694 map_num(consistency_policies,
695 s->consistency_policy));
696 if (ret)
697 pr_err("Failed to change array consistency policy\n");
698
699 free_info:
700 sysfs_free(sra);
701 free_st:
702 free(st);
703 free(subarray);
704
705 return ret;
706 }
707
708 /*
709 * When reshaping an array we might need to back up some data.
710 * This is written to all spares with a 'super_block' describing it.
711 * The superblock goes 4K from the end of the used space on the
712 * device.
713 * It is written after the backup is complete.
714 * It has the following structure.
715 */
716
717 static struct mdp_backup_super {
718 char magic[16]; /* md_backup_data-1 or -2 */
719 __u8 set_uuid[16];
720 __u64 mtime;
721 /* start/sizes in 512byte sectors */
722 __u64 devstart; /* address on backup device/file of data */
723 __u64 arraystart;
724 __u64 length;
725 __u32 sb_csum; /* csum of preceding bytes. */
726 __u32 pad1;
727 __u64 devstart2; /* offset in to data of second section */
728 __u64 arraystart2;
729 __u64 length2;
730 __u32 sb_csum2; /* csum of preceding bytes. */
731 __u8 pad[512-68-32];
732 } __attribute__((aligned(512))) bsb, bsb2;
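/*
 * Hedged note, not in the original source: the fields above add up to
 * 100 bytes and pad[512-68-32] fills the remainder, so the structure is
 * intended to occupy exactly one 512-byte sector.  With a C11 compiler
 * that intent could be stated explicitly, for example:
 */
#if 0
_Static_assert(sizeof(struct mdp_backup_super) == 512,
	       "backup superblock must be exactly one sector");
#endif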
733
734 static __u32 bsb_csum(char *buf, int len)
735 {
736 int i;
737 int csum = 0;
738 for (i = 0; i < len; i++)
739 csum = (csum<<3) + buf[0];
740 return __cpu_to_le32(csum);
741 }
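/*
 * Illustrative sketch (not part of the original file): the checksum
 * covers the bytes that precede the csum field, so filling in the
 * primary checksum of the global 'bsb' would look roughly like this.
 * Note the loop above adds buf[0] on every iteration rather than
 * buf[i]; that appears to be long-standing behaviour, presumably kept
 * for compatibility with existing backup files, so it is left as-is.
 */
#if 0
	bsb.sb_csum = bsb_csum((char *)&bsb,
			       offsetof(struct mdp_backup_super, sb_csum));
#endif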
742
743 static int check_idle(struct supertype *st)
744 {
745 /* Check that all member arrays for this container, or the
746 * container of this array, are idle
747 */
748 char *container = (st->container_devnm[0]
749 ? st->container_devnm : st->devnm);
750 struct mdstat_ent *ent, *e;
751 int is_idle = 1;
752
753 ent = mdstat_read(0, 0);
754 for (e = ent ; e; e = e->next) {
755 if (!is_container_member(e, container))
756 continue;
757 /* a frozen array is not idle */
758 if (e->percent >= 0 || e->metadata_version[9] == '-') {
759 is_idle = 0;
760 break;
761 }
762 }
763 free_mdstat(ent);
764 return is_idle;
765 }
766
767 static int freeze_container(struct supertype *st)
768 {
769 char *container = (st->container_devnm[0]
770 ? st->container_devnm : st->devnm);
771
772 if (!check_idle(st))
773 return -1;
774
775 if (block_monitor(container, 1)) {
776 pr_err("failed to freeze container\n");
777 return -2;
778 }
779
780 return 1;
781 }
782
783 static void unfreeze_container(struct supertype *st)
784 {
785 char *container = (st->container_devnm[0]
786 ? st->container_devnm : st->devnm);
787
788 unblock_monitor(container, 1);
789 }
790
791 static int freeze(struct supertype *st)
792 {
793 /* Try to freeze resync/rebuild on this array/container.
794 * Return -1 if the array is busy,
795 * return -2 if the container cannot be frozen,
796 * return 0 if this kernel doesn't support 'frozen'
797 * return 1 if it worked.
798 */
799 if (st->ss->external)
800 return freeze_container(st);
801 else {
802 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
803 int err;
804 char buf[20];
805
806 if (!sra)
807 return -1;
808 /* Need to clear any 'read-auto' status */
809 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
810 strncmp(buf, "read-auto", 9) == 0)
811 sysfs_set_str(sra, NULL, "array_state", "clean");
812
813 err = sysfs_freeze_array(sra);
814 sysfs_free(sra);
815 return err;
816 }
817 }
818
819 static void unfreeze(struct supertype *st)
820 {
821 if (st->ss->external)
822 return unfreeze_container(st);
823 else {
824 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
825 char buf[20];
826
827 if (sra &&
828 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0 &&
829 strcmp(buf, "frozen\n") == 0)
830 sysfs_set_str(sra, NULL, "sync_action", "idle");
831 sysfs_free(sra);
832 }
833 }
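/*
 * Illustrative sketch (not part of the original file): the usual
 * freeze()/unfreeze() bracket around a shape change, mirroring how
 * Grow_reshape() uses these helpers further down.  'st' is assumed to
 * describe the array or its container.
 */
#if 0
	int frozen = freeze(st);

	if (frozen < -1)
		return 1;	/* container could not be frozen; reason already printed */
	if (frozen < 0)
		return 1;	/* array is busy with resync/recovery */

	/* ... request size/level/layout changes here ... */

	unfreeze(st);
#endif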
834
835 static void wait_reshape(struct mdinfo *sra)
836 {
837 int fd = sysfs_get_fd(sra, NULL, "sync_action");
838 char action[20];
839
840 if (fd < 0)
841 return;
842
843 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
844 strncmp(action, "reshape", 7) == 0)
845 sysfs_wait(fd, NULL);
846 close(fd);
847 }
848
849 static int reshape_super(struct supertype *st, unsigned long long size,
850 int level, int layout, int chunksize, int raid_disks,
851 int delta_disks, char *backup_file, char *dev,
852 int direction, int verbose)
853 {
854 /* nothing extra to check in the native case */
855 if (!st->ss->external)
856 return 0;
857 if (!st->ss->reshape_super || !st->ss->manage_reshape) {
858 pr_err("%s metadata does not support reshape\n",
859 st->ss->name);
860 return 1;
861 }
862
863 return st->ss->reshape_super(st, size, level, layout, chunksize,
864 raid_disks, delta_disks, backup_file, dev,
865 direction, verbose);
866 }
867
868 static void sync_metadata(struct supertype *st)
869 {
870 if (st->ss->external) {
871 if (st->update_tail) {
872 flush_metadata_updates(st);
873 st->update_tail = &st->updates;
874 } else
875 st->ss->sync_metadata(st);
876 }
877 }
878
879 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
880 {
881 /* when dealing with external metadata subarrays we need to be
882 * prepared to handle EAGAIN. The kernel may need to wait for
883 * mdmon to mark the array active so the kernel can handle
884 * allocations/writeback when preparing the reshape action
885 * (md_allow_write()). We temporarily disable safe_mode_delay
886 * to close a race with the array_state going clean before the
887 * next write to raid_disks / stripe_cache_size
888 */
889 char safe[50];
890 int rc;
891
892 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
893 if (!container ||
894 (strcmp(name, "raid_disks") != 0 &&
895 strcmp(name, "stripe_cache_size") != 0))
896 return sysfs_set_num(sra, NULL, name, n);
897
898 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
899 if (rc <= 0)
900 return -1;
901 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
902 rc = sysfs_set_num(sra, NULL, name, n);
903 if (rc < 0 && errno == EAGAIN) {
904 ping_monitor(container);
905 /* if we get EAGAIN here then the monitor is not active
906 * so stop trying
907 */
908 rc = sysfs_set_num(sra, NULL, name, n);
909 }
910 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
911 return rc;
912 }
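/*
 * Illustrative sketch (not part of the original file): growing a
 * subarray of an external container to five raid_disks goes through
 * this wrapper so that EAGAIN from the kernel is retried after poking
 * mdmon.  'container' and 'sra' are assumed to be set up already.
 */
#if 0
	if (subarray_set_num(container, sra, "raid_disks", 5) < 0)
		pr_err("failed to set raid_disks\n");
#endif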
913
914 int start_reshape(struct mdinfo *sra, int already_running,
915 int before_data_disks, int data_disks)
916 {
917 int err;
918 unsigned long long sync_max_to_set;
919
920 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
921 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
922 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
923 sra->reshape_progress);
924 if (before_data_disks <= data_disks)
925 sync_max_to_set = sra->reshape_progress / data_disks;
926 else
927 sync_max_to_set = (sra->component_size * data_disks
928 - sra->reshape_progress) / data_disks;
929 if (!already_running)
930 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
931 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
932 if (!already_running && err == 0) {
933 int cnt = 5;
934 do {
935 err = sysfs_set_str(sra, NULL, "sync_action",
936 "reshape");
937 if (err)
938 sleep(1);
939 } while (err && errno == EBUSY && cnt-- > 0);
940 }
941 return err;
942 }
943
944 void abort_reshape(struct mdinfo *sra)
945 {
946 sysfs_set_str(sra, NULL, "sync_action", "idle");
947 /*
948 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
949 * suspend_hi to decrease as well as increase.")
950 * you could only increase suspend_{lo,hi} unless the region they
951 * covered was empty. So to reset to 0, you need to push suspend_lo
952 * up past suspend_hi first. So to maximize the chance of mdadm
953 * working on all kernels, we want to keep doing that.
954 */
955 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
956 sysfs_set_num(sra, NULL, "suspend_hi", 0);
957 sysfs_set_num(sra, NULL, "suspend_lo", 0);
958 sysfs_set_num(sra, NULL, "sync_min", 0);
959 // It isn't safe to reset sync_max as we aren't monitoring.
960 // Array really should be stopped at this point.
961 }
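/*
 * Illustrative sketch (not part of the original file): a caller that
 * starts a fresh reshape and backs out if the kernel refuses, assuming
 * 'sra' describes the array and odata/ndata are the data-disk counts
 * before and after the change.
 */
#if 0
	if (start_reshape(sra, 0, odata, ndata) != 0) {
		pr_err("Failed to initiate reshape!\n");
		abort_reshape(sra);
	}
#endif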
962
963 int remove_disks_for_takeover(struct supertype *st,
964 struct mdinfo *sra,
965 int layout)
966 {
967 int nr_of_copies;
968 struct mdinfo *remaining;
969 int slot;
970
971 if (st->ss->external) {
972 int rv = 0;
973 struct mdinfo *arrays = st->ss->container_content(st, NULL);
974 /*
975 * container_content returns the list of arrays in the container.
976 * If arrays->next is not NULL there are at least two arrays in
977 * the container and the operation should be blocked.
978 */
979 if (arrays) {
980 if (arrays->next)
981 rv = 1;
982 sysfs_free(arrays);
983 if (rv) {
984 pr_err("Error. Cannot perform operation on /dev/%s\n", st->devnm);
985 pr_err("For this operation there MUST be a single array in the container\n");
986 return rv;
987 }
988 }
989 }
990
991 if (sra->array.level == 10)
992 nr_of_copies = layout & 0xff;
993 else if (sra->array.level == 1)
994 nr_of_copies = sra->array.raid_disks;
995 else
996 return 1;
997
998 remaining = sra->devs;
999 sra->devs = NULL;
1000 /* for each 'copy', select one device and remove from the list. */
1001 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
1002 struct mdinfo **diskp;
1003 int found = 0;
1004
1005 /* Find a working device to keep */
1006 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
1007 struct mdinfo *disk = *diskp;
1008
1009 if (disk->disk.raid_disk < slot)
1010 continue;
1011 if (disk->disk.raid_disk >= slot + nr_of_copies)
1012 continue;
1013 if (disk->disk.state & (1<<MD_DISK_REMOVED))
1014 continue;
1015 if (disk->disk.state & (1<<MD_DISK_FAULTY))
1016 continue;
1017 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
1018 continue;
1019
1020 /* We have found a good disk to use! */
1021 *diskp = disk->next;
1022 disk->next = sra->devs;
1023 sra->devs = disk;
1024 found = 1;
1025 break;
1026 }
1027 if (!found)
1028 break;
1029 }
1030
1031 if (slot < sra->array.raid_disks) {
1032 /* didn't find all slots */
1033 struct mdinfo **e;
1034 e = &remaining;
1035 while (*e)
1036 e = &(*e)->next;
1037 *e = sra->devs;
1038 sra->devs = remaining;
1039 return 1;
1040 }
1041
1042 /* Remove all 'remaining' devices from the array */
1043 while (remaining) {
1044 struct mdinfo *sd = remaining;
1045 remaining = sd->next;
1046
1047 sysfs_set_str(sra, sd, "state", "faulty");
1048 sysfs_set_str(sra, sd, "slot", "none");
1049 /* for external metadata disks should be removed in mdmon */
1050 if (!st->ss->external)
1051 sysfs_set_str(sra, sd, "state", "remove");
1052 sd->disk.state |= (1<<MD_DISK_REMOVED);
1053 sd->disk.state &= ~(1<<MD_DISK_SYNC);
1054 sd->next = sra->devs;
1055 sra->devs = sd;
1056 }
1057 return 0;
1058 }
1059
1060 void reshape_free_fdlist(int *fdlist,
1061 unsigned long long *offsets,
1062 int size)
1063 {
1064 int i;
1065
1066 for (i = 0; i < size; i++)
1067 if (fdlist[i] >= 0)
1068 close(fdlist[i]);
1069
1070 free(fdlist);
1071 free(offsets);
1072 }
1073
1074 int reshape_prepare_fdlist(char *devname,
1075 struct mdinfo *sra,
1076 int raid_disks,
1077 int nrdisks,
1078 unsigned long blocks,
1079 char *backup_file,
1080 int *fdlist,
1081 unsigned long long *offsets)
1082 {
1083 int d = 0;
1084 struct mdinfo *sd;
1085
1086 enable_fds(nrdisks);
1087 for (d = 0; d <= nrdisks; d++)
1088 fdlist[d] = -1;
1089 d = raid_disks;
1090 for (sd = sra->devs; sd; sd = sd->next) {
1091 if (sd->disk.state & (1<<MD_DISK_FAULTY))
1092 continue;
1093 if (sd->disk.state & (1<<MD_DISK_SYNC) &&
1094 sd->disk.raid_disk < raid_disks) {
1095 char *dn = map_dev(sd->disk.major, sd->disk.minor, 1);
1096 fdlist[sd->disk.raid_disk] = dev_open(dn, O_RDONLY);
1097 offsets[sd->disk.raid_disk] = sd->data_offset*512;
1098 if (fdlist[sd->disk.raid_disk] < 0) {
1099 pr_err("%s: cannot open component %s\n",
1100 devname, dn ? dn : "-unknown-");
1101 d = -1;
1102 goto release;
1103 }
1104 } else if (backup_file == NULL) {
1105 /* spare */
1106 char *dn = map_dev(sd->disk.major, sd->disk.minor, 1);
1107 fdlist[d] = dev_open(dn, O_RDWR);
1108 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
1109 if (fdlist[d] < 0) {
1110 pr_err("%s: cannot open component %s\n",
1111 devname, dn ? dn : "-unknown-");
1112 d = -1;
1113 goto release;
1114 }
1115 d++;
1116 }
1117 }
1118 release:
1119 return d;
1120 }
1121
1122 int reshape_open_backup_file(char *backup_file,
1123 int fd,
1124 char *devname,
1125 long blocks,
1126 int *fdlist,
1127 unsigned long long *offsets,
1128 char *sys_name,
1129 int restart)
1130 {
1131 /* Return 1 on success, 0 on any form of failure */
1132 /* need to check backup file is large enough */
1133 char buf[512];
1134 struct stat stb;
1135 unsigned int dev;
1136 int i;
1137
1138 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
1139 S_IRUSR | S_IWUSR);
1140 *offsets = 8 * 512;
1141 if (*fdlist < 0) {
1142 pr_err("%s: cannot create backup file %s: %s\n",
1143 devname, backup_file, strerror(errno));
1144 return 0;
1145 }
1146 /* Guard against backup file being on array device.
1147 * If array is partitioned or if LVM etc is in the
1148 * way this will not notice, but it is better than
1149 * nothing.
1150 */
1151 fstat(*fdlist, &stb);
1152 dev = stb.st_dev;
1153 fstat(fd, &stb);
1154 if (stb.st_rdev == dev) {
1155 pr_err("backup file must NOT be on the array being reshaped.\n");
1156 close(*fdlist);
1157 return 0;
1158 }
1159
1160 memset(buf, 0, 512);
1161 for (i=0; i < blocks + 8 ; i++) {
1162 if (write(*fdlist, buf, 512) != 512) {
1163 pr_err("%s: cannot create backup file %s: %s\n",
1164 devname, backup_file, strerror(errno));
1165 return 0;
1166 }
1167 }
1168 if (fsync(*fdlist) != 0) {
1169 pr_err("%s: cannot create backup file %s: %s\n",
1170 devname, backup_file, strerror(errno));
1171 return 0;
1172 }
1173
1174 if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) {
1175 char *bu = make_backup(sys_name);
1176 if (symlink(backup_file, bu))
1177 pr_err("Recording backup file in " MAP_DIR " failed: %s\n",
1178 strerror(errno));
1179 free(bu);
1180 }
1181
1182 return 1;
1183 }
1184
1185 unsigned long compute_backup_blocks(int nchunk, int ochunk,
1186 unsigned int ndata, unsigned int odata)
1187 {
1188 unsigned long a, b, blocks;
1189 /* So how much do we need to back up?
1190 * We need an amount of data which is both a whole number of
1191 * old stripes and a whole number of new stripes,
1192 * i.e. the LCM of (chunksize*datadisks) over the old and new layouts.
1193 */
1194 a = (ochunk/512) * odata;
1195 b = (nchunk/512) * ndata;
1196 /* Find GCD */
1197 a = GCD(a, b);
1198 /* LCM == product / GCD */
1199 blocks = (unsigned long)(ochunk/512) * (unsigned long)(nchunk/512) *
1200 odata * ndata / a;
1201
1202 return blocks;
1203 }
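/*
 * Worked example (not in the original source): growing a RAID5 from
 * 3 to 4 devices with a 512KiB chunk gives odata=2, ndata=3, so
 * a = 1024*2 = 2048 and b = 1024*3 = 3072 sectors, GCD = 1024, and
 * blocks = 1024*1024*2*3/1024 = 6144 sectors (3MiB), which is exactly
 * 3 old stripes and 2 new stripes.
 */
#if 0
	unsigned long blocks = compute_backup_blocks(512*1024, 512*1024, 3, 2);
	/* blocks == 6144 */
#endif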
1204
1205 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
1206 {
1207 /* Based on the current array state in info->array and
1208 * the changes in info->new_* etc, determine:
1209 * - whether the change is possible
1210 * - Intermediate level/raid_disks/layout
1211 * - whether a restriping reshape is needed
1212 * - number of sectors in minimum change unit. This
1213 * will cover a whole number of stripes in 'before' and
1214 * 'after'.
1215 *
1216 * Return message if the change should be rejected
1217 * NULL if the change can be achieved
1218 *
1219 * This can be called as part of starting a reshape, or
1220 * when assembling an array that is undergoing reshape.
1221 */
1222 int near, far, offset, copies;
1223 int new_disks;
1224 int old_chunk, new_chunk;
1225 /* delta_parity records change in number of devices
1226 * caused by level change
1227 */
1228 int delta_parity = 0;
1229
1230 memset(re, 0, sizeof(*re));
1231
1232 /* If a new level not explicitly given, we assume no-change */
1233 if (info->new_level == UnSet)
1234 info->new_level = info->array.level;
1235
1236 if (info->new_chunk)
1237 switch (info->new_level) {
1238 case 0:
1239 case 4:
1240 case 5:
1241 case 6:
1242 case 10:
1243 /* chunk size is meaningful, must divide component_size
1244 * evenly
1245 */
1246 if (info->component_size % (info->new_chunk/512)) {
1247 unsigned long long shrink = info->component_size;
1248 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1249 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1250 info->new_chunk/1024, info->component_size/2);
1251 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1252 devname, shrink/2);
1253 pr_err("will shrink the array so the given chunk size would work.\n");
1254 return "";
1255 }
1256 break;
1257 default:
1258 return "chunk size not meaningful for this level";
1259 }
1260 else
1261 info->new_chunk = info->array.chunk_size;
1262
1263 switch (info->array.level) {
1264 default:
1265 return "No reshape is possible for this RAID level";
1266 case LEVEL_LINEAR:
1267 if (info->delta_disks != UnSet)
1268 return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
1269 else
1270 return "Only --add is supported for LINEAR, other --grow options are not meaningful";
1271 case 1:
1272 /* RAID1 can convert to RAID1 with different disks, or
1273 * raid5 with 2 disks, or
1274 * raid0 with 1 disk
1275 */
1276 if (info->new_level > 1 && (info->component_size & 7))
1277 return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
1278 if (info->new_level == 0) {
1279 if (info->delta_disks != UnSet &&
1280 info->delta_disks != 0)
1281 return "Cannot change number of disks with RAID1->RAID0 conversion";
1282 re->level = 0;
1283 re->before.data_disks = 1;
1284 re->after.data_disks = 1;
1285 return NULL;
1286 }
1287 if (info->new_level == 1) {
1288 if (info->delta_disks == UnSet)
1289 /* Don't know what to do */
1290 return "no change requested for Growing RAID1";
1291 re->level = 1;
1292 return NULL;
1293 }
1294 if (info->array.raid_disks != 2 && info->new_level == 5)
1295 return "Can only convert a 2-device array to RAID5";
1296 if (info->array.raid_disks == 2 && info->new_level == 5) {
1297 re->level = 5;
1298 re->before.data_disks = 1;
1299 if (info->delta_disks != UnSet &&
1300 info->delta_disks != 0)
1301 re->after.data_disks = 1 + info->delta_disks;
1302 else
1303 re->after.data_disks = 1;
1304 if (re->after.data_disks < 1)
1305 return "Number of disks too small for RAID5";
1306
1307 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1308 info->array.chunk_size = 65536;
1309 break;
1310 }
1311 /* Could do some multi-stage conversions, but leave that to
1312 * later.
1313 */
1314 return "Impossible level change requested for RAID1";
1315
1316 case 10:
1317 /* RAID10 can be converted from near mode to
1318 * RAID0 by removing some devices.
1319 * It can also be reshaped if the kernel supports
1320 * new_data_offset.
1321 */
1322 switch (info->new_level) {
1323 case 0:
1324 if ((info->array.layout & ~0xff) != 0x100)
1325 return "Cannot Grow RAID10 with far/offset layout";
1326 /*
1327 * number of devices must be multiple of
1328 * number of copies
1329 */
1330 if (info->array.raid_disks %
1331 (info->array.layout & 0xff))
1332 return "RAID10 layout too complex for Grow operation";
1333
1334 new_disks = (info->array.raid_disks /
1335 (info->array.layout & 0xff));
1336 if (info->delta_disks == UnSet)
1337 info->delta_disks = (new_disks
1338 - info->array.raid_disks);
1339
1340 if (info->delta_disks !=
1341 new_disks - info->array.raid_disks)
1342 return "New number of raid-devices impossible for RAID10";
1343 if (info->new_chunk &&
1344 info->new_chunk != info->array.chunk_size)
1345 return "Cannot change chunk-size with RAID10 Grow";
1346
1347 /* looks good */
1348 re->level = 0;
1349 re->before.data_disks = new_disks;
1350 re->after.data_disks = re->before.data_disks;
1351 return NULL;
1352
1353 case 10:
1354 near = info->array.layout & 0xff;
1355 far = (info->array.layout >> 8) & 0xff;
1356 offset = info->array.layout & 0x10000;
1357 if (far > 1 && !offset)
1358 return "Cannot reshape RAID10 in far-mode";
1359 copies = near * far;
1360
1361 old_chunk = info->array.chunk_size * far;
1362
1363 if (info->new_layout == UnSet)
1364 info->new_layout = info->array.layout;
1365 else {
1366 near = info->new_layout & 0xff;
1367 far = (info->new_layout >> 8) & 0xff;
1368 offset = info->new_layout & 0x10000;
1369 if (far > 1 && !offset)
1370 return "Cannot reshape RAID10 to far-mode";
1371 if (near * far != copies)
1372 return "Cannot change number of copies when reshaping RAID10";
1373 }
1374 if (info->delta_disks == UnSet)
1375 info->delta_disks = 0;
1376 new_disks = (info->array.raid_disks +
1377 info->delta_disks);
1378
1379 new_chunk = info->new_chunk * far;
1380
1381 re->level = 10;
1382 re->before.layout = info->array.layout;
1383 re->before.data_disks = info->array.raid_disks;
1384 re->after.layout = info->new_layout;
1385 re->after.data_disks = new_disks;
1386 /* For RAID10 we don't do backup but do allow reshape,
1387 * so set backup_blocks to INVALID_SECTORS rather than
1388 * zero.
1389 * And there is no need to synchronise stripes on both
1390 * 'old' and 'new'. So the important
1391 * number is the minimum data_offset difference
1392 * which is the larger of (offset copies * chunk).
1393 */
1394 re->backup_blocks = INVALID_SECTORS;
1395 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1396 if (new_disks < re->before.data_disks &&
1397 info->space_after < re->min_offset_change)
1398 /* Reduce component size by one chunk */
1399 re->new_size = (info->component_size -
1400 re->min_offset_change);
1401 else
1402 re->new_size = info->component_size;
1403 re->new_size = re->new_size * new_disks / copies;
1404 return NULL;
1405
1406 default:
1407 return "RAID10 can only be changed to RAID0";
1408 }
1409 case 0:
1410 /* RAID0 can be converted to RAID10, or to RAID456 */
1411 if (info->new_level == 10) {
1412 if (info->new_layout == UnSet &&
1413 info->delta_disks == UnSet) {
1414 /* Assume near=2 layout */
1415 info->new_layout = 0x102;
1416 info->delta_disks = info->array.raid_disks;
1417 }
1418 if (info->new_layout == UnSet) {
1419 int copies = 1 + (info->delta_disks
1420 / info->array.raid_disks);
1421 if (info->array.raid_disks * (copies-1) !=
1422 info->delta_disks)
1423 return "Impossible number of devices for RAID0->RAID10";
1424 info->new_layout = 0x100 + copies;
1425 }
1426 if (info->delta_disks == UnSet) {
1427 int copies = info->new_layout & 0xff;
1428 if (info->new_layout != 0x100 + copies)
1429 return "New layout impossible for RAID0->RAID10";
1430 info->delta_disks = (copies - 1) *
1431 info->array.raid_disks;
1432 }
1433 if (info->new_chunk &&
1434 info->new_chunk != info->array.chunk_size)
1435 return "Cannot change chunk-size with RAID0->RAID10";
1436 /* looks good */
1437 re->level = 10;
1438 re->before.data_disks = (info->array.raid_disks +
1439 info->delta_disks);
1440 re->after.data_disks = re->before.data_disks;
1441 re->before.layout = info->new_layout;
1442 return NULL;
1443 }
1444
1445 /* RAID0 can also convert to RAID0/4/5/6 by first converting to
1446 * a raid4 style layout of the final level.
1447 */
1448 switch (info->new_level) {
1449 case 4:
1450 delta_parity = 1;
1451 case 0:
1452 re->level = 4;
1453 re->before.layout = 0;
1454 break;
1455 case 5:
1456 delta_parity = 1;
1457 re->level = 5;
1458 re->before.layout = ALGORITHM_PARITY_N;
1459 if (info->new_layout == UnSet)
1460 info->new_layout = map_name(r5layout, "default");
1461 break;
1462 case 6:
1463 delta_parity = 2;
1464 re->level = 6;
1465 re->before.layout = ALGORITHM_PARITY_N;
1466 if (info->new_layout == UnSet)
1467 info->new_layout = map_name(r6layout, "default");
1468 break;
1469 default:
1470 return "Impossible level change requested";
1471 }
1472 re->before.data_disks = info->array.raid_disks;
1473 /* determining 'after' layout happens outside this 'switch' */
1474 break;
1475
1476 case 4:
1477 info->array.layout = ALGORITHM_PARITY_N;
1478 case 5:
1479 switch (info->new_level) {
1480 case 0:
1481 delta_parity = -1;
1482 case 4:
1483 re->level = info->array.level;
1484 re->before.data_disks = info->array.raid_disks - 1;
1485 re->before.layout = info->array.layout;
1486 break;
1487 case 5:
1488 re->level = 5;
1489 re->before.data_disks = info->array.raid_disks - 1;
1490 re->before.layout = info->array.layout;
1491 break;
1492 case 6:
1493 delta_parity = 1;
1494 re->level = 6;
1495 re->before.data_disks = info->array.raid_disks - 1;
1496 switch (info->array.layout) {
1497 case ALGORITHM_LEFT_ASYMMETRIC:
1498 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1499 break;
1500 case ALGORITHM_RIGHT_ASYMMETRIC:
1501 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1502 break;
1503 case ALGORITHM_LEFT_SYMMETRIC:
1504 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1505 break;
1506 case ALGORITHM_RIGHT_SYMMETRIC:
1507 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1508 break;
1509 case ALGORITHM_PARITY_0:
1510 re->before.layout = ALGORITHM_PARITY_0_6;
1511 break;
1512 case ALGORITHM_PARITY_N:
1513 re->before.layout = ALGORITHM_PARITY_N_6;
1514 break;
1515 default:
1516 return "Cannot convert an array with this layout";
1517 }
1518 break;
1519 case 1:
1520 if (info->array.raid_disks != 2)
1521 return "Can only convert a 2-device array to RAID1";
1522 if (info->delta_disks != UnSet &&
1523 info->delta_disks != 0)
1524 return "Cannot set raid_disk when converting RAID5->RAID1";
1525 re->level = 1;
1526 info->new_chunk = 0;
1527 return NULL;
1528 default:
1529 return "Impossible level change requested";
1530 }
1531 break;
1532 case 6:
1533 switch (info->new_level) {
1534 case 4:
1535 case 5:
1536 delta_parity = -1;
1537 case 6:
1538 re->level = 6;
1539 re->before.data_disks = info->array.raid_disks - 2;
1540 re->before.layout = info->array.layout;
1541 break;
1542 default:
1543 return "Impossible level change requested";
1544 }
1545 break;
1546 }
1547
1548 /* If we reached here then it looks like a re-stripe is
1549 * happening. We have determined the intermediate level
1550 * and initial raid_disks/layout and stored these in 're'.
1551 *
1552 * We need to deduce the final layout that can be atomically
1553 * converted to the end state.
1554 */
1555 switch (info->new_level) {
1556 case 0:
1557 /* We can only get to RAID0 from RAID4 or RAID5
1558 * with appropriate layout and one extra device
1559 */
1560 if (re->level != 4 && re->level != 5)
1561 return "Cannot convert to RAID0 from this level";
1562
1563 switch (re->level) {
1564 case 4:
1565 re->before.layout = 0;
1566 re->after.layout = 0;
1567 break;
1568 case 5:
1569 re->after.layout = ALGORITHM_PARITY_N;
1570 break;
1571 }
1572 break;
1573
1574 case 4:
1575 /* We can only get to RAID4 from RAID5 */
1576 if (re->level != 4 && re->level != 5)
1577 return "Cannot convert to RAID4 from this level";
1578
1579 switch (re->level) {
1580 case 4:
1581 re->after.layout = 0;
1582 break;
1583 case 5:
1584 re->after.layout = ALGORITHM_PARITY_N;
1585 break;
1586 }
1587 break;
1588
1589 case 5:
1590 /* We get to RAID5 from RAID5 or RAID6 */
1591 if (re->level != 5 && re->level != 6)
1592 return "Cannot convert to RAID5 from this level";
1593
1594 switch (re->level) {
1595 case 5:
1596 if (info->new_layout == UnSet)
1597 re->after.layout = re->before.layout;
1598 else
1599 re->after.layout = info->new_layout;
1600 break;
1601 case 6:
1602 if (info->new_layout == UnSet)
1603 info->new_layout = re->before.layout;
1604
1605 /* after.layout needs to be raid6 version of new_layout */
1606 if (info->new_layout == ALGORITHM_PARITY_N)
1607 re->after.layout = ALGORITHM_PARITY_N;
1608 else {
1609 char layout[40];
1610 char *ls = map_num(r5layout, info->new_layout);
1611 int l;
1612 if (ls) {
1613 /* Current RAID6 layout has a RAID5
1614 * equivalent - good
1615 */
1616 strcat(strcpy(layout, ls), "-6");
1617 l = map_name(r6layout, layout);
1618 if (l == UnSet)
1619 return "Cannot find RAID6 layout to convert to";
1620 } else {
1621 /* Current RAID6 has no equivalent.
1622 * If it is already a '-6' layout we
1623 * can leave it unchanged, else we must
1624 * fail
1625 */
1626 ls = map_num(r6layout,
1627 info->new_layout);
1628 if (!ls ||
1629 strcmp(ls+strlen(ls)-2, "-6") != 0)
1630 return "Please specify new layout";
1631 l = info->new_layout;
1632 }
1633 re->after.layout = l;
1634 }
1635 }
1636 break;
1637
1638 case 6:
1639 /* We must already be at level 6 */
1640 if (re->level != 6)
1641 return "Impossible level change";
1642 if (info->new_layout == UnSet)
1643 re->after.layout = info->array.layout;
1644 else
1645 re->after.layout = info->new_layout;
1646 break;
1647 default:
1648 return "Impossible level change requested";
1649 }
1650 if (info->delta_disks == UnSet)
1651 info->delta_disks = delta_parity;
1652
1653 re->after.data_disks =
1654 (re->before.data_disks + info->delta_disks - delta_parity);
1655
1656 switch (re->level) {
1657 case 6:
1658 re->parity = 2;
1659 break;
1660 case 4:
1661 case 5:
1662 re->parity = 1;
1663 break;
1664 default:
1665 re->parity = 0;
1666 break;
1667 }
1668 /* So we have a restripe operation, we need to calculate the number
1669 * of blocks per reshape operation.
1670 */
1671 re->new_size = info->component_size * re->before.data_disks;
1672 if (info->new_chunk == 0)
1673 info->new_chunk = info->array.chunk_size;
1674 if (re->after.data_disks == re->before.data_disks &&
1675 re->after.layout == re->before.layout &&
1676 info->new_chunk == info->array.chunk_size) {
1677 /* Nothing to change, can change level immediately. */
1678 re->level = info->new_level;
1679 re->backup_blocks = 0;
1680 return NULL;
1681 }
1682 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1683 /* chunk and layout changes make no difference */
1684 re->level = info->new_level;
1685 re->backup_blocks = 0;
1686 return NULL;
1687 }
1688
1689 if (re->after.data_disks == re->before.data_disks &&
1690 get_linux_version() < 2006032)
1691 return "in-place reshape is not safe before 2.6.32 - sorry.";
1692
1693 if (re->after.data_disks < re->before.data_disks &&
1694 get_linux_version() < 2006030)
1695 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1696
1697 re->backup_blocks = compute_backup_blocks(
1698 info->new_chunk, info->array.chunk_size,
1699 re->after.data_disks, re->before.data_disks);
1700 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1701
1702 re->new_size = info->component_size * re->after.data_disks;
1703 return NULL;
1704 }
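/*
 * Illustrative sketch (not part of the original file): analyse_change()
 * is a pure planning step; callers reject the request when it returns a
 * message (an empty string means the error was already printed).
 */
#if 0
	struct reshape reshape;
	char *msg = analyse_change(devname, info, &reshape);

	if (msg) {
		if (msg[0])
			pr_err("%s\n", msg);
		return 1;
	}
	/* reshape.before/.after and reshape.backup_blocks now describe
	 * the restripe that is needed, if any. */
#endif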
1705
1706 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1707 char *text_version)
1708 {
1709 struct mdinfo *info;
1710 char *subarray;
1711 int ret_val = -1;
1712
1713 if ((st == NULL) || (sra == NULL))
1714 return ret_val;
1715
1716 if (text_version == NULL)
1717 text_version = sra->text_version;
1718 subarray = strchr(text_version + 1, '/')+1;
1719 info = st->ss->container_content(st, subarray);
1720 if (info) {
1721 unsigned long long current_size = 0;
1722 unsigned long long new_size = info->custom_array_size/2;
1723
1724 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1725 new_size > current_size) {
1726 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1727 < 0)
1728 dprintf("Error: Cannot set array size");
1729 else {
1730 ret_val = 0;
1731 dprintf("Array size changed");
1732 }
1733 dprintf_cont(" from %llu to %llu.\n",
1734 current_size, new_size);
1735 }
1736 sysfs_free(info);
1737 } else
1738 dprintf("Error: set_array_size(): info pointer is NULL\n");
1739
1740 return ret_val;
1741 }
1742
1743 static int reshape_array(char *container, int fd, char *devname,
1744 struct supertype *st, struct mdinfo *info,
1745 int force, struct mddev_dev *devlist,
1746 unsigned long long data_offset,
1747 char *backup_file, int verbose, int forked,
1748 int restart, int freeze_reshape);
1749 static int reshape_container(char *container, char *devname,
1750 int mdfd,
1751 struct supertype *st,
1752 struct mdinfo *info,
1753 int force,
1754 char *backup_file, int verbose,
1755 int forked, int restart, int freeze_reshape);
1756
1757 int Grow_reshape(char *devname, int fd,
1758 struct mddev_dev *devlist,
1759 unsigned long long data_offset,
1760 struct context *c, struct shape *s)
1761 {
1762 /* Make some changes in the shape of an array.
1763 * The kernel must support the change.
1764 *
1765 * There are three different changes. Each can trigger
1766 * a resync or recovery so we freeze that until we have
1767 * requested everything (if kernel supports freezing - 2.6.30).
1768 * The steps are:
1769 * - change size (i.e. component_size)
1770 * - change level
1771 * - change layout/chunksize/ndisks
1772 *
1773 * The last can require a reshape. It is different on different
1774 * levels so we need to check the level before actioning it.
1775 * Sometimes the level change needs to be requested after the
1776 * reshape (e.g. raid6->raid5, raid5->raid0)
1777 *
1778 */
1779 struct mdu_array_info_s array;
1780 int rv = 0;
1781 struct supertype *st;
1782 char *subarray = NULL;
1783
1784 int frozen;
1785 int changed = 0;
1786 char *container = NULL;
1787 int cfd = -1;
1788
1789 struct mddev_dev *dv;
1790 int added_disks;
1791
1792 struct mdinfo info;
1793 struct mdinfo *sra;
1794
1795 if (md_get_array_info(fd, &array) < 0) {
1796 pr_err("%s is not an active md array - aborting\n",
1797 devname);
1798 return 1;
1799 }
1800 if (s->level != UnSet && s->chunk) {
1801 pr_err("Cannot change array level in the same operation as changing chunk size.\n");
1802 return 1;
1803 }
1804
1805 if (data_offset != INVALID_SECTORS && array.level != 10 &&
1806 (array.level < 4 || array.level > 6)) {
1807 pr_err("--grow --data-offset not yet supported\n");
1808 return 1;
1809 }
1810
1811 if (s->size > 0 &&
1812 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1813 pr_err("cannot change component size at the same time as other changes.\n"
1814 " Change size first, then check data is intact before making other changes.\n");
1815 return 1;
1816 }
1817
1818 if (s->raiddisks && s->raiddisks < array.raid_disks &&
1819 array.level > 1 && get_linux_version() < 2006032 &&
1820 !check_env("MDADM_FORCE_FEWER")) {
1821 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1822 " Please use a newer kernel\n");
1823 return 1;
1824 }
1825
1826 if (array.level > 1 && s->size > 1 &&
1827 (unsigned long long) (array.chunk_size / 1024) > s->size) {
1828 pr_err("component size must be larger than chunk size.\n");
1829 return 1;
1830 }
1831
1832 st = super_by_fd(fd, &subarray);
1833 if (!st) {
1834 pr_err("Unable to determine metadata format for %s\n", devname);
1835 return 1;
1836 }
1837 if (s->raiddisks > st->max_devs) {
1838 pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs);
1839 return 1;
1840 }
1841 if (s->level == 0 &&
1842 (array.state & (1<<MD_SB_BITMAP_PRESENT)) &&
1843 !(array.state & (1<<MD_SB_CLUSTERED))) {
1844 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
1845 if (md_set_array_info(fd, &array)!= 0) {
1846 pr_err("failed to remove internal bitmap.\n");
1847 return 1;
1848 }
1849 }
1850
1851 /* in the external case we need to check that the requested reshape is
1852 * supported, and perform an initial check that the container holds the
1853 * pre-requisite spare devices (mdmon owns final validation)
1854 */
1855 if (st->ss->external) {
1856 int retval;
1857
1858 if (subarray) {
1859 container = st->container_devnm;
1860 cfd = open_dev_excl(st->container_devnm);
1861 } else {
1862 container = st->devnm;
1863 close(fd);
1864 cfd = open_dev_excl(st->devnm);
1865 fd = cfd;
1866 }
1867 if (cfd < 0) {
1868 pr_err("Unable to open container for %s\n", devname);
1869 free(subarray);
1870 return 1;
1871 }
1872
1873 retval = st->ss->load_container(st, cfd, NULL);
1874
1875 if (retval) {
1876 pr_err("Cannot read superblock for %s\n", devname);
1877 free(subarray);
1878 return 1;
1879 }
1880
1881 /* check if operation is supported for metadata handler */
1882 if (st->ss->container_content) {
1883 struct mdinfo *cc = NULL;
1884 struct mdinfo *content = NULL;
1885
1886 cc = st->ss->container_content(st, subarray);
1887 for (content = cc; content ; content = content->next) {
1888 int allow_reshape = 1;
1889
1890 /* check if reshape is allowed based on metadata
1891 * indications stored in content.array.status
1892 */
1893 if (content->array.state &
1894 (1 << MD_SB_BLOCK_VOLUME))
1895 allow_reshape = 0;
1896 if (content->array.state &
1897 (1 << MD_SB_BLOCK_CONTAINER_RESHAPE))
1898 allow_reshape = 0;
1899 if (!allow_reshape) {
1900 pr_err("cannot reshape arrays in container with unsupported metadata: %s(%s)\n",
1901 devname, container);
1902 sysfs_free(cc);
1903 free(subarray);
1904 return 1;
1905 }
1906 if (content->consistency_policy ==
1907 CONSISTENCY_POLICY_PPL) {
1908 pr_err("Operation not supported when ppl consistency policy is enabled\n");
1909 sysfs_free(cc);
1910 free(subarray);
1911 return 1;
1912 }
1913 }
1914 sysfs_free(cc);
1915 }
1916 if (mdmon_running(container))
1917 st->update_tail = &st->updates;
1918 }
1919
1920 added_disks = 0;
1921 for (dv = devlist; dv; dv = dv->next)
1922 added_disks++;
1923 if (s->raiddisks > array.raid_disks &&
1924 array.spare_disks + added_disks <
1925 (s->raiddisks - array.raid_disks) &&
1926 !c->force) {
1927 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
1928 " Use --force to over-ride this check.\n",
1929 s->raiddisks - array.raid_disks,
1930 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1931 array.spare_disks + added_disks);
1932 return 1;
1933 }
1934
1935 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS |
1936 GET_STATE | GET_VERSION);
1937 if (sra) {
1938 if (st->ss->external && subarray == NULL) {
1939 array.level = LEVEL_CONTAINER;
1940 sra->array.level = LEVEL_CONTAINER;
1941 }
1942 } else {
1943 pr_err("failed to read sysfs parameters for %s\n",
1944 devname);
1945 return 1;
1946 }
1947 frozen = freeze(st);
1948 if (frozen < -1) {
1949 /* freeze() already spewed the reason */
1950 sysfs_free(sra);
1951 return 1;
1952 } else if (frozen < 0) {
1953 pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname);
1954 sysfs_free(sra);
1955 return 1;
1956 }
1957
1958 /* ========= set size =============== */
1959 if (s->size > 0 &&
1960 (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1961 unsigned long long orig_size = get_component_size(fd)/2;
1962 unsigned long long min_csize;
1963 struct mdinfo *mdi;
1964 int raid0_takeover = 0;
1965
1966 if (orig_size == 0)
1967 orig_size = (unsigned) array.size;
1968
1969 if (orig_size == 0) {
1970 pr_err("Cannot set device size in this type of array.\n");
1971 rv = 1;
1972 goto release;
1973 }
1974
1975 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1976 devname, APPLY_METADATA_CHANGES,
1977 c->verbose > 0)) {
1978 rv = 1;
1979 goto release;
1980 }
1981 sync_metadata(st);
1982 if (st->ss->external) {
1983 /* metadata can have a size limitation;
1984 * update the size value according to metadata information
1985 */
1986 struct mdinfo *sizeinfo =
1987 st->ss->container_content(st, subarray);
1988 if (sizeinfo) {
1989 unsigned long long new_size =
1990 sizeinfo->custom_array_size/2;
1991 int data_disks = get_data_disks(
1992 sizeinfo->array.level,
1993 sizeinfo->array.layout,
1994 sizeinfo->array.raid_disks);
1995 new_size /= data_disks;
1996 dprintf("Metadata size correction from %llu to %llu (%llu)\n",
1997 orig_size, new_size,
1998 new_size * data_disks);
1999 s->size = new_size;
2000 sysfs_free(sizeinfo);
2001 }
2002 }
2003
2004 /* Update the size of each member device in case
2005 * they have been resized. This will never reduce
2006 * below the current used-size. The "size" attribute
2007 * understands '0' to mean 'max'.
2008 */
2009 min_csize = 0;
2010 for (mdi = sra->devs; mdi; mdi = mdi->next) {
2011 sysfs_set_num(sra, mdi, "size",
2012 s->size == MAX_SIZE ? 0 : s->size);
2013 if (array.not_persistent == 0 &&
2014 array.major_version == 0 &&
2015 get_linux_version() < 3001000) {
2016 /* Dangerous to allow size to exceed 2TB */
2017 unsigned long long csize;
2018 if (sysfs_get_ll(sra, mdi, "size",
2019 &csize) == 0) {
2020 if (csize >= 2ULL*1024*1024*1024)
2021 csize = 2ULL*1024*1024*1024;
2022 if ((min_csize == 0 ||
2023 (min_csize > csize)))
2024 min_csize = csize;
2025 }
2026 }
2027 }
2028 if (min_csize && s->size > min_csize) {
2029 pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n");
2030 rv = 1;
2031 goto size_change_error;
2032 }
2033 if (min_csize && s->size == MAX_SIZE) {
2034 /* Don't let the kernel choose a size - it will get
2035 * it wrong
2036 */
2037 pr_err("Limited v0.90 array to 2TB per device\n");
2038 s->size = min_csize;
2039 }
2040 if (st->ss->external) {
2041 if (sra->array.level == 0) {
2042 rv = sysfs_set_str(sra, NULL, "level", "raid5");
2043 if (!rv) {
2044 raid0_takeover = 1;
2045 /* get array parameters after takeover
2046 * to change only one parameter at a time
2047 */
2048 rv = md_get_array_info(fd, &array);
2049 }
2050 }
2051 /* make sure mdmon is
2052 * aware of the new level */
2053 if (!mdmon_running(st->container_devnm))
2054 start_mdmon(st->container_devnm);
2055 ping_monitor(container);
2056 if (mdmon_running(st->container_devnm) &&
2057 st->update_tail == NULL)
2058 st->update_tail = &st->updates;
2059 }
2060
2061 if (s->size == MAX_SIZE)
2062 s->size = 0;
2063 array.size = s->size;
2064 if (s->size & ~INT32_MAX) {
2065 /* got truncated to 32bit, write to
2066 * component_size instead
2067 */
2068 if (sra)
2069 rv = sysfs_set_num(sra, NULL,
2070 "component_size", s->size);
2071 else
2072 rv = -1;
2073 } else {
2074 rv = md_set_array_info(fd, &array);
2075
2076 /* manage array size when it is managed externally
2077 */
2078 if ((rv == 0) && st->ss->external)
2079 rv = set_array_size(st, sra, sra->text_version);
2080 }
2081
2082 if (raid0_takeover) {
2083 /* do not resync non-existing parity,
2084 * we will drop it anyway
2085 */
2086 sysfs_set_str(sra, NULL, "sync_action", "frozen");
2087 /* go back to raid0, drop parity disk
2088 */
2089 sysfs_set_str(sra, NULL, "level", "raid0");
2090 md_get_array_info(fd, &array);
2091 }
2092
2093 size_change_error:
2094 if (rv != 0) {
2095 int err = errno;
2096
2097 /* restore metadata */
2098 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
2099 UnSet, NULL, devname,
2100 ROLLBACK_METADATA_CHANGES,
2101 c->verbose) == 0)
2102 sync_metadata(st);
2103 pr_err("Cannot set device size for %s: %s\n",
2104 devname, strerror(err));
2105 if (err == EBUSY &&
2106 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2107 cont_err("Bitmap must be removed before size can be changed\n");
2108 rv = 1;
2109 goto release;
2110 }
2111 if (s->assume_clean) {
2112 /* This will fail on kernels older than 3.0 unless
2113 * a backport has been arranged.
2114 */
2115 if (sra == NULL ||
2116 sysfs_set_str(sra, NULL, "resync_start",
2117 "none") < 0)
2118 pr_err("--assume-clean not supported with --grow on this kernel\n");
2119 }
2120 md_get_array_info(fd, &array);
2121 s->size = get_component_size(fd)/2;
2122 if (s->size == 0)
2123 s->size = array.size;
2124 if (c->verbose >= 0) {
2125 if (s->size == orig_size)
2126 pr_err("component size of %s unchanged at %lluK\n",
2127 devname, s->size);
2128 else
2129 pr_err("component size of %s has been set to %lluK\n",
2130 devname, s->size);
2131 }
2132 changed = 1;
2133 } else if (array.level != LEVEL_CONTAINER) {
2134 s->size = get_component_size(fd)/2;
2135 if (s->size == 0)
2136 s->size = array.size;
2137 }
2138
2139 /* See if there is anything else to do */
2140 if ((s->level == UnSet || s->level == array.level) &&
2141 (s->layout_str == NULL) &&
2142 (s->chunk == 0 || s->chunk == array.chunk_size) &&
2143 data_offset == INVALID_SECTORS &&
2144 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
2145 /* Nothing more to do */
2146 if (!changed && c->verbose >= 0)
2147 pr_err("%s: no change requested\n", devname);
2148 goto release;
2149 }
2150
2151 /* ========= check for Raid10/Raid1 -> Raid0 conversion ===============
2152 * the current implementation assumes that the following conditions are met:
2153 * - RAID10:
2154 * - far_copies == 1
2155 * - near_copies == 2
2156 */
2157 if ((s->level == 0 && array.level == 10 && sra &&
2158 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
2159 (s->level == 0 && array.level == 1 && sra)) {
2160 int err;
2161
2162 err = remove_disks_for_takeover(st, sra, array.layout);
2163 if (err) {
2164 dprintf("Array cannot be reshaped\n");
2165 if (cfd > -1)
2166 close(cfd);
2167 rv = 1;
2168 goto release;
2169 }
2170 /* Make sure mdmon has seen the device removal
2171 * and updated metadata before we continue with
2172 * level change
2173 */
2174 if (container)
2175 ping_monitor(container);
2176 }
2177
2178 memset(&info, 0, sizeof(info));
2179 info.array = array;
2180 if (sysfs_init(&info, fd, NULL)) {
2181 pr_err("failed to initialize sysfs.\n");
2182 rv = 1;
2183 goto release;
2184 }
2185 strcpy(info.text_version, sra->text_version);
2186 info.component_size = s->size*2;
2187 info.new_level = s->level;
2188 info.new_chunk = s->chunk * 1024;
2189 if (info.array.level == LEVEL_CONTAINER) {
2190 info.delta_disks = UnSet;
2191 info.array.raid_disks = s->raiddisks;
2192 } else if (s->raiddisks)
2193 info.delta_disks = s->raiddisks - info.array.raid_disks;
2194 else
2195 info.delta_disks = UnSet;
2196 if (s->layout_str == NULL) {
2197 info.new_layout = UnSet;
2198 if (info.array.level == 6 &&
2199 (info.new_level == 6 || info.new_level == UnSet) &&
2200 info.array.layout >= 16) {
2201 pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname);
2202 cont_err("during the reshape, please specify --layout=preserve\n");
2203 cont_err("If you want to change it, specify a layout or use --layout=normalise\n");
2204 rv = 1;
2205 goto release;
2206 }
2207 } else if (strcmp(s->layout_str, "normalise") == 0 ||
2208 strcmp(s->layout_str, "normalize") == 0) {
2209 /* If we have a -6 RAID6 layout, remove the '-6'. */
2210 info.new_layout = UnSet;
2211 if (info.array.level == 6 && info.new_level == UnSet) {
2212 char l[40], *h;
2213 strcpy(l, map_num(r6layout, info.array.layout));
2214 h = strrchr(l, '-');
2215 if (h && strcmp(h, "-6") == 0) {
2216 *h = 0;
2217 info.new_layout = map_name(r6layout, l);
2218 }
2219 } else {
2220 pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str);
2221 rv = 1;
2222 goto release;
2223 }
2224 } else if (strcmp(s->layout_str, "preserve") == 0) {
2225 /* This means that a non-standard RAID6 layout
2226 * is OK.
2227 * In particular:
2228 * - When reshaping a RAID6 (e.g. adding a device)
2229 * which is in a non-standard layout, it is OK
2230 * to preserve that layout.
2231 * - When converting a RAID5 to RAID6, leave it in
2232 * the XXX-6 layout, don't re-layout.
2233 */
2234 if (info.array.level == 6 && info.new_level == UnSet)
2235 info.new_layout = info.array.layout;
2236 else if (info.array.level == 5 && info.new_level == 6) {
2237 char l[40];
2238 strcpy(l, map_num(r5layout, info.array.layout));
2239 strcat(l, "-6");
2240 info.new_layout = map_name(r6layout, l);
2241 } else {
2242 pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str);
2243 rv = 1;
2244 goto release;
2245 }
2246 } else {
2247 int l = info.new_level;
2248 if (l == UnSet)
2249 l = info.array.level;
2250 switch (l) {
2251 case 5:
2252 info.new_layout = map_name(r5layout, s->layout_str);
2253 break;
2254 case 6:
2255 info.new_layout = map_name(r6layout, s->layout_str);
2256 break;
2257 case 10:
2258 info.new_layout = parse_layout_10(s->layout_str);
2259 break;
2260 case LEVEL_FAULTY:
2261 info.new_layout = parse_layout_faulty(s->layout_str);
2262 break;
2263 default:
2264 pr_err("layout not meaningful with this level\n");
2265 rv = 1;
2266 goto release;
2267 }
2268 if (info.new_layout == UnSet) {
2269 pr_err("layout %s not understood for this level\n",
2270 s->layout_str);
2271 rv = 1;
2272 goto release;
2273 }
2274 }
2275
2276 if (array.level == LEVEL_FAULTY) {
2277 if (s->level != UnSet && s->level != array.level) {
2278 pr_err("cannot change level of Faulty device\n");
2279 rv = 1;
2280 }
2281 if (s->chunk) {
2282 pr_err("cannot set chunksize of Faulty device\n");
2283 rv = 1;
2284 }
2285 if (s->raiddisks && s->raiddisks != 1) {
2286 pr_err("cannot set raid_disks of Faulty device\n");
2287 rv = 1;
2288 }
2289 if (s->layout_str) {
2290 if (md_get_array_info(fd, &array) != 0) {
2291 dprintf("Cannot get array information.\n");
2292 goto release;
2293 }
2294 array.layout = info.new_layout;
2295 if (md_set_array_info(fd, &array) != 0) {
2296 pr_err("failed to set new layout\n");
2297 rv = 1;
2298 } else if (c->verbose >= 0)
2299 printf("layout for %s set to %d\n",
2300 devname, array.layout);
2301 }
2302 } else if (array.level == LEVEL_CONTAINER) {
2303 /* This change is to be applied to every array in the
2304 * container. This is only needed when the metadata imposes
2305 * restraints on the various arrays in the container.
2306 * Currently we only know that IMSM requires all arrays
2307 * to have the same number of devices so changing the
2308 * number of devices (On-Line Capacity Expansion) must be
2309 * performed at the level of the container
2310 */
2311 if (fd > 0) {
2312 close(fd);
2313 fd = -1;
2314 }
2315 rv = reshape_container(container, devname, -1, st, &info,
2316 c->force, c->backup_file, c->verbose,
2317 0, 0, 0);
2318 frozen = 0;
2319 } else {
2320 /* get spare devices from external metadata
2321 */
2322 if (st->ss->external) {
2323 struct mdinfo *info2;
2324
2325 info2 = st->ss->container_content(st, subarray);
2326 if (info2) {
2327 info.array.spare_disks =
2328 info2->array.spare_disks;
2329 sysfs_free(info2);
2330 }
2331 }
2332
2333 /* Impose these changes on a single array. First
2334 * check that the metadata is OK with the change. */
2335
2336 if (reshape_super(st, 0, info.new_level,
2337 info.new_layout, info.new_chunk,
2338 info.array.raid_disks, info.delta_disks,
2339 c->backup_file, devname,
2340 APPLY_METADATA_CHANGES, c->verbose)) {
2341 rv = 1;
2342 goto release;
2343 }
2344 sync_metadata(st);
2345 rv = reshape_array(container, fd, devname, st, &info, c->force,
2346 devlist, data_offset, c->backup_file,
2347 c->verbose, 0, 0, 0);
2348 frozen = 0;
2349 }
2350 release:
2351 sysfs_free(sra);
2352 if (frozen > 0)
2353 unfreeze(st);
2354 return rv;
2355 }
2356
2357 /* verify_reshape_position()
2358 * Checks that the reshape position recorded in the metadata is not
2359 * farther along than the position reported by md.
2360 * Return value:
2361 * 0 : sysfs entry is not valid; this can happen when the reshape
2362 * has not started yet (it will be started by reshape_array())
2363 * or a raid0 array has not yet been taken over
2364 * -1 : error, reshape position is obviously wrong
2365 * 1 : success, reshape progress correct or updated
2366 */
2367 static int verify_reshape_position(struct mdinfo *info, int level)
2368 {
2369 int ret_val = 0;
2370 char buf[40];
2371 int rv;
2372
2373 /* read sync_max, failure can mean raid0 array */
2374 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2375
2376 if (rv > 0) {
2377 char *ep;
2378 unsigned long long position = strtoull(buf, &ep, 0);
2379
2380 dprintf("Read sync_max sysfs entry is: %s\n", buf);
2381 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2382 position *= get_data_disks(level,
2383 info->new_layout,
2384 info->array.raid_disks);
2385 if (info->reshape_progress < position) {
2386 dprintf("Corrected reshape progress (%llu) to md position (%llu)\n",
2387 info->reshape_progress, position);
2388 info->reshape_progress = position;
2389 ret_val = 1;
2390 } else if (info->reshape_progress > position) {
2391 pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n",
2392 position, info->reshape_progress);
2393 ret_val = -1;
2394 } else {
2395 dprintf("Reshape position in md and metadata are the same;");
2396 ret_val = 1;
2397 }
2398 }
2399 } else if (rv == 0) {
2400 /* a zero-length read from a valid sysfs entry
2401 * is treated as an error
2402 */
2403 ret_val = -1;
2404 }
2405
2406 return ret_val;
2407 }
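/*
 * Illustrative example (values are assumptions, not from the code
 * above): for a 4-disk RAID5, which has 3 data disks, a sync_max
 * reading of "1000" gives an md position of 1000 * 3 = 3000 sectors.
 * A metadata reshape_progress of 2500 is corrected up to 3000 and 1
 * is returned; a reshape_progress of 3500 is reported as a fatal
 * mismatch and -1 is returned.
 */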
2408
2409 static unsigned long long choose_offset(unsigned long long lo,
2410 unsigned long long hi,
2411 unsigned long long min,
2412 unsigned long long max)
2413 {
2414 /* Choose a new offset between hi and lo.
2415 * It must be between min and max, but
2416 * we would prefer something near the middle of hi/lo, and also
2417 * prefer to be aligned to a big power of 2.
2418 *
2419 * So we start with the middle, then for each bit,
2420 * starting at '1' and increasing, if it is set, we either
2421 * add it or subtract it if possible, preferring the option
2422 * which is furthest from the boundary.
2423 *
2424 * We stop once we get a 1MB alignment. As units are in sectors,
2425 * 1MB = 2*1024 sectors.
2426 */
2427 unsigned long long choice = (lo + hi) / 2;
2428 unsigned long long bit = 1;
2429
2430 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2431 unsigned long long bigger, smaller;
2432 if (! (bit & choice))
2433 continue;
2434 bigger = choice + bit;
2435 smaller = choice - bit;
2436 if (bigger > max && smaller < min)
2437 break;
2438 if (bigger > max)
2439 choice = smaller;
2440 else if (smaller < min)
2441 choice = bigger;
2442 else if (hi - bigger > smaller - lo)
2443 choice = bigger;
2444 else
2445 choice = smaller;
2446 }
2447 return choice;
2448 }
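/*
 * Worked example (illustrative only, values chosen for the sketch):
 *
 *	choose_offset(1000, 9000, 1000, 9000) == 4096
 *
 * The walk starts at the midpoint 5000 and visits the set bits in
 * turn: bit 8 moves the choice to 4992, bit 128 to 5120 and bit 1024
 * to 4096, which is a multiple of 2048 sectors (1MB aligned) and
 * comfortably inside [min, max].
 */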
2449
2450 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2451 char *devname, int delta_disks,
2452 unsigned long long data_offset,
2453 unsigned long long min,
2454 int can_fallback)
2455 {
2456 struct mdinfo *sd;
2457 int dir = 0;
2458 int err = 0;
2459 unsigned long long before, after;
2460
2461 /* Need to find min space before and after so same is used
2462 * on all devices
2463 */
2464 before = UINT64_MAX;
2465 after = UINT64_MAX;
2466 for (sd = sra->devs; sd; sd = sd->next) {
2467 char *dn;
2468 int dfd;
2469 int rv;
2470 struct supertype *st2;
2471 struct mdinfo info2;
2472
2473 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2474 continue;
2475 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2476 dfd = dev_open(dn, O_RDONLY);
2477 if (dfd < 0) {
2478 pr_err("%s: cannot open component %s\n",
2479 devname, dn ? dn : "-unknown-");
2480 goto release;
2481 }
2482 st2 = dup_super(st);
2483 rv = st2->ss->load_super(st2, dfd, NULL);
2484 close(dfd);
2485 if (rv) {
2486 free(st2);
2487 pr_err("%s: cannot get superblock from %s\n",
2488 devname, dn);
2489 goto release;
2490 }
2491 st2->ss->getinfo_super(st2, &info2, NULL);
2492 st2->ss->free_super(st2);
2493 free(st2);
2494 if (info2.space_before == 0 &&
2495 info2.space_after == 0) {
2496 /* Metadata doesn't support data_offset changes */
2497 if (!can_fallback)
2498 pr_err("%s: Metadata version doesn't support data_offset changes\n",
2499 devname);
2500 goto fallback;
2501 }
2502 if (before > info2.space_before)
2503 before = info2.space_before;
2504 if (after > info2.space_after)
2505 after = info2.space_after;
2506
2507 if (data_offset != INVALID_SECTORS) {
2508 if (dir == 0) {
2509 if (info2.data_offset == data_offset) {
2510 pr_err("%s: already has that data_offset\n",
2511 dn);
2512 goto release;
2513 }
2514 if (data_offset < info2.data_offset)
2515 dir = -1;
2516 else
2517 dir = 1;
2518 } else if ((data_offset <= info2.data_offset &&
2519 dir == 1) ||
2520 (data_offset >= info2.data_offset &&
2521 dir == -1)) {
2522 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2523 dn);
2524 goto release;
2525 }
2526 }
2527 }
2528 if (before == UINT64_MAX)
2529 /* impossible really, there must be no devices */
2530 return 1;
2531
2532 for (sd = sra->devs; sd; sd = sd->next) {
2533 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2534 unsigned long long new_data_offset;
2535
2536 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2537 continue;
2538 if (delta_disks < 0) {
2539 /* Don't need any space as array is shrinking
2540 * just move data_offset up by min
2541 */
2542 if (data_offset == INVALID_SECTORS)
2543 new_data_offset = sd->data_offset + min;
2544 else {
2545 if (data_offset < sd->data_offset + min) {
2546 pr_err("--data-offset too small for %s\n",
2547 dn);
2548 goto release;
2549 }
2550 new_data_offset = data_offset;
2551 }
2552 } else if (delta_disks > 0) {
2553 /* need space before */
2554 if (before < min) {
2555 if (can_fallback)
2556 goto fallback;
2557 pr_err("Insufficient head-space for reshape on %s\n",
2558 dn);
2559 goto release;
2560 }
2561 if (data_offset == INVALID_SECTORS)
2562 new_data_offset = sd->data_offset - min;
2563 else {
2564 if (data_offset > sd->data_offset - min) {
2565 pr_err("--data-offset too large for %s\n",
2566 dn);
2567 goto release;
2568 }
2569 new_data_offset = data_offset;
2570 }
2571 } else {
2572 if (dir == 0) {
2573 /* can move up or down. If 'data_offset'
2574 * was set we would have already decided,
2575 * so just choose direction with most space.
2576 */
2577 if (before > after)
2578 dir = -1;
2579 else
2580 dir = 1;
2581 }
2582 sysfs_set_str(sra, NULL, "reshape_direction",
2583 dir == 1 ? "backwards" : "forwards");
2584 if (dir > 0) {
2585 /* Increase data offset */
2586 if (after < min) {
2587 if (can_fallback)
2588 goto fallback;
2589 pr_err("Insufficient tail-space for reshape on %s\n",
2590 dn);
2591 goto release;
2592 }
2593 if (data_offset != INVALID_SECTORS &&
2594 data_offset < sd->data_offset + min) {
2595 pr_err("--data-offset too small on %s\n",
2596 dn);
2597 goto release;
2598 }
2599 if (data_offset != INVALID_SECTORS)
2600 new_data_offset = data_offset;
2601 else
2602 new_data_offset = choose_offset(sd->data_offset,
2603 sd->data_offset + after,
2604 sd->data_offset + min,
2605 sd->data_offset + after);
2606 } else {
2607 /* Decrease data offset */
2608 if (before < min) {
2609 if (can_fallback)
2610 goto fallback;
2611 pr_err("insufficient head-room on %s\n",
2612 dn);
2613 goto release;
2614 }
2615 if (data_offset != INVALID_SECTORS &&
2616 data_offset > sd->data_offset - min) {
2617 pr_err("--data-offset too large on %s\n",
2618 dn);
2619 goto release;
2620 }
2621 if (data_offset != INVALID_SECTORS)
2622 new_data_offset = data_offset;
2623 else
2624 new_data_offset = choose_offset(sd->data_offset - before,
2625 sd->data_offset,
2626 sd->data_offset - before,
2627 sd->data_offset - min);
2628 }
2629 }
2630 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2631 if (err < 0 && errno == E2BIG) {
2632 /* try again after increasing data size to max */
2633 err = sysfs_set_num(sra, sd, "size", 0);
2634 if (err < 0 && errno == EINVAL &&
2635 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2636 /* some kernels have a bug where you cannot
2637 * use '0' on spare devices. */
2638 sysfs_set_num(sra, sd, "size",
2639 (sra->component_size + after)/2);
2640 }
2641 err = sysfs_set_num(sra, sd, "new_offset",
2642 new_data_offset);
2643 }
2644 if (err < 0) {
2645 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2646 pr_err("data-offset is too big for %s\n", dn);
2647 goto release;
2648 }
2649 if (sd == sra->devs &&
2650 (errno == ENOENT || errno == E2BIG))
2651 /* Early kernel, no 'new_offset' file,
2652 * or kernel doesn't like us.
2653 * For RAID5/6 this is not fatal
2654 */
2655 return 1;
2656 pr_err("Cannot set new_offset for %s\n", dn);
2657 break;
2658 }
2659 }
2660 return err;
2661 release:
2662 return -1;
2663 fallback:
2664 /* Just use a backup file */
2665 return 1;
2666 }
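/*
 * Summary of the convention above (illustrative): the function
 * returns 0 on success, 1 when new_offset cannot be used (the caller
 * falls back to a backup file or reports the kernel as unsupported)
 * and a negative value on error. For example, when the disk count
 * grows, every member needs at least 'min' sectors of head-space and
 * new_offset becomes data_offset - min, unless an explicit
 * --data-offset was requested.
 */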
2667
2668 static int raid10_reshape(char *container, int fd, char *devname,
2669 struct supertype *st, struct mdinfo *info,
2670 struct reshape *reshape,
2671 unsigned long long data_offset,
2672 int force, int verbose)
2673 {
2674 /* Changing raid_disks, layout, chunksize or possibly
2675 * just data_offset for a RAID10.
2676 * We must always change data_offset. We change by at least
2677 * ->min_offset_change which is the largest of the old and new
2678 * chunk sizes.
2679 * If raid_disks is increasing, then data_offset must decrease
2680 * by at least this copy size.
2681 * If raid_disks is unchanged, data_offset must increase or
2682 * decrease by at least min_offset_change but preferably by much more.
2683 * We choose half of the available space.
2684 * If raid_disks is decreasing, data_offset must increase by
2685 * at least min_offset_change. To allow for this, component_size
2686 * must be decreased by the same amount.
2687 *
2688 * So we calculate the required minimum and direction, possibly
2689 * reduce the component_size, then iterate through the devices
2690 * and set the new_data_offset.
2691 * If that all works, we set chunk_size, layout, raid_disks, and start
2692 * 'reshape'
2693 */
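/* Illustrative example (assuming 512-byte sectors; the numbers are
 * not taken from real arrays): growing from 4 to 6 devices with 512K
 * chunks before and after gives min_offset_change = 1024 sectors, so
 * every member's data_offset must drop by at least 1024 sectors;
 * shrinking the device count instead raises data_offset and reduces
 * component_size by the same amount.
 */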
2694 struct mdinfo *sra;
2695 unsigned long long min;
2696 int err = 0;
2697
2698 sra = sysfs_read(fd, NULL,
2699 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2700 );
2701 if (!sra) {
2702 pr_err("%s: Cannot get array details from sysfs\n", devname);
2703 goto release;
2704 }
2705 min = reshape->min_offset_change;
2706
2707 if (info->delta_disks)
2708 sysfs_set_str(sra, NULL, "reshape_direction",
2709 info->delta_disks < 0 ? "backwards" : "forwards");
2710 if (info->delta_disks < 0 && info->space_after < min) {
2711 int rv = sysfs_set_num(sra, NULL, "component_size",
2712 (sra->component_size - min)/2);
2713 if (rv) {
2714 pr_err("cannot reduce component size\n");
2715 goto release;
2716 }
2717 }
2718 err = set_new_data_offset(sra, st, devname, info->delta_disks,
2719 data_offset, min, 0);
2720 if (err == 1) {
2721 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2722 cont_err("supported on this kernel\n");
2723 err = -1;
2724 }
2725 if (err < 0)
2726 goto release;
2727
2728 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2729 err = errno;
2730 if (!err && sysfs_set_num(sra, NULL, "layout",
2731 reshape->after.layout) < 0)
2732 err = errno;
2733 if (!err &&
2734 sysfs_set_num(sra, NULL, "raid_disks",
2735 info->array.raid_disks + info->delta_disks) < 0)
2736 err = errno;
2737 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2738 err = errno;
2739 if (err) {
2740 pr_err("Cannot set array shape for %s\n",
2741 devname);
2742 if (err == EBUSY &&
2743 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2744 cont_err(" Bitmap must be removed before shape can be changed\n");
2745 goto release;
2746 }
2747 sysfs_free(sra);
2748 return 0;
2749 release:
2750 sysfs_free(sra);
2751 return 1;
2752 }
2753
2754 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2755 {
2756 struct mdinfo *sra, *sd;
2757 /* Initialisation to silence compiler warning */
2758 unsigned long long min_space_before = 0, min_space_after = 0;
2759 int first = 1;
2760
2761 sra = sysfs_read(fd, NULL, GET_DEVS);
2762 if (!sra)
2763 return;
2764 for (sd = sra->devs; sd; sd = sd->next) {
2765 char *dn;
2766 int dfd;
2767 struct supertype *st2;
2768 struct mdinfo info2;
2769
2770 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2771 continue;
2772 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2773 dfd = dev_open(dn, O_RDONLY);
2774 if (dfd < 0)
2775 break;
2776 st2 = dup_super(st);
2777 if (st2->ss->load_super(st2, dfd, NULL)) {
2778 close(dfd);
2779 free(st2);
2780 break;
2781 }
2782 close(dfd);
2783 st2->ss->getinfo_super(st2, &info2, NULL);
2784 st2->ss->free_super(st2);
2785 free(st2);
2786 if (first ||
2787 min_space_before > info2.space_before)
2788 min_space_before = info2.space_before;
2789 if (first ||
2790 min_space_after > info2.space_after)
2791 min_space_after = info2.space_after;
2792 first = 0;
2793 }
2794 if (sd == NULL && !first) {
2795 info->space_after = min_space_after;
2796 info->space_before = min_space_before;
2797 }
2798 sysfs_free(sra);
2799 }
2800
2801 static void update_cache_size(char *container, struct mdinfo *sra,
2802 struct mdinfo *info,
2803 int disks, unsigned long long blocks)
2804 {
2805 /* Check that the internal stripe cache is
2806 * large enough, or it won't work.
2807 * It must hold at least 4 stripes of the larger
2808 * chunk size
2809 */
2810 unsigned long cache;
2811 cache = max(info->array.chunk_size, info->new_chunk);
2812 cache *= 4; /* 4 stripes minimum */
2813 cache /= 512; /* convert to sectors */
2814 /* make sure there is room for 'blocks' with a bit to spare */
2815 if (cache < 16 + blocks / disks)
2816 cache = 16 + blocks / disks;
2817 cache /= (4096/512); /* Convert from sectors to pages */
2818
2819 if (sra->cache_size < cache)
2820 subarray_set_num(container, sra, "stripe_cache_size",
2821 cache+1);
2822 }
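/*
 * Worked example (illustrative): with 512K chunks before and after,
 * cache = 512K * 4 / 512 = 4096 sectors; with blocks = 2048 and
 * disks = 4 the floor of 16 + 2048/4 = 528 sectors is lower, so the
 * result is 4096 / 8 = 512 pages and stripe_cache_size is raised to
 * 513 whenever it is currently below 512.
 */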
2823
2824 static int impose_reshape(struct mdinfo *sra,
2825 struct mdinfo *info,
2826 struct supertype *st,
2827 int fd,
2828 int restart,
2829 char *devname, char *container,
2830 struct reshape *reshape)
2831 {
2832 struct mdu_array_info_s array;
2833
2834 sra->new_chunk = info->new_chunk;
2835
2836 if (restart) {
2837 /* For external metadata, the checkpoint saved by mdmon can be
2838 * lost or missed (e.g. due to a crash). Check whether md has
2839 * progressed further during restart than the metadata
2840 * indicates. If so, the metadata information is obsolete.
2841 */
2842 if (st->ss->external)
2843 verify_reshape_position(info, reshape->level);
2844 sra->reshape_progress = info->reshape_progress;
2845 } else {
2846 sra->reshape_progress = 0;
2847 if (reshape->after.data_disks < reshape->before.data_disks)
2848 /* start from the end of the new array */
2849 sra->reshape_progress = (sra->component_size
2850 * reshape->after.data_disks);
2851 }
2852
2853 md_get_array_info(fd, &array);
2854 if (info->array.chunk_size == info->new_chunk &&
2855 reshape->before.layout == reshape->after.layout &&
2856 st->ss->external == 0) {
2857 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2858 array.raid_disks = reshape->after.data_disks + reshape->parity;
2859 if (!restart && md_set_array_info(fd, &array) != 0) {
2860 int err = errno;
2861
2862 pr_err("Cannot set device shape for %s: %s\n",
2863 devname, strerror(errno));
2864
2865 if (err == EBUSY &&
2866 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2867 cont_err("Bitmap must be removed before shape can be changed\n");
2868
2869 goto release;
2870 }
2871 } else if (!restart) {
2872 /* set them all just in case some old 'new_*' value
2873 * persists from some earlier problem.
2874 */
2875 int err = 0;
2876 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2877 err = errno;
2878 if (!err && sysfs_set_num(sra, NULL, "layout",
2879 reshape->after.layout) < 0)
2880 err = errno;
2881 if (!err && subarray_set_num(container, sra, "raid_disks",
2882 reshape->after.data_disks +
2883 reshape->parity) < 0)
2884 err = errno;
2885 if (err) {
2886 pr_err("Cannot set device shape for %s\n", devname);
2887
2888 if (err == EBUSY &&
2889 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2890 cont_err("Bitmap must be removed before shape can be changed\n");
2891 goto release;
2892 }
2893 }
2894 return 0;
2895 release:
2896 return -1;
2897 }
2898
2899 static int impose_level(int fd, int level, char *devname, int verbose)
2900 {
2901 char *c;
2902 struct mdu_array_info_s array;
2903 struct mdinfo info;
2904
2905 if (sysfs_init(&info, fd, NULL)) {
2906 pr_err("failed to initialize sysfs.\n");
2907 return 1;
2908 }
2909
2910 md_get_array_info(fd, &array);
2911 if (level == 0 && (array.level >= 4 && array.level <= 6)) {
2912 /* To convert to RAID0 we need to fail and
2913 * remove any non-data devices. */
2914 int found = 0;
2915 int d;
2916 int data_disks = array.raid_disks - 1;
2917 if (array.level == 6)
2918 data_disks -= 1;
2919 if (array.level == 5 && array.layout != ALGORITHM_PARITY_N)
2920 return -1;
2921 if (array.level == 6 && array.layout != ALGORITHM_PARITY_N_6)
2922 return -1;
2923 sysfs_set_str(&info, NULL, "sync_action", "idle");
2924 /* First remove any spares so no recovery starts */
2925 for (d = 0, found = 0;
2926 d < MAX_DISKS && found < array.nr_disks; d++) {
2927 mdu_disk_info_t disk;
2928 disk.number = d;
2929 if (md_get_disk_info(fd, &disk) < 0)
2930 continue;
2931 if (disk.major == 0 && disk.minor == 0)
2932 continue;
2933 found++;
2934 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2935 disk.raid_disk < data_disks)
2936 /* keep this */
2937 continue;
2938 ioctl(fd, HOT_REMOVE_DISK,
2939 makedev(disk.major, disk.minor));
2940 }
2941 /* Now fail anything left */
2942 md_get_array_info(fd, &array);
2943 for (d = 0, found = 0;
2944 d < MAX_DISKS && found < array.nr_disks; d++) {
2945 mdu_disk_info_t disk;
2946 disk.number = d;
2947 if (md_get_disk_info(fd, &disk) < 0)
2948 continue;
2949 if (disk.major == 0 && disk.minor == 0)
2950 continue;
2951 found++;
2952 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2953 disk.raid_disk < data_disks)
2954 /* keep this */
2955 continue;
2956 ioctl(fd, SET_DISK_FAULTY,
2957 makedev(disk.major, disk.minor));
2958 hot_remove_disk(fd, makedev(disk.major, disk.minor), 1);
2959 }
2960 }
2961 c = map_num(pers, level);
2962 if (c) {
2963 int err = sysfs_set_str(&info, NULL, "level", c);
2964 if (err) {
2965 err = errno;
2966 pr_err("%s: could not set level to %s\n",
2967 devname, c);
2968 if (err == EBUSY &&
2969 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2970 cont_err("Bitmap must be removed before level can be changed\n");
2971 return err;
2972 }
2973 if (verbose >= 0)
2974 pr_err("level of %s changed to %s\n", devname, c);
2975 }
2976 return 0;
2977 }
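/*
 * Illustrative example: impose_level(fd, 0, devname, verbose) on a
 * RAID5 with the parity-last (ALGORITHM_PARITY_N) layout first
 * removes any spares, then fails and removes the parity device, and
 * finally writes "raid0" to the sysfs "level" attribute.
 */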
2978
2979 int sigterm = 0;
2980 static void catch_term(int sig)
2981 {
2982 sigterm = 1;
2983 }
2984
2985 static int continue_via_systemd(char *devnm)
2986 {
2987 int skipped, i, pid, status;
2988 char pathbuf[1024];
2989 /* In a systemd/udev world, it is best to get systemd to
2990 * run "mdadm --grow --continue" rather than running in the
2991 * background.
2992 */
2993 switch(fork()) {
2994 case 0:
2995 /* FIXME yuk. CLOSE_EXEC?? */
2996 skipped = 0;
2997 for (i = 3; skipped < 20; i++)
2998 if (close(i) < 0)
2999 skipped++;
3000 else
3001 skipped = 0;
3002
3003 /* Don't want to see error messages from
3004 * systemctl. If the service doesn't exist,
3005 * we fork ourselves.
3006 */
3007 close(2);
3008 open("/dev/null", O_WRONLY);
3009 snprintf(pathbuf, sizeof(pathbuf),
3010 "mdadm-grow-continue@%s.service", devnm);
3011 status = execl("/usr/bin/systemctl", "systemctl", "restart",
3012 pathbuf, NULL);
3013 status = execl("/bin/systemctl", "systemctl", "restart",
3014 pathbuf, NULL);
3015 exit(1);
3016 case -1: /* Just do it ourselves. */
3017 break;
3018 default: /* parent - good */
3019 pid = wait(&status);
3020 if (pid >= 0 && status == 0)
3021 return 1;
3022 }
3023 return 0;
3024 }
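/*
 * Usage sketch (illustrative; "md127" is an assumed devnm): for an
 * array whose devnm is "md127" this asks systemd to run
 *
 *	systemctl restart mdadm-grow-continue@md127.service
 *
 * and returns 1 if systemd accepted the job, or 0 if the caller has
 * to monitor the reshape itself.
 */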
3025
3026 static int reshape_array(char *container, int fd, char *devname,
3027 struct supertype *st, struct mdinfo *info,
3028 int force, struct mddev_dev *devlist,
3029 unsigned long long data_offset,
3030 char *backup_file, int verbose, int forked,
3031 int restart, int freeze_reshape)
3032 {
3033 struct reshape reshape;
3034 int spares_needed;
3035 char *msg;
3036 int orig_level = UnSet;
3037 int odisks;
3038 int delayed;
3039
3040 struct mdu_array_info_s array;
3041 char *c;
3042
3043 struct mddev_dev *dv;
3044 int added_disks;
3045
3046 int *fdlist = NULL;
3047 unsigned long long *offsets = NULL;
3048 int d;
3049 int nrdisks;
3050 int err;
3051 unsigned long blocks;
3052 unsigned long long array_size;
3053 int done;
3054 struct mdinfo *sra = NULL;
3055 char buf[20];
3056
3057 /* when reshaping a RAID0, the component_size might be zero.
3058 * So try to fix that up.
3059 */
3060 if (md_get_array_info(fd, &array) != 0) {
3061 dprintf("Cannot get array information.\n");
3062 goto release;
3063 }
3064 if (array.level == 0 && info->component_size == 0) {
3065 get_dev_size(fd, NULL, &array_size);
3066 info->component_size = array_size / array.raid_disks;
3067 }
3068
3069 if (array.level == 10)
3070 /* Need space_after info */
3071 get_space_after(fd, st, info);
3072
3073 if (info->reshape_active) {
3074 int new_level = info->new_level;
3075 info->new_level = UnSet;
3076 if (info->delta_disks > 0)
3077 info->array.raid_disks -= info->delta_disks;
3078 msg = analyse_change(devname, info, &reshape);
3079 info->new_level = new_level;
3080 if (info->delta_disks > 0)
3081 info->array.raid_disks += info->delta_disks;
3082 if (!restart)
3083 /* Make sure the array isn't read-only */
3084 ioctl(fd, RESTART_ARRAY_RW, 0);
3085 } else
3086 msg = analyse_change(devname, info, &reshape);
3087 if (msg) {
3088 /* if msg == "", error has already been printed */
3089 if (msg[0])
3090 pr_err("%s\n", msg);
3091 goto release;
3092 }
3093 if (restart && (reshape.level != info->array.level ||
3094 reshape.before.layout != info->array.layout ||
3095 reshape.before.data_disks + reshape.parity !=
3096 info->array.raid_disks - max(0, info->delta_disks))) {
3097 pr_err("reshape info is not in native format - cannot continue.\n");
3098 goto release;
3099 }
3100
3101 if (st->ss->external && restart && (info->reshape_progress == 0) &&
3102 !((sysfs_get_str(info, NULL, "sync_action",
3103 buf, sizeof(buf)) > 0) &&
3104 (strncmp(buf, "reshape", 7) == 0))) {
3105 /* When a reshape is restarted from '0' (the very beginning of
3106 * the array), it is possible that for external metadata the
3107 * reshape and array configuration were never applied.
3108 * Check whether md agrees that the reshape is restarting
3109 * from 0. If so, this is a regular reshape start after the
3110 * metadata switched the reshape to the next array.
3111 */
3112 if ((verify_reshape_position(info, reshape.level) >= 0) &&
3113 (info->reshape_progress == 0))
3114 restart = 0;
3115 }
3116 if (restart) {
3117 /*
3118 * reshape already started. just skip to monitoring
3119 * the reshape
3120 */
3121 if (reshape.backup_blocks == 0)
3122 return 0;
3123 if (restart & RESHAPE_NO_BACKUP)
3124 return 0;
3125
3126 /* Need 'sra' down at 'started:' */
3127 sra = sysfs_read(fd, NULL,
3128 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|
3129 GET_CHUNK|GET_CACHE);
3130 if (!sra) {
3131 pr_err("%s: Cannot get array details from sysfs\n",
3132 devname);
3133 goto release;
3134 }
3135
3136 if (!backup_file)
3137 backup_file = locate_backup(sra->sys_name);
3138
3139 goto started;
3140 }
3141 /* The container is frozen but the array may not be.
3142 * So freeze the array so spares don't get put to the wrong use
3143 * FIXME there should probably be a cleaner separation between
3144 * freeze_array and freeze_container.
3145 */
3146 sysfs_freeze_array(info);
3147 /* Check we have enough spares to not be degraded */
3148 added_disks = 0;
3149 for (dv = devlist; dv ; dv=dv->next)
3150 added_disks++;
3151 spares_needed = max(reshape.before.data_disks,
3152 reshape.after.data_disks) +
3153 reshape.parity - array.raid_disks;
3154
3155 if (!force && info->new_level > 1 && info->array.level > 1 &&
3156 spares_needed > info->array.spare_disks + added_disks) {
3157 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
3158 " Use --force to over-ride this check.\n",
3159 spares_needed,
3160 spares_needed == 1 ? "" : "s",
3161 info->array.spare_disks + added_disks);
3162 goto release;
3163 }
3164 /* Check we have enough spares to not fail */
3165 spares_needed = max(reshape.before.data_disks,
3166 reshape.after.data_disks)
3167 - array.raid_disks;
3168 if ((info->new_level > 1 || info->new_level == 0) &&
3169 spares_needed > info->array.spare_disks +added_disks) {
3170 pr_err("Need %d spare%s to create working array, and only have %d.\n",
3171 spares_needed, spares_needed == 1 ? "" : "s",
3172 info->array.spare_disks + added_disks);
3173 goto release;
3174 }
3175
3176 if (reshape.level != array.level) {
3177 int err = impose_level(fd, reshape.level, devname, verbose);
3178 if (err)
3179 goto release;
3180 info->new_layout = UnSet; /* after level change,
3181 * layout is meaningless */
3182 orig_level = array.level;
3183 sysfs_freeze_array(info);
3184
3185 if (reshape.level > 0 && st->ss->external) {
3186 /* make sure mdmon is aware of the new level */
3187 if (mdmon_running(container))
3188 flush_mdmon(container);
3189
3190 if (!mdmon_running(container))
3191 start_mdmon(container);
3192 ping_monitor(container);
3193 if (mdmon_running(container) && st->update_tail == NULL)
3194 st->update_tail = &st->updates;
3195 }
3196 }
3197 /* ->reshape_super might have chosen some spares from the
3198 * container that it wants to be part of the new array.
3199 * We can collect them with ->container_content and give
3200 * them to the kernel.
3201 */
3202 if (st->ss->reshape_super && st->ss->container_content) {
3203 char *subarray = strchr(info->text_version+1, '/')+1;
3204 struct mdinfo *info2 =
3205 st->ss->container_content(st, subarray);
3206 struct mdinfo *d;
3207
3208 if (info2) {
3209 if (sysfs_init(info2, fd, st->devnm)) {
3210 pr_err("unable to initialize sysfs for %s\n",
3211 st->devnm);
3212 free(info2);
3213 goto release;
3214 }
3215 /* When increasing number of devices, we need to set
3216 * new raid_disks before adding these, or they might
3217 * be rejected.
3218 */
3219 if (reshape.backup_blocks &&
3220 reshape.after.data_disks >
3221 reshape.before.data_disks)
3222 subarray_set_num(container, info2, "raid_disks",
3223 reshape.after.data_disks +
3224 reshape.parity);
3225 for (d = info2->devs; d; d = d->next) {
3226 if (d->disk.state == 0 &&
3227 d->disk.raid_disk >= 0) {
3228 /* This is a spare that wants to
3229 * be part of the array.
3230 */
3231 add_disk(fd, st, info2, d);
3232 }
3233 }
3234 sysfs_free(info2);
3235 }
3236 }
3237 /* We might have been given some devices to add to the
3238 * array. Now that the array has been changed to the right
3239 * level and frozen, we can safely add them.
3240 */
3241 if (devlist) {
3242 if (Manage_subdevs(devname, fd, devlist, verbose, 0, NULL, 0))
3243 goto release;
3244 }
3245
3246 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
3247 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
3248 if (reshape.backup_blocks == 0) {
3249 /* No restriping needed, but we might need to impose
3250 * some more changes: layout, raid_disks, chunk_size
3251 */
3252 /* read current array info */
3253 if (md_get_array_info(fd, &array) != 0) {
3254 dprintf("Cannot get array information.\n");
3255 goto release;
3256 }
3257 /* compare current array info with new values and if
3258 * it is different update them to new */
3259 if (info->new_layout != UnSet &&
3260 info->new_layout != array.layout) {
3261 array.layout = info->new_layout;
3262 if (md_set_array_info(fd, &array) != 0) {
3263 pr_err("failed to set new layout\n");
3264 goto release;
3265 } else if (verbose >= 0)
3266 printf("layout for %s set to %d\n",
3267 devname, array.layout);
3268 }
3269 if (info->delta_disks != UnSet && info->delta_disks != 0 &&
3270 array.raid_disks !=
3271 (info->array.raid_disks + info->delta_disks)) {
3272 array.raid_disks += info->delta_disks;
3273 if (md_set_array_info(fd, &array) != 0) {
3274 pr_err("failed to set raid disks\n");
3275 goto release;
3276 } else if (verbose >= 0) {
3277 printf("raid_disks for %s set to %d\n",
3278 devname, array.raid_disks);
3279 }
3280 }
3281 if (info->new_chunk != 0 &&
3282 info->new_chunk != array.chunk_size) {
3283 if (sysfs_set_num(info, NULL,
3284 "chunk_size", info->new_chunk) != 0) {
3285 pr_err("failed to set chunk size\n");
3286 goto release;
3287 } else if (verbose >= 0)
3288 printf("chunk size for %s set to %d\n",
3289 devname, info->new_chunk);
3290 }
3291 unfreeze(st);
3292 return 0;
3293 }
3294
3295 /*
3296 * There are three possibilities.
3297 * 1/ The array will shrink.
3298 * We need to ensure the reshape will pause before reaching
3299 * the 'critical section'. We also need to fork and wait for
3300 * that to happen. When it does we
3301 * suspend/backup/complete/unfreeze
3302 *
3303 * 2/ The array will not change size.
3304 * This requires that we keep a backup of a sliding window
3305 * so that we can restore data after a crash. So we need
3306 * to fork and monitor progress.
3307 * In future we will allow the data_offset to change, so
3308 * a sliding backup becomes unnecessary.
3309 *
3310 * 3/ The array will grow. This is relatively easy.
3311 * However the kernel's restripe routines will cheerfully
3312 * overwrite some early data before it is safe. So we
3313 * need to make a backup of the early parts of the array
3314 * and be ready to restore it if rebuild aborts very early.
3315 * For externally managed metadata, we still need a forked
3316 * child to monitor the reshape and suspend IO over the region
3317 * that is being reshaped.
3318 *
3319 * We backup data by writing it to one spare, or to a
3320 * file which was given on command line.
3321 *
3322 * In each case, we first make sure that storage is available
3323 * for the required backup.
3324 * Then we:
3325 * - request the shape change.
3326 * - fork to handle backup etc.
3327 */
3328 /* Check that we can hold all the data */
3329 get_dev_size(fd, NULL, &array_size);
3330 if (reshape.new_size < (array_size/512)) {
3331 pr_err("this change will reduce the size of the array.\n"
3332 " use --grow --array-size first to truncate array.\n"
3333 " e.g. mdadm --grow %s --array-size %llu\n",
3334 devname, reshape.new_size/2);
3335 goto release;
3336 }
3337
3338 if (array.level == 10) {
3339 /* Reshaping RAID10 does not require any data backup by
3340 * user-space. Instead it requires that the data_offset
3341 * is changed to avoid the need for backup.
3342 * So this is handled very separately
3343 */
3344 if (restart)
3345 /* Nothing to do. */
3346 return 0;
3347 return raid10_reshape(container, fd, devname, st, info,
3348 &reshape, data_offset, force, verbose);
3349 }
3350 sra = sysfs_read(fd, NULL,
3351 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3352 GET_CACHE);
3353 if (!sra) {
3354 pr_err("%s: Cannot get array details from sysfs\n",
3355 devname);
3356 goto release;
3357 }
3358
3359 if (!backup_file)
3360 switch(set_new_data_offset(sra, st, devname,
3361 reshape.after.data_disks - reshape.before.data_disks,
3362 data_offset,
3363 reshape.min_offset_change, 1)) {
3364 case -1:
3365 goto release;
3366 case 0:
3367 /* Updated data_offset, so it's easy now */
3368 update_cache_size(container, sra, info,
3369 min(reshape.before.data_disks,
3370 reshape.after.data_disks),
3371 reshape.backup_blocks);
3372
3373 /* Right, everything seems fine. Let's kick things off.
3374 */
3375 sync_metadata(st);
3376
3377 if (impose_reshape(sra, info, st, fd, restart,
3378 devname, container, &reshape) < 0)
3379 goto release;
3380 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3381 struct mdinfo *sd;
3382 if (errno != EINVAL) {
3383 pr_err("Failed to initiate reshape!\n");
3384 goto release;
3385 }
3386 /* revert data_offset and try the old way */
3387 for (sd = sra->devs; sd; sd = sd->next) {
3388 sysfs_set_num(sra, sd, "new_offset",
3389 sd->data_offset);
3390 sysfs_set_str(sra, NULL, "reshape_direction",
3391 "forwards");
3392 }
3393 break;
3394 }
3395 if (info->new_level == reshape.level)
3396 return 0;
3397 /* need to adjust level when reshape completes */
3398 switch(fork()) {
3399 case -1: /* ignore error, but don't wait */
3400 return 0;
3401 default: /* parent */
3402 return 0;
3403 case 0:
3404 map_fork();
3405 break;
3406 }
3407 close(fd);
3408 wait_reshape(sra);
3409 fd = open_dev(sra->sys_name);
3410 if (fd >= 0)
3411 impose_level(fd, info->new_level, devname, verbose);
3412 return 0;
3413 case 1: /* Couldn't set data_offset, try the old way */
3414 if (data_offset != INVALID_SECTORS) {
3415 pr_err("Cannot update data_offset on this array\n");
3416 goto release;
3417 }
3418 break;
3419 }
3420
3421 started:
3422 /* Decide how many blocks (sectors) to use for a reshape
3423 * unit. The number we have so far is just a minimum.
3424 */
3425 blocks = reshape.backup_blocks;
3426 if (reshape.before.data_disks ==
3427 reshape.after.data_disks) {
3428 /* Make 'blocks' bigger for better throughput, but
3429 * not so big that we reject it below.
3430 * Try for 16 megabytes
3431 */
3432 while (blocks * 32 < sra->component_size && blocks < 16*1024*2)
3433 blocks *= 2;
3434 } else
3435 pr_err("Need to backup %luK of critical section..\n", blocks/2);
3436
3437 if (blocks >= sra->component_size/2) {
3438 pr_err("%s: Something wrong - reshape aborted\n", devname);
3439 goto release;
3440 }
3441
3442 /* Now we need to open all these devices so we can read/write.
3443 */
3444 nrdisks = max(reshape.before.data_disks,
3445 reshape.after.data_disks) + reshape.parity
3446 + sra->array.spare_disks;
3447 fdlist = xcalloc((1+nrdisks), sizeof(int));
3448 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3449
3450 odisks = reshape.before.data_disks + reshape.parity;
3451 d = reshape_prepare_fdlist(devname, sra, odisks, nrdisks, blocks,
3452 backup_file, fdlist, offsets);
3453 if (d < odisks) {
3454 goto release;
3455 }
3456 if ((st->ss->manage_reshape == NULL) ||
3457 (st->ss->recover_backup == NULL)) {
3458 if (backup_file == NULL) {
3459 if (reshape.after.data_disks <=
3460 reshape.before.data_disks) {
3461 pr_err("%s: Cannot grow - need backup-file\n",
3462 devname);
3463 pr_err(" Please provide one with \"--backup=...\"\n");
3464 goto release;
3465 } else if (d == odisks) {
3466 pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname);
3467 goto release;
3468 }
3469 } else {
3470 if (!reshape_open_backup_file(backup_file, fd, devname,
3471 (signed)blocks,
3472 fdlist+d, offsets+d,
3473 sra->sys_name, restart)) {
3474 goto release;
3475 }
3476 d++;
3477 }
3478 }
3479
3480 update_cache_size(container, sra, info,
3481 min(reshape.before.data_disks,
3482 reshape.after.data_disks), blocks);
3483
3484 /* Right, everything seems fine. Let's kick things off.
3485 * If only changing raid_disks, use ioctl, else use
3486 * sysfs.
3487 */
3488 sync_metadata(st);
3489
3490 if (impose_reshape(sra, info, st, fd, restart,
3491 devname, container, &reshape) < 0)
3492 goto release;
3493
3494 err = start_reshape(sra, restart, reshape.before.data_disks,
3495 reshape.after.data_disks);
3496 if (err) {
3497 pr_err("Cannot %s reshape for %s\n",
3498 restart ? "continue" : "start", devname);
3499 goto release;
3500 }
3501 if (restart)
3502 sysfs_set_str(sra, NULL, "array_state", "active");
3503 if (freeze_reshape) {
3504 free(fdlist);
3505 free(offsets);
3506 sysfs_free(sra);
3507 pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n",
3508 sra->reshape_progress);
3509 return 1;
3510 }
3511
3512 if (!forked && !check_env("MDADM_NO_SYSTEMCTL"))
3513 if (continue_via_systemd(container ?: sra->sys_name)) {
3514 free(fdlist);
3515 free(offsets);
3516 sysfs_free(sra);
3517 return 0;
3518 }
3519
3520 /* Now we just need to kick off the reshape and watch, while
3521 * handling backups of the data...
3522 * This is all done by a forked background process.
3523 */
3524 switch(forked ? 0 : fork()) {
3525 case -1:
3526 pr_err("Cannot run child to monitor reshape: %s\n",
3527 strerror(errno));
3528 abort_reshape(sra);
3529 goto release;
3530 default:
3531 free(fdlist);
3532 free(offsets);
3533 sysfs_free(sra);
3534 return 0;
3535 case 0:
3536 map_fork();
3537 break;
3538 }
3539
3540 /* If another array on the same devices is busy, the
3541 * reshape will wait for it. This would mean that
3542 * the first section that we suspend will stay suspended
3543 * for a long time. So check on that possibility
3544 * by looking for "DELAYED" in /proc/mdstat, and if found,
3545 * wait a while
3546 */
3547 do {
3548 struct mdstat_ent *mds, *m;
3549 delayed = 0;
3550 mds = mdstat_read(1, 0);
3551 for (m = mds; m; m = m->next)
3552 if (strcmp(m->devnm, sra->sys_name) == 0) {
3553 if (m->resync && m->percent == RESYNC_DELAYED)
3554 delayed = 1;
3555 if (m->resync == 0)
3556 /* Haven't started the reshape thread
3557 * yet, wait a bit
3558 */
3559 delayed = 2;
3560 break;
3561 }
3562 free_mdstat(mds);
3563 if (delayed == 1 && get_linux_version() < 3007000) {
3564 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3565 " You might experience problems until other reshapes complete.\n");
3566 delayed = 0;
3567 }
3568 if (delayed)
3569 mdstat_wait(30 - (delayed-1) * 25);
3570 } while (delayed);
3571 mdstat_close();
3572 close(fd);
3573 if (check_env("MDADM_GROW_VERIFY"))
3574 fd = open(devname, O_RDONLY | O_DIRECT);
3575 else
3576 fd = -1;
3577 mlockall(MCL_FUTURE);
3578
3579 signal(SIGTERM, catch_term);
3580
3581 if (st->ss->external) {
3582 /* metadata handler takes it from here */
3583 done = st->ss->manage_reshape(
3584 fd, sra, &reshape, st, blocks,
3585 fdlist, offsets, d - odisks, fdlist + odisks,
3586 offsets + odisks);
3587 } else
3588 done = child_monitor(
3589 fd, sra, &reshape, st, blocks, fdlist, offsets,
3590 d - odisks, fdlist + odisks, offsets + odisks);
3591
3592 free(fdlist);
3593 free(offsets);
3594
3595 if (backup_file && done) {
3596 char *bul;
3597 bul = make_backup(sra->sys_name);
3598 if (bul) {
3599 char buf[1024];
3600 int l = readlink(bul, buf, sizeof(buf) - 1);
3601 if (l > 0) {
3602 buf[l]=0;
3603 unlink(buf);
3604 }
3605 unlink(bul);
3606 free(bul);
3607 }
3608 unlink(backup_file);
3609 }
3610 if (!done) {
3611 abort_reshape(sra);
3612 goto out;
3613 }
3614
3615 if (!st->ss->external &&
3616 !(reshape.before.data_disks != reshape.after.data_disks &&
3617 info->custom_array_size) && info->new_level == reshape.level &&
3618 !forked) {
3619 /* no need to wait for the reshape to finish as
3620 * there is nothing more to do.
3621 */
3622 sysfs_free(sra);
3623 exit(0);
3624 }
3625 wait_reshape(sra);
3626
3627 if (st->ss->external) {
3628 /* Re-load the metadata as much could have changed */
3629 int cfd = open_dev(st->container_devnm);
3630 if (cfd >= 0) {
3631 flush_mdmon(container);
3632 st->ss->free_super(st);
3633 st->ss->load_container(st, cfd, container);
3634 close(cfd);
3635 }
3636 }
3637
3638 /* Set the new array size if required; custom_array_size is used
3639 * by this metadata.
3640 */
3641 if (reshape.before.data_disks != reshape.after.data_disks &&
3642 info->custom_array_size)
3643 set_array_size(st, info, info->text_version);
3644
3645 if (info->new_level != reshape.level) {
3646 if (fd < 0)
3647 fd = open(devname, O_RDONLY);
3648 impose_level(fd, info->new_level, devname, verbose);
3649 close(fd);
3650 if (info->new_level == 0)
3651 st->update_tail = NULL;
3652 }
3653 out:
3654 sysfs_free(sra);
3655 if (forked)
3656 return 0;
3657 unfreeze(st);
3658 exit(0);
3659
3660 release:
3661 free(fdlist);
3662 free(offsets);
3663 if (orig_level != UnSet && sra) {
3664 c = map_num(pers, orig_level);
3665 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3666 pr_err("aborting level change\n");
3667 }
3668 sysfs_free(sra);
3669 if (!forked)
3670 unfreeze(st);
3671 return 1;
3672 }
3673
3674 /* mdfd handle is passed to be closed in child process (after fork).
3675 */
3676 int reshape_container(char *container, char *devname,
3677 int mdfd,
3678 struct supertype *st,
3679 struct mdinfo *info,
3680 int force,
3681 char *backup_file, int verbose,
3682 int forked, int restart, int freeze_reshape)
3683 {
3684 struct mdinfo *cc = NULL;
3685 int rv = restart;
3686 char last_devnm[32] = "";
3687
3688 /* component_size is not meaningful for a container,
3689 * so pass '0' meaning 'no change'
3690 */
3691 if (!restart &&
3692 reshape_super(st, 0, info->new_level,
3693 info->new_layout, info->new_chunk,
3694 info->array.raid_disks, info->delta_disks,
3695 backup_file, devname, APPLY_METADATA_CHANGES,
3696 verbose)) {
3697 unfreeze(st);
3698 return 1;
3699 }
3700
3701 sync_metadata(st);
3702
3703 /* ping monitor to be sure that update is on disk
3704 */
3705 ping_monitor(container);
3706
3707 if (!forked && !freeze_reshape && !check_env("MDADM_NO_SYSTEMCTL"))
3708 if (continue_via_systemd(container))
3709 return 0;
3710
3711 switch (forked ? 0 : fork()) {
3712 case -1: /* error */
3713 perror("Cannot fork to complete reshape");
3714 unfreeze(st);
3715 return 1;
3716 default: /* parent */
3717 if (!freeze_reshape)
3718 printf("%s: multi-array reshape continues in background\n", Name);
3719 return 0;
3720 case 0: /* child */
3721 map_fork();
3722 break;
3723 }
3724
3725 /* close unused handle in child process
3726 */
3727 if (mdfd > -1)
3728 close(mdfd);
3729
3730 while(1) {
3731 /* For each member array with reshape_active,
3732 * we need to perform the reshape.
3733 * We pick the first array that needs reshaping and
3734 * reshape it. reshape_array() will re-read the metadata
3735 * so the next time through a different array should be
3736 * ready for reshape.
3737 * It is possible that the 'different' array will not
3738 * be assembled yet. In that case we simply exit.
3739 * When it is assembled, the mdadm which assembles it
3740 * will take over the reshape.
3741 */
3742 struct mdinfo *content;
3743 int fd;
3744 struct mdstat_ent *mdstat;
3745 char *adev;
3746 dev_t devid;
3747
3748 sysfs_free(cc);
3749
3750 cc = st->ss->container_content(st, NULL);
3751
3752 for (content = cc; content ; content = content->next) {
3753 char *subarray;
3754 if (!content->reshape_active)
3755 continue;
3756
3757 subarray = strchr(content->text_version+1, '/')+1;
3758 mdstat = mdstat_by_subdev(subarray, container);
3759 if (!mdstat)
3760 continue;
3761 if (mdstat->active == 0) {
3762 pr_err("Skipping inactive array %s.\n",
3763 mdstat->devnm);
3764 free_mdstat(mdstat);
3765 mdstat = NULL;
3766 continue;
3767 }
3768 break;
3769 }
3770 if (!content)
3771 break;
3772
3773 devid = devnm2devid(mdstat->devnm);
3774 adev = map_dev(major(devid), minor(devid), 0);
3775 if (!adev)
3776 adev = content->text_version;
3777
3778 fd = open_dev(mdstat->devnm);
3779 if (fd < 0) {
3780 pr_err("Device %s cannot be opened for reshape.\n",
3781 adev);
3782 break;
3783 }
3784
3785 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3786 /* Do not allow for multiple reshape_array() calls for
3787 * the same array.
3788 * It can happen when reshape_array() returns without
3789 * error, when reshape is not finished (wrong reshape
3790 * starting/continuation conditions). Mdmon doesn't
3791 * switch to next array in container and reentry
3792 * conditions for the same array occur.
3793 * This is possibly interim until the behaviour of
3794 * reshape_array() is resolved.
3795 */
3796 printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev);
3797 close(fd);
3798 break;
3799 }
3800 strcpy(last_devnm, mdstat->devnm);
3801
3802 if (sysfs_init(content, fd, mdstat->devnm)) {
3803 pr_err("Unable to initialize sysfs for %s\n",
3804 mdstat->devnm);
3805 rv = 1;
3806 break;
3807 }
3808
3809 if (mdmon_running(container))
3810 flush_mdmon(container);
3811
3812 rv = reshape_array(container, fd, adev, st,
3813 content, force, NULL, INVALID_SECTORS,
3814 backup_file, verbose, 1, restart,
3815 freeze_reshape);
3816 close(fd);
3817
3818 if (freeze_reshape) {
3819 sysfs_free(cc);
3820 exit(0);
3821 }
3822
3823 restart = 0;
3824 if (rv)
3825 break;
3826
3827 if (mdmon_running(container))
3828 flush_mdmon(container);
3829 }
3830 if (!rv)
3831 unfreeze(st);
3832 sysfs_free(cc);
3833 exit(0);
3834 }
3835
3836 /*
3837 * We run a child process in the background which performs the following
3838 * steps:
3839 * - wait for resync to reach a certain point
3840 * - suspend io to the following section
3841 * - backup that section
3842 * - allow resync to proceed further
3843 * - resume io
3844 * - discard the backup.
3845 *
3846 * These are combined in slightly different ways in the three cases.
3847 * Grow:
3848 * - suspend/backup/allow/wait/resume/discard
3849 * Shrink:
3850 * - allow/wait/suspend/backup/allow/wait/resume/discard
3851 * same-size:
3852 * - wait/resume/discard/suspend/backup/allow
3853 *
3854 * suspend/backup/allow always come together
3855 * wait/resume/discard do too.
3856 * For the same-size case we have two backups to improve flow.
3857 *
3858 */
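/* As a concrete illustration of the Grow ordering above: IO to the next
 * region is suspended, the region is backed up, sync_max is raised so the
 * kernel may reshape over it, we wait for sync_completed to pass it, IO
 * is resumed, the backup slot is discarded, and the cycle repeats for the
 * following region.
 */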
3859
3860 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3861 unsigned long long backup_point,
3862 unsigned long long wait_point,
3863 unsigned long long *suspend_point,
3864 unsigned long long *reshape_completed, int *frozen)
3865 {
3866 /* This function is called repeatedly by the reshape manager.
3867 * It determines how much progress can safely be made and allows
3868 * that progress.
3869 * - 'info' identifies the array and particularly records in
3870 * ->reshape_progress the metadata's knowledge of progress
3871 * This is a sector offset from the start of the array
3872 * of the next array block to be relocated. This number
3873 * may increase from 0 or decrease from array_size, depending
3874 * on the type of reshape that is happening.
3875 * Note that in contrast, 'sync_completed' is a block count of the
3876 * reshape so far. It gives the distance between the start point
3877 * (head or tail of device) and the next place that data will be
3878 * written. It always increases.
3879 * - 'reshape' is the structure created by analyse_change
3880 * - 'backup_point' shows how much the metadata manager has backed-up
3881 * data. For reshapes with increasing progress, it is the next address
3882 * to be backed up, previous addresses have been backed-up. For
3883 * decreasing progress, it is the earliest address that has been
3884 * backed up - later addresses are also backed up.
3885 * So addresses between reshape_progress and backup_point are
3886 * backed up providing those are in the 'correct' order.
3887 * - 'wait_point' is an array address. When reshape_completed
3888 * passes this point, progress_reshape should return. It might
3889 * return earlier if it determines that ->reshape_progress needs
3890 * to be updated or further backup is needed.
3891 * - suspend_point is maintained by progress_reshape and the caller
3892 * should not touch it except to initialise to zero.
3893 * It is an array address and it only increases in 2.6.37 and earlier.
3894 * This makes it difficult to handle reducing reshapes with
3895 * external metadata.
3896 * However: it is similar to backup_point in that it records the
3897 * other end of a suspended region from reshape_progress.
3898 * it is moved to extend the region that is safe to backup and/or
3899 * reshape
3900 * - reshape_completed is read from sysfs and returned. The caller
3901 * should copy this into ->reshape_progress when it has reason to
3902 * believe that the metadata knows this, and any backup outside this
3903 * has been erased.
3904 *
3905 * Return value is:
3906 * 1 if more data from backup_point - but only as far as suspend_point,
3907 * should be backed up
3908 * 0 if things are progressing smoothly
3909 * -1 if the reshape is finished because it is all done,
3910 * -2 if the reshape is finished due to an error.
3911 */
3912
3913 int advancing = (reshape->after.data_disks
3914 >= reshape->before.data_disks);
3915 unsigned long long need_backup; /* All data between start of array and
3916 * here will at some point need to
3917 * be backed up.
3918 */
3919 unsigned long long read_offset, write_offset;
3920 unsigned long long write_range;
3921 unsigned long long max_progress, target, completed;
3922 unsigned long long array_size = (info->component_size
3923 * reshape->before.data_disks);
3924 int fd;
3925 char buf[20];
3926
3927 /* First, we unsuspend any region that is now known to be safe.
3928 * If suspend_point is on the 'wrong' side of reshape_progress, then
3929 * we don't have or need suspension at the moment. This is true for
3930 * native metadata when we don't need to back-up.
3931 */
3932 if (advancing) {
3933 if (info->reshape_progress <= *suspend_point)
3934 sysfs_set_num(info, NULL, "suspend_lo",
3935 info->reshape_progress);
3936 } else {
3937 /* Note: this won't work in 2.6.37 and before.
3938 * Something somewhere should make sure we don't need it!
3939 */
3940 if (info->reshape_progress >= *suspend_point)
3941 sysfs_set_num(info, NULL, "suspend_hi",
3942 info->reshape_progress);
3943 }
3944
3945 /* Now work out how far it is safe to progress.
3946 * If the read_offset for ->reshape_progress is less than
3947 * 'blocks' beyond the write_offset, we can only progress as far
3948 * as a backup.
3949 * Otherwise we can progress until the write_offset for the new location
3950 * reaches (within 'blocks' of) the read_offset at the current location.
3951 * However that region must be suspended unless we are using native
3952 * metadata.
3953 * If we need to suspend more, we limit it to 128M per device, which is
3954 * rather arbitrary and should be some time-based calculation.
3955 */
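/* Illustrative numbers only: growing 4 -> 5 data disks with
 * reshape_progress = 1000000 sectors and a 512K new chunk gives
 * read_offset = 250000 and write_offset = 200000 per device; the read
 * frontier is more than one chunk ahead, so max_progress becomes
 * 250000 * 5 = 1250000 - the array address at which the new-layout
 * write frontier would reach today's read frontier.
 */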
3956 read_offset = info->reshape_progress / reshape->before.data_disks;
3957 write_offset = info->reshape_progress / reshape->after.data_disks;
3958 write_range = info->new_chunk/512;
3959 if (reshape->before.data_disks == reshape->after.data_disks)
3960 need_backup = array_size;
3961 else
3962 need_backup = reshape->backup_blocks;
3963 if (advancing) {
3964 if (read_offset < write_offset + write_range)
3965 max_progress = backup_point;
3966 else
3967 max_progress =
3968 read_offset * reshape->after.data_disks;
3969 } else {
3970 if (read_offset > write_offset - write_range)
3971 /* Can only progress as far as has been backed up,
3972 * which must be suspended */
3973 max_progress = backup_point;
3974 else if (info->reshape_progress <= need_backup)
3975 max_progress = backup_point;
3976 else {
3977 if (info->array.major_version >= 0)
3978 /* Can progress until backup is needed */
3979 max_progress = need_backup;
3980 else {
3981 /* Can progress until metadata update is required */
3982 max_progress =
3983 read_offset * reshape->after.data_disks;
3984 /* but data must be suspended */
3985 if (max_progress < *suspend_point)
3986 max_progress = *suspend_point;
3987 }
3988 }
3989 }
3990
3991 /* We know it is safe to progress to 'max_progress' providing
3992 * it is suspended or we are using native metadata.
3993 * Consider extending suspend_point 128M per device if it
3994 * is less than 64M per device beyond reshape_progress.
3995 * But always do a multiple of 'blocks'
3996 * FIXME this is too big - it takes too long to complete
3997 * this much.
3998 */
3999 target = 64*1024*2 * min(reshape->before.data_disks,
4000 reshape->after.data_disks);
4001 target /= reshape->backup_blocks;
4002 if (target < 2)
4003 target = 2;
4004 target *= reshape->backup_blocks;
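/* Illustrative arithmetic: with min(4, 5) = 4 data disks, 64*1024*2 =
 * 131072 sectors (64M per device) gives an initial target of 524288
 * array sectors; with backup_blocks = 10240 this rounds down to
 * 51 * 10240 = 522240.  The 2 * target steps below are the
 * '128M per device' mentioned above.
 */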
4005
4006 /* For externally managed metadata we always need to suspend IO to
4007 * the area being reshaped so we regularly push suspend_point forward.
4008 * For native metadata we only need the suspend if we are going to do
4009 * a backup.
4010 */
4011 if (advancing) {
4012 if ((need_backup > info->reshape_progress ||
4013 info->array.major_version < 0) &&
4014 *suspend_point < info->reshape_progress + target) {
4015 if (need_backup < *suspend_point + 2 * target)
4016 *suspend_point = need_backup;
4017 else if (*suspend_point + 2 * target < array_size)
4018 *suspend_point += 2 * target;
4019 else
4020 *suspend_point = array_size;
4021 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
4022 if (max_progress > *suspend_point)
4023 max_progress = *suspend_point;
4024 }
4025 } else {
4026 if (info->array.major_version >= 0) {
4027 /* Only need to suspend when about to backup */
4028 if (info->reshape_progress < need_backup * 2 &&
4029 *suspend_point > 0) {
4030 *suspend_point = 0;
4031 sysfs_set_num(info, NULL, "suspend_lo", 0);
4032 sysfs_set_num(info, NULL, "suspend_hi",
4033 need_backup);
4034 }
4035 } else {
4036 /* Need to suspend continually */
4037 if (info->reshape_progress < *suspend_point)
4038 *suspend_point = info->reshape_progress;
4039 if (*suspend_point + target < info->reshape_progress)
4040 /* No need to move suspend region yet */;
4041 else {
4042 if (*suspend_point >= 2 * target)
4043 *suspend_point -= 2 * target;
4044 else
4045 *suspend_point = 0;
4046 sysfs_set_num(info, NULL, "suspend_lo",
4047 *suspend_point);
4048 }
4049 if (max_progress < *suspend_point)
4050 max_progress = *suspend_point;
4051 }
4052 }
4053
4054 /* now set sync_max to allow that progress. sync_max, like
4055 * sync_completed is a count of sectors written per device, so
4056 * we find the difference between max_progress and the start point,
4057 * and divide that by after.data_disks to get a sync_max
4058 * number.
4059 * At the same time we convert wait_point to a similar number
4060 * for comparing against sync_completed.
4061 */
4062 /* scale down max_progress to per_disk */
4063 max_progress /= reshape->after.data_disks;
4064 /*
4065 * Round to chunk size as some kernels give an erroneously
4066 * high number
4067 */
4068 max_progress /= info->new_chunk/512;
4069 max_progress *= info->new_chunk/512;
4070 /* And round to old chunk size as the kernel wants that */
4071 max_progress /= info->array.chunk_size/512;
4072 max_progress *= info->array.chunk_size/512;
4073 /* Limit progress to the whole device */
4074 if (max_progress > info->component_size)
4075 max_progress = info->component_size;
4076 wait_point /= reshape->after.data_disks;
4077 if (!advancing) {
4078 /* switch from 'device offset' to 'processed block count' */
4079 max_progress = info->component_size - max_progress;
4080 wait_point = info->component_size - wait_point;
4081 }
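/* Continuing the illustrative 4 -> 5 example: max_progress = 1250000
 * array sectors becomes 250000 per device, which rounds down to 249856
 * with a 1024-sector new chunk (and is already a multiple of a
 * 128-sector old chunk) before being written to sync_max.  For a
 * shrinking reshape the value is flipped to component_size - value
 * because sync_completed counts blocks processed rather than a device
 * offset.
 */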
4082
4083 if (!*frozen)
4084 sysfs_set_num(info, NULL, "sync_max", max_progress);
4085
4086 /* Now wait. If we have already reached the point that we were
4087 * asked to wait to, don't wait at all, else wait for any change.
4088 * We need to select on 'sync_completed' as that is the place that
4089 * notifications happen, but we are really interested in
4090 * 'reshape_position'
4091 */
4092 fd = sysfs_get_fd(info, NULL, "sync_completed");
4093 if (fd < 0)
4094 goto check_progress;
4095
4096 if (sysfs_fd_get_ll(fd, &completed) < 0)
4097 goto check_progress;
4098
4099 while (completed < max_progress && completed < wait_point) {
4100 /* Check that sync_action is still 'reshape' to avoid
4101 * waiting forever on a dead array
4102 */
4103 char action[20];
4104 if (sysfs_get_str(info, NULL, "sync_action", action, 20) <= 0 ||
4105 strncmp(action, "reshape", 7) != 0)
4106 break;
4107 /* Some kernels reset 'sync_completed' to zero
4108 * before setting 'sync_action' to 'idle'.
4109 * So we need these extra tests.
4110 */
4111 if (completed == 0 && advancing &&
4112 strncmp(action, "idle", 4) == 0 &&
4113 info->reshape_progress > 0)
4114 break;
4115 if (completed == 0 && !advancing &&
4116 strncmp(action, "idle", 4) == 0 &&
4117 info->reshape_progress <
4118 (info->component_size * reshape->after.data_disks))
4119 break;
4120 sysfs_wait(fd, NULL);
4121 if (sysfs_fd_get_ll(fd, &completed) < 0)
4122 goto check_progress;
4123 }
4124 /* Some kernels reset 'sync_completed' to zero,
4125 * but we need to know the real point we are at in md.
4126 * So in that case, read 'reshape_position' from sysfs.
4127 */
4128 if (completed == 0) {
4129 unsigned long long reshapep;
4130 char action[20];
4131 if (sysfs_get_str(info, NULL, "sync_action", action, 20) > 0 &&
4132 strncmp(action, "idle", 4) == 0 &&
4133 sysfs_get_ll(info, NULL,
4134 "reshape_position", &reshapep) == 0)
4135 *reshape_completed = reshapep;
4136 } else {
4137 /* some kernels can give an incorrectly high
4138 * 'completed' number, so round down */
4139 completed /= (info->new_chunk/512);
4140 completed *= (info->new_chunk/512);
4141 /* Convert 'completed' back in to a 'progress' number */
4142 completed *= reshape->after.data_disks;
4143 if (!advancing)
4144 completed = (info->component_size
4145 * reshape->after.data_disks
4146 - completed);
4147 *reshape_completed = completed;
4148 }
4149
4150 close(fd);
4151
4152 /* We return the need_backup flag. Caller will decide
4153 * how much - a multiple of ->backup_blocks up to *suspend_point
4154 */
4155 if (advancing)
4156 return need_backup > info->reshape_progress;
4157 else
4158 return need_backup >= info->reshape_progress;
4159
4160 check_progress:
4161 /* if we couldn't read a number from sync_completed, then
4162 * either the reshape did complete, or it aborted.
4163 * We can tell which by checking for 'none' in reshape_position.
4164 * If it did abort, then it might immediately restart if
4165 * it was just a device failure that leaves us degraded but
4166 * functioning.
4167 */
4168 if (sysfs_get_str(info, NULL, "reshape_position", buf,
4169 sizeof(buf)) < 0 || strncmp(buf, "none", 4) != 0) {
4170 /* The abort might only be temporary. Wait up to 10
4171 * seconds for fd to contain a valid number again.
4172 */
4173 int wait = 10000;
4174 int rv = -2;
4175 unsigned long long new_sync_max;
4176 while (fd >= 0 && rv < 0 && wait > 0) {
4177 if (sysfs_wait(fd, &wait) != 1)
4178 break;
4179 switch (sysfs_fd_get_ll(fd, &completed)) {
4180 case 0:
4181 /* all good again */
4182 rv = 1;
4183 /* If "sync_max" is no longer max_progress
4184 * we need to freeze things
4185 */
4186 sysfs_get_ll(info, NULL, "sync_max",
4187 &new_sync_max);
4188 *frozen = (new_sync_max != max_progress);
4189 break;
4190 case -2: /* read error - abort */
4191 wait = 0;
4192 break;
4193 }
4194 }
4195 if (fd >= 0)
4196 close(fd);
4197 return rv; /* abort */
4198 } else {
4199 /* Maybe racing with array shutdown - check state */
4200 if (fd >= 0)
4201 close(fd);
4202 if (sysfs_get_str(info, NULL, "array_state", buf,
4203 sizeof(buf)) < 0 ||
4204 strncmp(buf, "inactive", 8) == 0 ||
4205 strncmp(buf, "clear",5) == 0)
4206 return -2; /* abort */
4207 return -1; /* complete */
4208 }
4209 }
4210
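/* A minimal sketch (never compiled) of the calling pattern for
 * progress_reshape(); the real driver is child_monitor() further down,
 * and the variable names used here are illustrative only.
 */
#if 0
	unsigned long long suspend_point = 0;	/* 0 when growing; array_size when shrinking */
	unsigned long long completed = 0;
	int frozen = 0, rv;

	do {
		rv = progress_reshape(sra, reshape,
				      backup_point, wait_point,
				      &suspend_point, &completed, &frozen);
		/* external metadata would ping_monitor() about here */
		sra->reshape_progress = completed;
		if (rv == 1)
			/* back up more data (a multiple of ->backup_blocks,
			 * up to suspend_point) and advance backup_point */;
	} while (rv >= 0);	/* -1: reshape finished, -2: error */
#endif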
4211 /* FIXME return status is never checked */
4212 static int grow_backup(struct mdinfo *sra,
4213 unsigned long long offset, /* per device */
4214 unsigned long stripes, /* per device, in old chunks */
4215 int *sources, unsigned long long *offsets,
4216 int disks, int chunk, int level, int layout,
4217 int dests, int *destfd, unsigned long long *destoffsets,
4218 int part, int *degraded,
4219 char *buf)
4220 {
4221 /* Backup 'blocks' sectors at 'offset' on each device of the array,
4222 * to storage 'destfd' (offset 'destoffsets'), after first
4223 * suspending IO. Then allow resync to continue
4224 * over the suspended section.
4225 * Use part 'part' of the backup-super-block.
4226 */
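/* Layout note (as implemented below): data for part 0 is written at
 * destoffsets[i] and data for part 1 at destoffsets[i] + devstart2*512;
 * the 512-byte backup superblock itself lives 4096 bytes before
 * destoffsets[i], with a duplicate written after the data when
 * destoffsets[i] > 4096.
 */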
4227 int odata = disks;
4228 int rv = 0;
4229 int i;
4230 unsigned long long ll;
4231 int new_degraded;
4232 //printf("offset %llu\n", offset);
4233 if (level >= 4)
4234 odata--;
4235 if (level == 6)
4236 odata--;
4237
4238 /* Check that array hasn't become degraded, else we might backup the wrong data */
4239 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
4240 return -1; /* FIXME this error is ignored */
4241 new_degraded = (int)ll;
4242 if (new_degraded != *degraded) {
4243 /* check each device to ensure it is still working */
4244 struct mdinfo *sd;
4245 for (sd = sra->devs ; sd ; sd = sd->next) {
4246 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4247 continue;
4248 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
4249 char sbuf[100];
4250
4251 if (sysfs_get_str(sra, sd, "state",
4252 sbuf, sizeof(sbuf)) < 0 ||
4253 strstr(sbuf, "faulty") ||
4254 strstr(sbuf, "in_sync") == NULL) {
4255 /* this device is dead */
4256 sd->disk.state = (1<<MD_DISK_FAULTY);
4257 if (sd->disk.raid_disk >= 0 &&
4258 sources[sd->disk.raid_disk] >= 0) {
4259 close(sources[sd->disk.raid_disk]);
4260 sources[sd->disk.raid_disk] = -1;
4261 }
4262 }
4263 }
4264 }
4265 *degraded = new_degraded;
4266 }
4267 if (part) {
4268 bsb.arraystart2 = __cpu_to_le64(offset * odata);
4269 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
4270 } else {
4271 bsb.arraystart = __cpu_to_le64(offset * odata);
4272 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
4273 }
4274 if (part)
4275 bsb.magic[15] = '2';
4276 for (i = 0; i < dests; i++)
4277 if (part)
4278 lseek64(destfd[i], destoffsets[i] +
4279 __le64_to_cpu(bsb.devstart2)*512, 0);
4280 else
4281 lseek64(destfd[i], destoffsets[i], 0);
4282
4283 rv = save_stripes(sources, offsets, disks, chunk, level, layout,
4284 dests, destfd, offset * 512 * odata,
4285 stripes * chunk * odata, buf);
4286
4287 if (rv)
4288 return rv;
4289 bsb.mtime = __cpu_to_le64(time(0));
4290 for (i = 0; i < dests; i++) {
4291 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4292
4293 bsb.sb_csum = bsb_csum((char*)&bsb,
4294 ((char*)&bsb.sb_csum)-((char*)&bsb));
4295 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4296 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4297 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4298
4299 rv = -1;
4300 if ((unsigned long long)lseek64(destfd[i],
4301 destoffsets[i] - 4096, 0) !=
4302 destoffsets[i] - 4096)
4303 break;
4304 if (write(destfd[i], &bsb, 512) != 512)
4305 break;
4306 if (destoffsets[i] > 4096) {
4307 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
4308 destoffsets[i]+stripes*chunk*odata)
4309 break;
4310 if (write(destfd[i], &bsb, 512) != 512)
4311 break;
4312 }
4313 fsync(destfd[i]);
4314 rv = 0;
4315 }
4316
4317 return rv;
4318 }
4319
4320 /* in 2.6.30, the value reported by sync_completed can be
4321 * less than it should be by one stripe.
4322 * This only happens when reshape hits sync_max and pauses.
4323 * So allow wait_backup to either extend sync_max further
4324 * than strictly necessary, or return before the
4325 * sync has got quite as far as we would really like.
4326 * This is what 'blocks2' is for.
4327 * The various callers give appropriate values so that
4328 * everything works.
4329 */
4330 /* FIXME return value is often ignored */
4331 static int forget_backup(int dests, int *destfd,
4332 unsigned long long *destoffsets,
4333 int part)
4334 {
4335 /*
4336 * Erase backup 'part' (which is 0 or 1)
4337 */
4338 int i;
4339 int rv;
4340
4341 if (part) {
4342 bsb.arraystart2 = __cpu_to_le64(0);
4343 bsb.length2 = __cpu_to_le64(0);
4344 } else {
4345 bsb.arraystart = __cpu_to_le64(0);
4346 bsb.length = __cpu_to_le64(0);
4347 }
4348 bsb.mtime = __cpu_to_le64(time(0));
4349 rv = 0;
4350 for (i = 0; i < dests; i++) {
4351 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4352 bsb.sb_csum = bsb_csum((char*)&bsb,
4353 ((char*)&bsb.sb_csum)-((char*)&bsb));
4354 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4355 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4356 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4357 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4358 destoffsets[i]-4096)
4359 rv = -1;
4360 if (rv == 0 && write(destfd[i], &bsb, 512) != 512)
4361 rv = -1;
4362 fsync(destfd[i]);
4363 }
4364 return rv;
4365 }
4366
4367 static void fail(char *msg)
4368 {
4369 int rv;
4370 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4371 rv |= (write(2, "\n", 1) != 1);
4372 exit(rv ? 1 : 2);
4373 }
4374
4375 static char *abuf, *bbuf;
4376 static unsigned long long abuflen;
4377 static void validate(int afd, int bfd, unsigned long long offset)
4378 {
4379 /* check the data in the backup against the array.
4380 * This is only used for regression testing and should not
4381 * be used while the array is active
4382 */
4383 if (afd < 0)
4384 return;
4385 lseek64(bfd, offset - 4096, 0);
4386 if (read(bfd, &bsb2, 512) != 512)
4387 fail("cannot read bsb");
4388 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4389 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4390 fail("first csum bad");
4391 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4392 fail("magic is bad");
4393 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4394 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4395 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4396 fail("second csum bad");
4397
4398 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4399 fail("devstart is wrong");
4400
4401 if (bsb2.length) {
4402 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4403
4404 if (abuflen < len) {
4405 free(abuf);
4406 free(bbuf);
4407 abuflen = len;
4408 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4409 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4410 abuflen = 0;
4411 /* just stop validating on mem-alloc failure */
4412 return;
4413 }
4414 }
4415
4416 lseek64(bfd, offset, 0);
4417 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4418 //printf("len %llu\n", len);
4419 fail("read first backup failed");
4420 }
4421 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4422 if ((unsigned long long)read(afd, abuf, len) != len)
4423 fail("read first from array failed");
4424 if (memcmp(bbuf, abuf, len) != 0) {
4425 #if 0
4426 int i;
4427 printf("offset=%llu len=%llu\n",
4428 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4429 for (i=0; i<len; i++)
4430 if (bbuf[i] != abuf[i]) {
4431 printf("first diff byte %d\n", i);
4432 break;
4433 }
4434 #endif
4435 fail("data1 compare failed");
4436 }
4437 }
4438 if (bsb2.length2) {
4439 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4440
4441 if (abuflen < len) {
4442 free(abuf);
4443 free(bbuf);
4444 abuflen = len;
4445 abuf = xmalloc(abuflen);
4446 bbuf = xmalloc(abuflen);
4447 }
4448
4449 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4450 if ((unsigned long long)read(bfd, bbuf, len) != len)
4451 fail("read second backup failed");
4452 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4453 if ((unsigned long long)read(afd, abuf, len) != len)
4454 fail("read second from array failed");
4455 if (memcmp(bbuf, abuf, len) != 0)
4456 fail("data2 compare failed");
4457 }
4458 }
4459
4460 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4461 struct supertype *st, unsigned long blocks,
4462 int *fds, unsigned long long *offsets,
4463 int dests, int *destfd, unsigned long long *destoffsets)
4464 {
4465 /* Monitor a reshape where backup is being performed using
4466 * 'native' mechanism - either to a backup file, or
4467 * to some space in a spare.
4468 */
4469 char *buf;
4470 int degraded = -1;
4471 unsigned long long speed;
4472 unsigned long long suspend_point, array_size;
4473 unsigned long long backup_point, wait_point;
4474 unsigned long long reshape_completed;
4475 int done = 0;
4476 int increasing = reshape->after.data_disks >=
4477 reshape->before.data_disks;
4478 int part = 0; /* The next part of the backup area to fill. It
4479 * may already be full, so we need to check */
4480 int level = reshape->level;
4481 int layout = reshape->before.layout;
4482 int data = reshape->before.data_disks;
4483 int disks = reshape->before.data_disks + reshape->parity;
4484 int chunk = sra->array.chunk_size;
4485 struct mdinfo *sd;
4486 unsigned long stripes;
4487 int uuid[4];
4488 int frozen = 0;
4489
4490 /* set up the backup-super-block. This requires the
4491 * uuid from the array.
4492 */
4493 /* Find a superblock */
4494 for (sd = sra->devs; sd; sd = sd->next) {
4495 char *dn;
4496 int devfd;
4497 int ok;
4498 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4499 continue;
4500 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4501 devfd = dev_open(dn, O_RDONLY);
4502 if (devfd < 0)
4503 continue;
4504 ok = st->ss->load_super(st, devfd, NULL);
4505 close(devfd);
4506 if (ok == 0)
4507 break;
4508 }
4509 if (!sd) {
4510 pr_err("Cannot find a superblock\n");
4511 return 0;
4512 }
4513
4514 memset(&bsb, 0, 512);
4515 memcpy(bsb.magic, "md_backup_data-1", 16);
4516 st->ss->uuid_from_super(st, uuid);
4517 memcpy(bsb.set_uuid, uuid, 16);
4518 bsb.mtime = __cpu_to_le64(time(0));
4519 bsb.devstart2 = blocks;
4520
4521 stripes = blocks / (sra->array.chunk_size/512) /
4522 reshape->before.data_disks;
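/* Illustrative numbers only: with blocks == 16384 sectors, a 512K chunk
 * (1024 sectors) and 4 old data disks, stripes == 4, i.e. each full
 * backup covers up to 4 old chunks per device.
 */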
4523
4524 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4525 /* Don't start the 'reshape' */
4526 return 0;
4527 if (reshape->before.data_disks == reshape->after.data_disks) {
4528 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4529 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4530 }
4531
4532 if (increasing) {
4533 array_size = sra->component_size * reshape->after.data_disks;
4534 backup_point = sra->reshape_progress;
4535 suspend_point = 0;
4536 } else {
4537 array_size = sra->component_size * reshape->before.data_disks;
4538 backup_point = reshape->backup_blocks;
4539 suspend_point = array_size;
4540 }
4541
4542 while (!done) {
4543 int rv;
4544
4545 /* Want to return as soon as the oldest backup slot can
4546 * be released as that allows us to start backing up
4547 * some more, providing suspend_point has been
4548 * advanced, which it should have.
4549 */
4550 if (increasing) {
4551 wait_point = array_size;
4552 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4553 wait_point = (__le64_to_cpu(bsb.arraystart) +
4554 __le64_to_cpu(bsb.length));
4555 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4556 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4557 __le64_to_cpu(bsb.length2));
4558 } else {
4559 wait_point = 0;
4560 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4561 wait_point = __le64_to_cpu(bsb.arraystart);
4562 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4563 wait_point = __le64_to_cpu(bsb.arraystart2);
4564 }
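/* wait_point is the array address the reshape must get past before the
 * slot we want to fill next ('part') can be recycled; the forget_backup()
 * calls just below clear any slot the reshape has already moved beyond.
 */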
4565
4566 reshape_completed = sra->reshape_progress;
4567 rv = progress_reshape(sra, reshape,
4568 backup_point, wait_point,
4569 &suspend_point, &reshape_completed,
4570 &frozen);
4571 /* external metadata would need to ping_monitor here */
4572 sra->reshape_progress = reshape_completed;
4573
4574 /* Clear any backup region that is before 'here' */
4575 if (increasing) {
4576 if (__le64_to_cpu(bsb.length) > 0 &&
4577 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4578 __le64_to_cpu(bsb.length)))
4579 forget_backup(dests, destfd,
4580 destoffsets, 0);
4581 if (__le64_to_cpu(bsb.length2) > 0 &&
4582 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4583 __le64_to_cpu(bsb.length2)))
4584 forget_backup(dests, destfd,
4585 destoffsets, 1);
4586 } else {
4587 if (__le64_to_cpu(bsb.length) > 0 &&
4588 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4589 forget_backup(dests, destfd,
4590 destoffsets, 0);
4591 if (__le64_to_cpu(bsb.length2) > 0 &&
4592 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4593 forget_backup(dests, destfd,
4594 destoffsets, 1);
4595 }
4596 if (sigterm)
4597 rv = -2;
4598 if (rv < 0) {
4599 if (rv == -1)
4600 done = 1;
4601 break;
4602 }
4603 if (rv == 0 && increasing && !st->ss->external) {
4604 /* No longer need to monitor this reshape */
4605 sysfs_set_str(sra, NULL, "sync_max", "max");
4606 done = 1;
4607 break;
4608 }
4609
4610 while (rv) {
4611 unsigned long long offset;
4612 unsigned long actual_stripes;
4613 /* Need to backup some data.
4614 * If 'part' is not used and the desired
4615 * backup size is suspended, do a backup,
4616 * then consider the next part.
4617 */
4618 /* Check that 'part' is unused */
4619 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4620 break;
4621 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4622 break;
4623
4624 offset = backup_point / data;
4625 actual_stripes = stripes;
4626 if (increasing) {
4627 if (offset + actual_stripes * (chunk/512) >
4628 sra->component_size)
4629 actual_stripes = ((sra->component_size - offset)
4630 / (chunk/512));
4631 if (offset + actual_stripes * (chunk/512) >
4632 suspend_point/data)
4633 break;
4634 } else {
4635 if (offset < actual_stripes * (chunk/512))
4636 actual_stripes = offset / (chunk/512);
4637 offset -= actual_stripes * (chunk/512);
4638 if (offset < suspend_point/data)
4639 break;
4640 }
4641 if (actual_stripes == 0)
4642 break;
4643 grow_backup(sra, offset, actual_stripes, fds, offsets,
4644 disks, chunk, level, layout, dests, destfd,
4645 destoffsets, part, &degraded, buf);
4646 validate(afd, destfd[0], destoffsets[0]);
4647 /* record where 'part' is up to */
4648 part = !part;
4649 if (increasing)
4650 backup_point += actual_stripes * (chunk/512) * data;
4651 else
4652 backup_point -= actual_stripes * (chunk/512) * data;
4653 }
4654 }
4655
4656 /* FIXME maybe call progress_reshape one more time instead */
4657 /* remove any remaining suspension */
4658 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4659 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4660 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4661 sysfs_set_num(sra, NULL, "sync_min", 0);
4662
4663 if (reshape->before.data_disks == reshape->after.data_disks)
4664 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4665 free(buf);
4666 return done;
4667 }
4668
4669 /*
4670 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4671 * write that data into the array and update the super blocks with
4672 * the new reshape_progress
4673 */
4674 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist,
4675 int cnt, char *backup_file, int verbose)
4676 {
4677 int i, j;
4678 int old_disks;
4679 unsigned long long *offsets;
4680 unsigned long long nstripe, ostripe;
4681 int ndata, odata;
4682
4683 odata = info->array.raid_disks - info->delta_disks - 1;
4684 if (info->array.level == 6)
4685 odata--; /* number of data disks */
4686 ndata = info->array.raid_disks - 1;
4687 if (info->new_level == 6)
4688 ndata--;
4689
4690 old_disks = info->array.raid_disks - info->delta_disks;
4691
4692 if (info->delta_disks <= 0)
4693 /* Didn't grow, so the backup file must have
4694 * been used
4695 */
4696 old_disks = cnt;
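/* When a backup file is supplied, the loop below starts one slot early
 * and index old_disks-1 is read from that file rather than from a
 * spare device.
 */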
4697 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4698 struct mdinfo dinfo;
4699 int fd;
4700 int bsbsize;
4701 char *devname, namebuf[20];
4702 unsigned long long lo, hi;
4703
4704 /* This was a spare and may have some saved data on it.
4705 * Load the superblock, find and load the
4706 * backup_super_block.
4707 * If either fail, go on to next device.
4708 * If the backup contains no new info, just return
4709 * else restore data and update all superblocks
4710 */
4711 if (i == old_disks-1) {
4712 fd = open(backup_file, O_RDONLY);
4713 if (fd<0) {
4714 pr_err("backup file %s inaccessible: %s\n",
4715 backup_file, strerror(errno));
4716 continue;
4717 }
4718 devname = backup_file;
4719 } else {
4720 fd = fdlist[i];
4721 if (fd < 0)
4722 continue;
4723 if (st->ss->load_super(st, fd, NULL))
4724 continue;
4725
4726 st->ss->getinfo_super(st, &dinfo, NULL);
4727 st->ss->free_super(st);
4728
4729 if (lseek64(fd,
4730 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4731 0) < 0) {
4732 pr_err("Cannot seek on device %d\n", i);
4733 continue; /* Cannot seek */
4734 }
4735 sprintf(namebuf, "device-%d", i);
4736 devname = namebuf;
4737 }
4738 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4739 if (verbose)
4740 pr_err("Cannot read from %s\n", devname);
4741 continue; /* Cannot read */
4742 }
4743 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4744 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4745 if (verbose)
4746 pr_err("No backup metadata on %s\n", devname);
4747 continue;
4748 }
4749 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4750 if (verbose)
4751 pr_err("Bad backup-metadata checksum on %s\n",
4752 devname);
4753 continue; /* bad checksum */
4754 }
4755 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4756 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4757 if (verbose)
4758 pr_err("Bad backup-metadata checksum2 on %s\n",
4759 devname);
4760 continue; /* Bad second checksum */
4761 }
4762 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4763 if (verbose)
4764 pr_err("Wrong uuid on backup-metadata on %s\n",
4765 devname);
4766 continue; /* Wrong uuid */
4767 }
4768
4769 /*
4770 * array utime and backup-mtime should be updated at
4771 * much the same time, but it seems that sometimes
4772 * they aren't... So allow considerable flexibility in
4773 * matching, and allow this test to be overridden by
4774 * an environment variable.
4775 */
4776 if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
4777 time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
4778 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4779 pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
4780 (unsigned long)__le64_to_cpu(bsb.mtime),
4781 (unsigned long)info->array.utime);
4782 } else {
4783 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4784 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4785 continue; /* time stamp is too bad */
4786 }
4787 }
4788
4789 if (bsb.magic[15] == '1') {
4790 if (bsb.length == 0)
4791 continue;
4792 if (info->delta_disks >= 0) {
4793 /* reshape_progress is increasing */
4794 if (__le64_to_cpu(bsb.arraystart)
4795 + __le64_to_cpu(bsb.length)
4796 < info->reshape_progress) {
4797 nonew:
4798 if (verbose)
4799 pr_err("backup-metadata found on %s but is not needed\n", devname);
4800 continue; /* No new data here */
4801 }
4802 } else {
4803 /* reshape_progress is decreasing */
4804 if (__le64_to_cpu(bsb.arraystart) >=
4805 info->reshape_progress)
4806 goto nonew; /* No new data here */
4807 }
4808 } else {
4809 if (bsb.length == 0 && bsb.length2 == 0)
4810 continue;
4811 if (info->delta_disks >= 0) {
4812 /* reshape_progress is increasing */
4813 if ((__le64_to_cpu(bsb.arraystart)
4814 + __le64_to_cpu(bsb.length)
4815 < info->reshape_progress) &&
4816 (__le64_to_cpu(bsb.arraystart2)
4817 + __le64_to_cpu(bsb.length2)
4818 < info->reshape_progress))
4819 goto nonew; /* No new data here */
4820 } else {
4821 /* reshape_progress is decreasing */
4822 if (__le64_to_cpu(bsb.arraystart) >=
4823 info->reshape_progress &&
4824 __le64_to_cpu(bsb.arraystart2) >=
4825 info->reshape_progress)
4826 goto nonew; /* No new data here */
4827 }
4828 }
4829 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4830 second_fail:
4831 if (verbose)
4832 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4833 devname);
4834 continue; /* Cannot seek */
4835 }
4836 /* There should be a duplicate backup superblock 4k before here */
4837 if (lseek64(fd, -4096, 1) < 0 ||
4838 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4839 goto second_fail; /* Cannot find leading superblock */
4840 if (bsb.magic[15] == '1')
4841 bsbsize = offsetof(struct mdp_backup_super, pad1);
4842 else
4843 bsbsize = offsetof(struct mdp_backup_super, pad);
4844 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4845 goto second_fail; /* Cannot find leading superblock */
4846
4847 /* Now need the data offsets for all devices. */
4848 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4849 for(j=0; j<info->array.raid_disks; j++) {
4850 if (fdlist[j] < 0)
4851 continue;
4852 if (st->ss->load_super(st, fdlist[j], NULL))
4853 /* FIXME should this be an error? */
4854 continue;
4855 st->ss->getinfo_super(st, &dinfo, NULL);
4856 st->ss->free_super(st);
4857 offsets[j] = dinfo.data_offset * 512;
4858 }
4859 printf("%s: restoring critical section\n", Name);
4860
4861 if (restore_stripes(fdlist, offsets, info->array.raid_disks,
4862 info->new_chunk, info->new_level,
4863 info->new_layout, fd,
4864 __le64_to_cpu(bsb.devstart)*512,
4865 __le64_to_cpu(bsb.arraystart)*512,
4866 __le64_to_cpu(bsb.length)*512, NULL)) {
4867 /* didn't succeed, so give up */
4868 if (verbose)
4869 pr_err("Error restoring backup from %s\n",
4870 devname);
4871 free(offsets);
4872 return 1;
4873 }
4874
4875 if (bsb.magic[15] == '2' &&
4876 restore_stripes(fdlist, offsets, info->array.raid_disks,
4877 info->new_chunk, info->new_level,
4878 info->new_layout, fd,
4879 __le64_to_cpu(bsb.devstart)*512 +
4880 __le64_to_cpu(bsb.devstart2)*512,
4881 __le64_to_cpu(bsb.arraystart2)*512,
4882 __le64_to_cpu(bsb.length2)*512, NULL)) {
4883 /* didn't succeed, so give up */
4884 if (verbose)
4885 pr_err("Error restoring second backup from %s\n",
4886 devname);
4887 free(offsets);
4888 return 1;
4889 }
4890
4891 free(offsets);
4892
4893 /* Ok, so the data is restored. Let's update those superblocks. */
4894
4895 lo = hi = 0;
4896 if (bsb.length) {
4897 lo = __le64_to_cpu(bsb.arraystart);
4898 hi = lo + __le64_to_cpu(bsb.length);
4899 }
4900 if (bsb.magic[15] == '2' && bsb.length2) {
4901 unsigned long long lo1, hi1;
4902 lo1 = __le64_to_cpu(bsb.arraystart2);
4903 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4904 if (lo == hi) {
4905 lo = lo1;
4906 hi = hi1;
4907 } else if (lo < lo1)
4908 hi = hi1;
4909 else
4910 lo = lo1;
4911 }
4912 if (lo < hi && (info->reshape_progress < lo ||
4913 info->reshape_progress > hi))
4914 /* backup does not affect reshape_progress */ ;
4915 else if (info->delta_disks >= 0) {
4916 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4917 __le64_to_cpu(bsb.length);
4918 if (bsb.magic[15] == '2') {
4919 unsigned long long p2;
4920
4921 p2 = __le64_to_cpu(bsb.arraystart2) +
4922 __le64_to_cpu(bsb.length2);
4923 if (p2 > info->reshape_progress)
4924 info->reshape_progress = p2;
4925 }
4926 } else {
4927 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4928 if (bsb.magic[15] == '2') {
4929 unsigned long long p2;
4930
4931 p2 = __le64_to_cpu(bsb.arraystart2);
4932 if (p2 < info->reshape_progress)
4933 info->reshape_progress = p2;
4934 }
4935 }
4936 for (j=0; j<info->array.raid_disks; j++) {
4937 if (fdlist[j] < 0)
4938 continue;
4939 if (st->ss->load_super(st, fdlist[j], NULL))
4940 continue;
4941 st->ss->getinfo_super(st, &dinfo, NULL);
4942 dinfo.reshape_progress = info->reshape_progress;
4943 st->ss->update_super(st, &dinfo, "_reshape_progress",
4944 NULL,0, 0, NULL);
4945 st->ss->store_super(st, fdlist[j]);
4946 st->ss->free_super(st);
4947 }
4948 return 0;
4949 }
4950 /* Didn't find any backup data, try to see if any
4951 * was needed.
4952 */
4953 if (info->delta_disks < 0) {
4954 /* When shrinking, the critical section is at the end.
4955 * So see if we are before the critical section.
4956 */
4957 unsigned long long first_block;
4958 nstripe = ostripe = 0;
4959 first_block = 0;
4960 while (ostripe >= nstripe) {
4961 ostripe += info->array.chunk_size / 512;
4962 first_block = ostripe * odata;
4963 nstripe = first_block / ndata / (info->new_chunk/512) *
4964 (info->new_chunk/512);
4965 }
4966
4967 if (info->reshape_progress >= first_block)
4968 return 0;
4969 }
4970 if (info->delta_disks > 0) {
4971 /* See if we are beyond the critical section. */
4972 unsigned long long last_block;
4973 nstripe = ostripe = 0;
4974 last_block = 0;
4975 while (nstripe >= ostripe) {
4976 nstripe += info->new_chunk / 512;
4977 last_block = nstripe * ndata;
4978 ostripe = last_block / odata / (info->array.chunk_size/512) *
4979 (info->array.chunk_size/512);
4980 }
4981
4982 if (info->reshape_progress >= last_block)
4983 return 0;
4984 }
4985 /* needed to recover critical section! */
4986 if (verbose)
4987 pr_err("Failed to find backup of critical section\n");
4988 return 1;
4989 }
4990
4991 int Grow_continue_command(char *devname, int fd,
4992 char *backup_file, int verbose)
4993 {
4994 int ret_val = 0;
4995 struct supertype *st = NULL;
4996 struct mdinfo *content = NULL;
4997 struct mdinfo array;
4998 char *subarray = NULL;
4999 struct mdinfo *cc = NULL;
5000 struct mdstat_ent *mdstat = NULL;
5001 int cfd = -1;
5002 int fd2;
5003
5004 dprintf("Grow continue from command line called for %s\n", devname);
5005
5006 st = super_by_fd(fd, &subarray);
5007 if (!st || !st->ss) {
5008 pr_err("Unable to determine metadata format for %s\n", devname);
5009 return 1;
5010 }
5011 dprintf("Grow continue is run for ");
5012 if (st->ss->external == 0) {
5013 int d;
5014 int cnt = 5;
5015 dprintf_cont("native array (%s)\n", devname);
5016 if (md_get_array_info(fd, &array.array) < 0) {
5017 pr_err("%s is not an active md array - aborting\n",
5018 devname);
5019 ret_val = 1;
5020 goto Grow_continue_command_exit;
5021 }
5022 content = &array;
5023 sysfs_init(content, fd, NULL);
5024 /* Need to load a superblock.
5025 * FIXME we should really get what we need from
5026 * sysfs
5027 */
5028 do {
5029 for (d = 0; d < MAX_DISKS; d++) {
5030 mdu_disk_info_t disk;
5031 char *dv;
5032 int err;
5033 disk.number = d;
5034 if (md_get_disk_info(fd, &disk) < 0)
5035 continue;
5036 if (disk.major == 0 && disk.minor == 0)
5037 continue;
5038 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
5039 continue;
5040 dv = map_dev(disk.major, disk.minor, 1);
5041 if (!dv)
5042 continue;
5043 fd2 = dev_open(dv, O_RDONLY);
5044 if (fd2 < 0)
5045 continue;
5046 err = st->ss->load_super(st, fd2, NULL);
5047 close(fd2);
5048 if (err)
5049 continue;
5050 break;
5051 }
5052 if (d == MAX_DISKS) {
5053 pr_err("Unable to load metadata for %s\n",
5054 devname);
5055 ret_val = 1;
5056 goto Grow_continue_command_exit;
5057 }
5058 st->ss->getinfo_super(st, content, NULL);
5059 if (!content->reshape_active)
5060 sleep(3);
5061 else
5062 break;
5063 } while (cnt-- > 0);
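/* The loop above re-reads a superblock several times, sleeping 3
 * seconds between attempts, in case the metadata does not yet show
 * reshape_active.
 */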
5064 } else {
5065 char *container;
5066
5067 if (subarray) {
5068 dprintf_cont("subarray (%s)\n", subarray);
5069 container = st->container_devnm;
5070 cfd = open_dev_excl(st->container_devnm);
5071 } else {
5072 container = st->devnm;
5073 close(fd);
5074 cfd = open_dev_excl(st->devnm);
5075 dprintf_cont("container (%s)\n", container);
5076 fd = cfd;
5077 }
5078 if (cfd < 0) {
5079 pr_err("Unable to open container for %s\n", devname);
5080 ret_val = 1;
5081 goto Grow_continue_command_exit;
5082 }
5083
5084 /* find the array under reshape in the container
5085 */
5086 ret_val = st->ss->load_container(st, cfd, NULL);
5087 if (ret_val) {
5088 pr_err("Cannot read superblock for %s\n", devname);
5089 ret_val = 1;
5090 goto Grow_continue_command_exit;
5091 }
5092
5093 cc = st->ss->container_content(st, subarray);
5094 for (content = cc; content ; content = content->next) {
5095 char *array_name;
5096 int allow_reshape = 1;
5097
5098 if (content->reshape_active == 0)
5099 continue;
5100 /* The decision about array or container wide
5101 * reshape is taken in Grow_continue based on the
5102 * content->reshape_active state, therefore we
5103 * need to check_reshape based on
5104 * reshape_active and subarray name
5105 */
5106 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
5107 allow_reshape = 0;
5108 if (content->reshape_active == CONTAINER_RESHAPE &&
5109 (content->array.state
5110 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
5111 allow_reshape = 0;
5112
5113 if (!allow_reshape) {
5114 pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
5115 devname, container);
5116 ret_val = 1;
5117 goto Grow_continue_command_exit;
5118 }
5119
5120 array_name = strchr(content->text_version+1, '/')+1;
5121 mdstat = mdstat_by_subdev(array_name, container);
5122 if (!mdstat)
5123 continue;
5124 if (mdstat->active == 0) {
5125 pr_err("Skipping inactive array %s.\n",
5126 mdstat->devnm);
5127 free_mdstat(mdstat);
5128 mdstat = NULL;
5129 continue;
5130 }
5131 break;
5132 }
5133 if (!content) {
5134 pr_err("Unable to determine reshaped array for %s\n", devname);
5135 ret_val = 1;
5136 goto Grow_continue_command_exit;
5137 }
5138 fd2 = open_dev(mdstat->devnm);
5139 if (fd2 < 0) {
5140 pr_err("cannot open (%s)\n", mdstat->devnm);
5141 ret_val = 1;
5142 goto Grow_continue_command_exit;
5143 }
5144
5145 if (sysfs_init(content, fd2, mdstat->devnm)) {
5146 pr_err("Unable to initialize sysfs for %s, Grow cannot continue.\n",
5147 mdstat->devnm);
5148 ret_val = 1;
5149 close(fd2);
5150 goto Grow_continue_command_exit;
5151 }
5152
5153 close(fd2);
5154
5155 /* start mdmon in case it is not running
5156 */
5157 if (!mdmon_running(container))
5158 start_mdmon(container);
5159 ping_monitor(container);
5160
5161 if (mdmon_running(container))
5162 st->update_tail = &st->updates;
5163 else {
5164 pr_err("No mdmon found. Grow cannot continue.\n");
5165 ret_val = 1;
5166 goto Grow_continue_command_exit;
5167 }
5168 }
5169
5170 /* verify that the array under reshape is started from
5171 * the correct position
5172 */
5173 if (verify_reshape_position(content, content->array.level) < 0) {
5174 ret_val = 1;
5175 goto Grow_continue_command_exit;
5176 }
5177
5178 /* continue reshape
5179 */
5180 ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);
5181
5182 Grow_continue_command_exit:
5183 if (cfd > -1)
5184 close(cfd);
5185 st->ss->free_super(st);
5186 free_mdstat(mdstat);
5187 sysfs_free(cc);
5188 free(subarray);
5189
5190 return ret_val;
5191 }
5192
5193 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
5194 char *backup_file, int forked, int freeze_reshape)
5195 {
5196 int ret_val = 2;
5197
5198 if (!info->reshape_active)
5199 return ret_val;
5200
5201 if (st->ss->external) {
5202 int cfd = open_dev(st->container_devnm);
5203
5204 if (cfd < 0)
5205 return 1;
5206
5207 st->ss->load_container(st, cfd, st->container_devnm);
5208 close(cfd);
5209 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
5210 st, info, 0, backup_file, 0,
5211 forked, 1 | info->reshape_active,
5212 freeze_reshape);
5213 } else
5214 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
5215 NULL, INVALID_SECTORS, backup_file,
5216 0, forked, 1 | info->reshape_active,
5217 freeze_reshape);
5218
5219 return ret_val;
5220 }
5221
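/* Build the conventional backup-file path for an array: for example
 * (illustrative), make_backup("md127") returns MAP_DIR "/backup_file-md127".
 * locate_backup() below returns that path only when a regular file
 * already exists there, and NULL otherwise.
 */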
5222 char *make_backup(char *name)
5223 {
5224 char *base = "backup_file-";
5225 int len;
5226 char *fname;
5227
5228 len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1;
5229 fname = xmalloc(len);
5230 sprintf(fname, "%s/%s%s", MAP_DIR, base, name);
5231 return fname;
5232 }
5233
5234 char *locate_backup(char *name)
5235 {
5236 char *fl = make_backup(name);
5237 struct stat stb;
5238
5239 if (stat(fl, &stb) == 0 && S_ISREG(stb.st_mode))
5240 return fl;
5241
5242 free(fl);
5243 return NULL;
5244 }