1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stddef.h>
28 #include <stdint.h>
29 #include <signal.h>
30 #include <sys/wait.h>
31
32 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
33 #error no endian defined
34 #endif
35 #include "md_u.h"
36 #include "md_p.h"
37
38 int restore_backup(struct supertype *st,
39 struct mdinfo *content,
40 int working_disks,
41 int next_spare,
42 char **backup_filep,
43 int verbose)
44 {
45 int i;
46 int *fdlist;
47 struct mdinfo *dev;
48 int err;
49 int disk_count = next_spare + working_disks;
50 char *backup_file = *backup_filep;
51
52 dprintf("Called restore_backup()\n");
53 fdlist = xmalloc(sizeof(int) * disk_count);
54
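	/* Build an fd for every member device: active members are placed at
	 * their raid_disk slot and spares are appended from next_spare onwards,
	 * so Grow_restart() below can locate and replay the backed-up critical
	 * section.
	 */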
55 enable_fds(next_spare);
56 for (i = 0; i < next_spare; i++)
57 fdlist[i] = -1;
58 for (dev = content->devs; dev; dev = dev->next) {
59 char buf[22];
60 int fd;
61 sprintf(buf, "%d:%d",
62 dev->disk.major,
63 dev->disk.minor);
64 fd = dev_open(buf, O_RDWR);
65
66 if (dev->disk.raid_disk >= 0)
67 fdlist[dev->disk.raid_disk] = fd;
68 else
69 fdlist[next_spare++] = fd;
70 }
71
72 if (!backup_file) {
73 backup_file = locate_backup(content->sys_name);
74 *backup_filep = backup_file;
75 }
76
77 if (st->ss->external && st->ss->recover_backup)
78 err = st->ss->recover_backup(st, content);
79 else
80 err = Grow_restart(st, content, fdlist, next_spare,
81 backup_file, verbose > 0);
82
83 while (next_spare > 0) {
84 next_spare--;
85 if (fdlist[next_spare] >= 0)
86 close(fdlist[next_spare]);
87 }
88 free(fdlist);
89 if (err) {
90 pr_err("Failed to restore critical section for reshape - sorry.\n");
91 if (!backup_file)
92 pr_err("Possibly you need to specify a --backup-file\n");
93 return 1;
94 }
95
96 dprintf("restore_backup() returns status OK.\n");
97 return 0;
98 }
99
100 int Grow_Add_device(char *devname, int fd, char *newdev)
101 {
102 /* Add a device to an active array.
103 * Currently, just extend a linear array.
104 * This requires writing a new superblock on the
105 * new device, calling the kernel to add the device,
106 * and if that succeeds, update the superblock on
107 * all other devices.
108 * This means that we need to *find* all other devices.
109 */
110 struct mdinfo info;
111
112 dev_t rdev;
113 int nfd, fd2;
114 int d, nd;
115 struct supertype *st = NULL;
116 char *subarray = NULL;
117
118 if (md_get_array_info(fd, &info.array) < 0) {
119 pr_err("cannot get array info for %s\n", devname);
120 return 1;
121 }
122
123 if (info.array.level != -1) {
124 pr_err("can only add devices to linear arrays\n");
125 return 1;
126 }
127
128 st = super_by_fd(fd, &subarray);
129 if (!st) {
130 pr_err("cannot handle arrays with superblock version %d\n",
131 info.array.major_version);
132 return 1;
133 }
134
135 if (subarray) {
136 pr_err("Cannot grow linear sub-arrays yet\n");
137 free(subarray);
138 free(st);
139 return 1;
140 }
141
142 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
143 if (nfd < 0) {
144 pr_err("cannot open %s\n", newdev);
145 free(st);
146 return 1;
147 }
148 if (!fstat_is_blkdev(nfd, newdev, &rdev)) {
149 close(nfd);
150 free(st);
151 return 1;
152 }
153 /* now check out all the devices and make sure we can read the
154 * superblock */
155 for (d=0 ; d < info.array.raid_disks ; d++) {
156 mdu_disk_info_t disk;
157 char *dv;
158
159 st->ss->free_super(st);
160
161 disk.number = d;
162 if (md_get_disk_info(fd, &disk) < 0) {
163 pr_err("cannot get device detail for device %d\n",
164 d);
165 close(nfd);
166 free(st);
167 return 1;
168 }
169 dv = map_dev(disk.major, disk.minor, 1);
170 if (!dv) {
171 pr_err("cannot find device file for device %d\n",
172 d);
173 close(nfd);
174 free(st);
175 return 1;
176 }
177 fd2 = dev_open(dv, O_RDWR);
178 if (fd2 < 0) {
179 pr_err("cannot open device file %s\n", dv);
180 close(nfd);
181 free(st);
182 return 1;
183 }
184
185 if (st->ss->load_super(st, fd2, NULL)) {
186 pr_err("cannot find super block on %s\n", dv);
187 close(nfd);
188 close(fd2);
189 free(st);
190 return 1;
191 }
192 close(fd2);
193 }
194 	/* Ok, looks good. Let's update the superblock and write it out to
195 * newdev.
196 */
197
198 info.disk.number = d;
199 info.disk.major = major(rdev);
200 info.disk.minor = minor(rdev);
201 info.disk.raid_disk = d;
202 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
203 st->ss->update_super(st, &info, "linear-grow-new", newdev,
204 0, 0, NULL);
205
206 if (st->ss->store_super(st, nfd)) {
207 pr_err("Cannot store new superblock on %s\n",
208 newdev);
209 close(nfd);
210 return 1;
211 }
212 close(nfd);
213
214 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
215 pr_err("Cannot add new disk to this array\n");
216 return 1;
217 }
218 /* Well, that seems to have worked.
219 * Now go through and update all superblocks
220 */
221
222 if (md_get_array_info(fd, &info.array) < 0) {
223 pr_err("cannot get array info for %s\n", devname);
224 return 1;
225 }
226
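	/* d is the old raid_disks count, i.e. the slot just given to the new
	 * device; rewrite each existing member's superblock so it records
	 * nd+1 devices.
	 */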
227 nd = d;
228 for (d=0 ; d < info.array.raid_disks ; d++) {
229 mdu_disk_info_t disk;
230 char *dv;
231
232 disk.number = d;
233 if (md_get_disk_info(fd, &disk) < 0) {
234 pr_err("cannot get device detail for device %d\n",
235 d);
236 return 1;
237 }
238 dv = map_dev(disk.major, disk.minor, 1);
239 if (!dv) {
240 pr_err("cannot find device file for device %d\n",
241 d);
242 return 1;
243 }
244 fd2 = dev_open(dv, O_RDWR);
245 if (fd2 < 0) {
246 pr_err("cannot open device file %s\n", dv);
247 return 1;
248 }
249 if (st->ss->load_super(st, fd2, NULL)) {
250 pr_err("cannot find super block on %s\n", dv);
251 close(fd);
252 return 1;
253 }
254 info.array.raid_disks = nd+1;
255 info.array.nr_disks = nd+1;
256 info.array.active_disks = nd+1;
257 info.array.working_disks = nd+1;
258
259 st->ss->update_super(st, &info, "linear-grow-update", dv,
260 0, 0, NULL);
261
262 if (st->ss->store_super(st, fd2)) {
263 pr_err("Cannot store new superblock on %s\n", dv);
264 close(fd2);
265 return 1;
266 }
267 close(fd2);
268 }
269
270 return 0;
271 }
272
273 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
274 {
275 /*
276 * First check that array doesn't have a bitmap
277 * Then create the bitmap
278 * Then add it
279 *
280 * For internal bitmaps, we need to check the version,
281 * find all the active devices, and write the bitmap block
282 * to all devices
283 */
284 mdu_bitmap_file_t bmf;
285 mdu_array_info_t array;
286 struct supertype *st;
287 char *subarray = NULL;
288 int major = BITMAP_MAJOR_HI;
289 unsigned long long bitmapsize, array_size;
290 struct mdinfo *mdi;
291
292 /*
293 	 * We only ever get called if s->bitmap_file is not NULL, so this check
294 * is just here to quiet down static code checkers.
295 */
296 if (!s->bitmap_file)
297 return 1;
298
299 if (strcmp(s->bitmap_file, "clustered") == 0)
300 major = BITMAP_MAJOR_CLUSTERED;
301
302 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
303 if (errno == ENOMEM)
304 pr_err("Memory allocation failure.\n");
305 else
306 pr_err("bitmaps not supported by this kernel.\n");
307 return 1;
308 }
309 if (bmf.pathname[0]) {
310 if (strcmp(s->bitmap_file,"none") == 0) {
311 if (ioctl(fd, SET_BITMAP_FILE, -1) != 0) {
312 pr_err("failed to remove bitmap %s\n",
313 bmf.pathname);
314 return 1;
315 }
316 return 0;
317 }
318 pr_err("%s already has a bitmap (%s)\n",
319 devname, bmf.pathname);
320 return 1;
321 }
322 if (md_get_array_info(fd, &array) != 0) {
323 pr_err("cannot get array status for %s\n", devname);
324 return 1;
325 }
326 if (array.state & (1 << MD_SB_BITMAP_PRESENT)) {
327 if (strcmp(s->bitmap_file, "none")==0) {
328 array.state &= ~(1 << MD_SB_BITMAP_PRESENT);
329 if (md_set_array_info(fd, &array) != 0) {
330 if (array.state & (1 << MD_SB_CLUSTERED))
331 pr_err("failed to remove clustered bitmap.\n");
332 else
333 pr_err("failed to remove internal bitmap.\n");
334 return 1;
335 }
336 return 0;
337 }
338 pr_err("bitmap already present on %s\n", devname);
339 return 1;
340 }
341
342 if (strcmp(s->bitmap_file, "none") == 0) {
343 pr_err("no bitmap found on %s\n", devname);
344 return 1;
345 }
346 if (array.level <= 0) {
347 pr_err("Bitmaps not meaningful with level %s\n",
348 map_num(pers, array.level)?:"of this array");
349 return 1;
350 }
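	/* array.size is the per-device data size in KiB; shifting left by one
	 * converts it to 512-byte sectors.
	 */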
351 bitmapsize = array.size;
352 bitmapsize <<= 1;
353 if (get_dev_size(fd, NULL, &array_size) &&
354 array_size > (0x7fffffffULL << 9)) {
355 /* Array is big enough that we cannot trust array.size
356 * try other approaches
357 */
358 bitmapsize = get_component_size(fd);
359 }
360 if (bitmapsize == 0) {
361 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
362 return 1;
363 }
364
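	/* For RAID10 the bitmap covers the whole data range of the array,
	 * which is raid_disks/ncopies times the per-device size, so scale
	 * bitmapsize accordingly.
	 */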
365 if (array.level == 10) {
366 int ncopies;
367
368 ncopies = (array.layout & 255) * ((array.layout >> 8) & 255);
369 bitmapsize = bitmapsize * array.raid_disks / ncopies;
370 }
371
372 st = super_by_fd(fd, &subarray);
373 if (!st) {
374 pr_err("Cannot understand version %d.%d\n",
375 array.major_version, array.minor_version);
376 return 1;
377 }
378 if (subarray) {
379 pr_err("Cannot add bitmaps to sub-arrays yet\n");
380 free(subarray);
381 free(st);
382 return 1;
383 }
384
385 mdi = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY);
386 if (mdi) {
387 if (mdi->consistency_policy == CONSISTENCY_POLICY_PPL) {
388 pr_err("Cannot add bitmap to array with PPL\n");
389 free(mdi);
390 free(st);
391 return 1;
392 }
393 free(mdi);
394 }
395
396 if (strcmp(s->bitmap_file, "internal") == 0 ||
397 strcmp(s->bitmap_file, "clustered") == 0) {
398 int rv;
399 int d;
400 int offset_setable = 0;
401 if (st->ss->add_internal_bitmap == NULL) {
402 pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name);
403 return 1;
404 }
405 st->nodes = c->nodes;
406 st->cluster_name = c->homecluster;
407 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
408 if (mdi)
409 offset_setable = 1;
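		/* Add the bitmap to the superblock of every in-sync member and
		 * write it out; the array-wide bitmap flag (or the sysfs bitmap
		 * location) is only set once all members have been updated.
		 */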
410 for (d = 0; d < st->max_devs; d++) {
411 mdu_disk_info_t disk;
412 char *dv;
413 int fd2;
414
415 disk.number = d;
416 if (md_get_disk_info(fd, &disk) < 0)
417 continue;
418 if (disk.major == 0 && disk.minor == 0)
419 continue;
420 if ((disk.state & (1 << MD_DISK_SYNC)) == 0)
421 continue;
422 dv = map_dev(disk.major, disk.minor, 1);
423 if (!dv)
424 continue;
425 fd2 = dev_open(dv, O_RDWR);
426 if (fd2 < 0)
427 continue;
428 rv = st->ss->load_super(st, fd2, NULL);
429 if (!rv) {
430 rv = st->ss->add_internal_bitmap(
431 st, &s->bitmap_chunk, c->delay,
432 s->write_behind, bitmapsize,
433 offset_setable, major);
434 if (!rv) {
435 st->ss->write_bitmap(st, fd2,
436 NodeNumUpdate);
437 } else {
438 pr_err("failed to create internal bitmap - chunksize problem.\n");
439 }
440 } else {
441 pr_err("failed to load super-block.\n");
442 }
443 close(fd2);
444 if (rv)
445 return 1;
446 }
447 if (offset_setable) {
448 st->ss->getinfo_super(st, mdi, NULL);
449 if (sysfs_init(mdi, fd, NULL)) {
450 			pr_err("failed to initialize sysfs.\n");
451 free(mdi);
452 }
453 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
454 mdi->bitmap_offset);
455 free(mdi);
456 } else {
457 if (strcmp(s->bitmap_file, "clustered") == 0)
458 array.state |= (1 << MD_SB_CLUSTERED);
459 array.state |= (1 << MD_SB_BITMAP_PRESENT);
460 rv = md_set_array_info(fd, &array);
461 }
462 if (rv < 0) {
463 if (errno == EBUSY)
464 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
465 pr_err("failed to set internal bitmap.\n");
466 return 1;
467 }
468 } else {
469 int uuid[4];
470 int bitmap_fd;
471 int d;
472 int max_devs = st->max_devs;
473
474 /* try to load a superblock */
475 for (d = 0; d < max_devs; d++) {
476 mdu_disk_info_t disk;
477 char *dv;
478 int fd2;
479 disk.number = d;
480 if (md_get_disk_info(fd, &disk) < 0)
481 continue;
482 if ((disk.major==0 && disk.minor == 0) ||
483 (disk.state & (1 << MD_DISK_REMOVED)))
484 continue;
485 dv = map_dev(disk.major, disk.minor, 1);
486 if (!dv)
487 continue;
488 fd2 = dev_open(dv, O_RDONLY);
489 if (fd2 >= 0) {
490 if (st->ss->load_super(st, fd2, NULL) == 0) {
491 close(fd2);
492 st->ss->uuid_from_super(st, uuid);
493 break;
494 }
495 close(fd2);
496 }
497 }
498 if (d == max_devs) {
499 pr_err("cannot find UUID for array!\n");
500 return 1;
501 }
502 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid,
503 s->bitmap_chunk, c->delay, s->write_behind,
504 bitmapsize, major)) {
505 return 1;
506 }
507 bitmap_fd = open(s->bitmap_file, O_RDWR);
508 if (bitmap_fd < 0) {
509 pr_err("weird: %s cannot be opened\n", s->bitmap_file);
510 return 1;
511 }
512 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
513 int err = errno;
514 if (errno == EBUSY)
515 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
516 pr_err("Cannot set bitmap file for %s: %s\n",
517 devname, strerror(err));
518 return 1;
519 }
520 }
521
522 return 0;
523 }
524
525 int Grow_consistency_policy(char *devname, int fd, struct context *c, struct shape *s)
526 {
527 struct supertype *st;
528 struct mdinfo *sra;
529 struct mdinfo *sd;
530 char *subarray = NULL;
531 int ret = 0;
532 char container_dev[PATH_MAX];
533 char buf[20];
534
535 if (s->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
536 s->consistency_policy != CONSISTENCY_POLICY_PPL) {
537 pr_err("Operation not supported for consistency policy %s\n",
538 map_num(consistency_policies, s->consistency_policy));
539 return 1;
540 }
541
542 st = super_by_fd(fd, &subarray);
543 if (!st)
544 return 1;
545
546 sra = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY|GET_LEVEL|
547 GET_DEVS|GET_STATE);
548 if (!sra) {
549 ret = 1;
550 goto free_st;
551 }
552
553 if (s->consistency_policy == CONSISTENCY_POLICY_PPL &&
554 !st->ss->write_init_ppl) {
555 pr_err("%s metadata does not support PPL\n", st->ss->name);
556 ret = 1;
557 goto free_info;
558 }
559
560 if (sra->array.level != 5) {
561 pr_err("Operation not supported for array level %d\n",
562 sra->array.level);
563 ret = 1;
564 goto free_info;
565 }
566
567 if (sra->consistency_policy == (unsigned)s->consistency_policy) {
568 pr_err("Consistency policy is already %s\n",
569 map_num(consistency_policies, s->consistency_policy));
570 ret = 1;
571 goto free_info;
572 } else if (sra->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
573 sra->consistency_policy != CONSISTENCY_POLICY_PPL) {
574 pr_err("Current consistency policy is %s, cannot change to %s\n",
575 map_num(consistency_policies, sra->consistency_policy),
576 map_num(consistency_policies, s->consistency_policy));
577 ret = 1;
578 goto free_info;
579 }
580
581 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
582 if (sysfs_get_str(sra, NULL, "sync_action", buf, 20) <= 0) {
583 ret = 1;
584 goto free_info;
585 } else if (strcmp(buf, "reshape\n") == 0) {
586 pr_err("PPL cannot be enabled when reshape is in progress\n");
587 ret = 1;
588 goto free_info;
589 }
590 }
591
592 if (subarray) {
593 char *update;
594
595 if (s->consistency_policy == CONSISTENCY_POLICY_PPL)
596 update = "ppl";
597 else
598 update = "no-ppl";
599
600 sprintf(container_dev, "/dev/%s", st->container_devnm);
601
602 ret = Update_subarray(container_dev, subarray, update, NULL,
603 c->verbose);
604 if (ret)
605 goto free_info;
606 }
607
608 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
609 struct mdinfo info;
610
611 if (subarray) {
612 struct mdinfo *mdi;
613 int cfd;
614
615 cfd = open(container_dev, O_RDWR|O_EXCL);
616 if (cfd < 0) {
617 pr_err("Failed to open %s\n", container_dev);
618 ret = 1;
619 goto free_info;
620 }
621
622 ret = st->ss->load_container(st, cfd, st->container_devnm);
623 close(cfd);
624
625 if (ret) {
626 pr_err("Cannot read superblock for %s\n",
627 container_dev);
628 goto free_info;
629 }
630
631 mdi = st->ss->container_content(st, subarray);
632 info = *mdi;
633 free(mdi);
634 }
635
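		/* For each in-sync member: record the PPL location and size in
		 * its superblock (native metadata only), expose them through
		 * the sysfs ppl_sector/ppl_size attributes, and write an
		 * initial PPL to the device.
		 */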
636 for (sd = sra->devs; sd; sd = sd->next) {
637 int dfd;
638 char *devpath;
639
640 if ((sd->disk.state & (1 << MD_DISK_SYNC)) == 0)
641 continue;
642
643 devpath = map_dev(sd->disk.major, sd->disk.minor, 0);
644 dfd = dev_open(devpath, O_RDWR);
645 if (dfd < 0) {
646 pr_err("Failed to open %s\n", devpath);
647 ret = 1;
648 goto free_info;
649 }
650
651 if (!subarray) {
652 ret = st->ss->load_super(st, dfd, NULL);
653 if (ret) {
654 pr_err("Failed to load super-block.\n");
655 close(dfd);
656 goto free_info;
657 }
658
659 ret = st->ss->update_super(st, sra, "ppl", devname,
660 c->verbose, 0, NULL);
661 if (ret) {
662 close(dfd);
663 st->ss->free_super(st);
664 goto free_info;
665 }
666 st->ss->getinfo_super(st, &info, NULL);
667 }
668
669 ret |= sysfs_set_num(sra, sd, "ppl_sector", info.ppl_sector);
670 ret |= sysfs_set_num(sra, sd, "ppl_size", info.ppl_size);
671
672 if (ret) {
673 pr_err("Failed to set PPL attributes for %s\n",
674 sd->sys_name);
675 close(dfd);
676 st->ss->free_super(st);
677 goto free_info;
678 }
679
680 ret = st->ss->write_init_ppl(st, &info, dfd);
681 if (ret)
682 pr_err("Failed to write PPL\n");
683
684 close(dfd);
685
686 if (!subarray)
687 st->ss->free_super(st);
688
689 if (ret)
690 goto free_info;
691 }
692 }
693
694 ret = sysfs_set_str(sra, NULL, "consistency_policy",
695 map_num(consistency_policies,
696 s->consistency_policy));
697 if (ret)
698 pr_err("Failed to change array consistency policy\n");
699
700 free_info:
701 sysfs_free(sra);
702 free_st:
703 free(st);
704 free(subarray);
705
706 return ret;
707 }
708
709 /*
710  * When reshaping an array we might need to back up some data.
711  * This is written to all spares with a 'super_block' describing it.
712  * The superblock goes 4K from the end of the used space on the
713  * device.
714  * It is written after the backup is complete.
715 * It has the following structure.
716 */
717
718 static struct mdp_backup_super {
719 char magic[16]; /* md_backup_data-1 or -2 */
720 __u8 set_uuid[16];
721 __u64 mtime;
722 /* start/sizes in 512byte sectors */
723 __u64 devstart; /* address on backup device/file of data */
724 __u64 arraystart;
725 __u64 length;
726 	__u32 sb_csum;	/* csum of preceding bytes. */
727 __u32 pad1;
728 	__u64 devstart2;	/* offset into data of second section */
729 __u64 arraystart2;
730 __u64 length2;
731 	__u32 sb_csum2;	/* csum of preceding bytes. */
732 __u8 pad[512-68-32];
733 } __attribute__((aligned(512))) bsb, bsb2;
734
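/* Checksum over the leading bytes of the backup super_block; the result is
 * stored little-endian in sb_csum / sb_csum2.
 */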
735 static __u32 bsb_csum(char *buf, int len)
736 {
737 int i;
738 int csum = 0;
739 for (i = 0; i < len; i++)
740 csum = (csum<<3) + buf[0];
741 return __cpu_to_le32(csum);
742 }
743
744 static int check_idle(struct supertype *st)
745 {
746 /* Check that all member arrays for this container, or the
747 * container of this array, are idle
748 */
749 char *container = (st->container_devnm[0]
750 ? st->container_devnm : st->devnm);
751 struct mdstat_ent *ent, *e;
752 int is_idle = 1;
753
754 ent = mdstat_read(0, 0);
755 for (e = ent ; e; e = e->next) {
756 if (!is_container_member(e, container))
757 continue;
758 if (e->percent >= 0) {
759 is_idle = 0;
760 break;
761 }
762 }
763 free_mdstat(ent);
764 return is_idle;
765 }
766
767 static int freeze_container(struct supertype *st)
768 {
769 char *container = (st->container_devnm[0]
770 ? st->container_devnm : st->devnm);
771
772 if (!check_idle(st))
773 return -1;
774
775 if (block_monitor(container, 1)) {
776 pr_err("failed to freeze container\n");
777 return -2;
778 }
779
780 return 1;
781 }
782
783 static void unfreeze_container(struct supertype *st)
784 {
785 char *container = (st->container_devnm[0]
786 ? st->container_devnm : st->devnm);
787
788 unblock_monitor(container, 1);
789 }
790
791 static int freeze(struct supertype *st)
792 {
793 /* Try to freeze resync/rebuild on this array/container.
794 * Return -1 if the array is busy,
795 	 * return -2 if the container cannot be frozen,
796 * return 0 if this kernel doesn't support 'frozen'
797 * return 1 if it worked.
798 */
799 if (st->ss->external)
800 return freeze_container(st);
801 else {
802 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
803 int err;
804 char buf[20];
805
806 if (!sra)
807 return -1;
808 /* Need to clear any 'read-auto' status */
809 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
810 strncmp(buf, "read-auto", 9) == 0)
811 sysfs_set_str(sra, NULL, "array_state", "clean");
812
813 err = sysfs_freeze_array(sra);
814 sysfs_free(sra);
815 return err;
816 }
817 }
818
819 static void unfreeze(struct supertype *st)
820 {
821 if (st->ss->external)
822 return unfreeze_container(st);
823 else {
824 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
825 char buf[20];
826
827 if (sra &&
828 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0 &&
829 strcmp(buf, "frozen\n") == 0)
830 sysfs_set_str(sra, NULL, "sync_action", "idle");
831 sysfs_free(sra);
832 }
833 }
834
835 static void wait_reshape(struct mdinfo *sra)
836 {
837 int fd = sysfs_get_fd(sra, NULL, "sync_action");
838 char action[20];
839
840 if (fd < 0)
841 return;
842
843 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
844 strncmp(action, "reshape", 7) == 0)
845 sysfs_wait(fd, NULL);
846 close(fd);
847 }
848
849 static int reshape_super(struct supertype *st, unsigned long long size,
850 int level, int layout, int chunksize, int raid_disks,
851 int delta_disks, char *backup_file, char *dev,
852 int direction, int verbose)
853 {
854 /* nothing extra to check in the native case */
855 if (!st->ss->external)
856 return 0;
857 if (!st->ss->reshape_super ||
858 !st->ss->manage_reshape) {
859 pr_err("%s metadata does not support reshape\n",
860 st->ss->name);
861 return 1;
862 }
863
864 return st->ss->reshape_super(st, size, level, layout, chunksize,
865 raid_disks, delta_disks, backup_file, dev,
866 direction, verbose);
867 }
868
869 static void sync_metadata(struct supertype *st)
870 {
871 if (st->ss->external) {
872 if (st->update_tail) {
873 flush_metadata_updates(st);
874 st->update_tail = &st->updates;
875 } else
876 st->ss->sync_metadata(st);
877 }
878 }
879
880 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
881 {
882 /* when dealing with external metadata subarrays we need to be
883 * prepared to handle EAGAIN. The kernel may need to wait for
884 * mdmon to mark the array active so the kernel can handle
885 * allocations/writeback when preparing the reshape action
886 * (md_allow_write()). We temporarily disable safe_mode_delay
887 * to close a race with the array_state going clean before the
888 * next write to raid_disks / stripe_cache_size
889 */
890 char safe[50];
891 int rc;
892
893 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
894 if (!container ||
895 (strcmp(name, "raid_disks") != 0 &&
896 strcmp(name, "stripe_cache_size") != 0))
897 return sysfs_set_num(sra, NULL, name, n);
898
899 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
900 if (rc <= 0)
901 return -1;
902 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
903 rc = sysfs_set_num(sra, NULL, name, n);
904 if (rc < 0 && errno == EAGAIN) {
905 ping_monitor(container);
906 /* if we get EAGAIN here then the monitor is not active
907 * so stop trying
908 */
909 rc = sysfs_set_num(sra, NULL, name, n);
910 }
911 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
912 return rc;
913 }
914
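/* Start (or resume) the reshape: park suspend_{lo,hi} at the current
 * reshape_progress and set sync_max so the kernel will not reshape past the
 * point we are prepared for.  reshape_progress is in array-data sectors while
 * sync_max is per-device, hence the division by data_disks; for a shrinking
 * reshape the position is measured back from the end of the array.
 */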
915 int start_reshape(struct mdinfo *sra, int already_running,
916 int before_data_disks, int data_disks)
917 {
918 int err;
919 unsigned long long sync_max_to_set;
920
921 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
922 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
923 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
924 sra->reshape_progress);
925 if (before_data_disks <= data_disks)
926 sync_max_to_set = sra->reshape_progress / data_disks;
927 else
928 sync_max_to_set = (sra->component_size * data_disks
929 - sra->reshape_progress) / data_disks;
930 if (!already_running)
931 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
932 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
933 if (!already_running && err == 0) {
934 int cnt = 5;
935 do {
936 err = sysfs_set_str(sra, NULL, "sync_action", "reshape");
937 if (err)
938 sleep(1);
939 } while (err && errno == EBUSY && cnt-- > 0);
940 }
941 return err;
942 }
943
944 void abort_reshape(struct mdinfo *sra)
945 {
946 sysfs_set_str(sra, NULL, "sync_action", "idle");
947 /*
948 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
949 * suspend_hi to decrease as well as increase.")
950 * you could only increase suspend_{lo,hi} unless the region they
951 * covered was empty. So to reset to 0, you need to push suspend_lo
952 * up past suspend_hi first. So to maximize the chance of mdadm
953 * working on all kernels, we want to keep doing that.
954 */
955 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
956 sysfs_set_num(sra, NULL, "suspend_hi", 0);
957 sysfs_set_num(sra, NULL, "suspend_lo", 0);
958 sysfs_set_num(sra, NULL, "sync_min", 0);
959 // It isn't safe to reset sync_max as we aren't monitoring.
960 // Array really should be stopped at this point.
961 }
962
963 int remove_disks_for_takeover(struct supertype *st,
964 struct mdinfo *sra,
965 int layout)
966 {
967 int nr_of_copies;
968 struct mdinfo *remaining;
969 int slot;
970
971 if (st->ss->external) {
972 int rv = 0;
973 struct mdinfo *arrays = st->ss->container_content(st, NULL);
974 /*
975 	 * container_content returns the list of arrays in the container.
976 	 * If arrays->next is not NULL it means that there are
977 	 * 2 arrays in the container and the operation should be blocked
978 */
979 if (arrays) {
980 if (arrays->next)
981 rv = 1;
982 sysfs_free(arrays);
983 if (rv) {
984 pr_err("Error. Cannot perform operation on /dev/%s\n", st->devnm);
985 pr_err("For this operation it MUST be single array in container\n");
986 return rv;
987 }
988 }
989 }
990
991 if (sra->array.level == 10)
992 nr_of_copies = layout & 0xff;
993 else if (sra->array.level == 1)
994 nr_of_copies = sra->array.raid_disks;
995 else
996 return 1;
997
998 remaining = sra->devs;
999 sra->devs = NULL;
1000 /* for each 'copy', select one device and remove from the list. */
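	/* e.g. for a 4-device near-2 RAID10 (nr_of_copies == 2) this keeps one
	 * in-sync device from slots {0,1} and one from {2,3}, leaving two
	 * devices to become the RAID0.
	 */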
1001 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
1002 struct mdinfo **diskp;
1003 int found = 0;
1004
1005 /* Find a working device to keep */
1006 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
1007 struct mdinfo *disk = *diskp;
1008
1009 if (disk->disk.raid_disk < slot)
1010 continue;
1011 if (disk->disk.raid_disk >= slot + nr_of_copies)
1012 continue;
1013 if (disk->disk.state & (1<<MD_DISK_REMOVED))
1014 continue;
1015 if (disk->disk.state & (1<<MD_DISK_FAULTY))
1016 continue;
1017 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
1018 continue;
1019
1020 /* We have found a good disk to use! */
1021 *diskp = disk->next;
1022 disk->next = sra->devs;
1023 sra->devs = disk;
1024 found = 1;
1025 break;
1026 }
1027 if (!found)
1028 break;
1029 }
1030
1031 if (slot < sra->array.raid_disks) {
1032 /* didn't find all slots */
1033 struct mdinfo **e;
1034 e = &remaining;
1035 while (*e)
1036 e = &(*e)->next;
1037 *e = sra->devs;
1038 sra->devs = remaining;
1039 return 1;
1040 }
1041
1042 /* Remove all 'remaining' devices from the array */
1043 while (remaining) {
1044 struct mdinfo *sd = remaining;
1045 remaining = sd->next;
1046
1047 sysfs_set_str(sra, sd, "state", "faulty");
1048 sysfs_set_str(sra, sd, "slot", "none");
1049 		/* for external metadata, disks should be removed by mdmon */
1050 if (!st->ss->external)
1051 sysfs_set_str(sra, sd, "state", "remove");
1052 sd->disk.state |= (1<<MD_DISK_REMOVED);
1053 sd->disk.state &= ~(1<<MD_DISK_SYNC);
1054 sd->next = sra->devs;
1055 sra->devs = sd;
1056 }
1057 return 0;
1058 }
1059
1060 void reshape_free_fdlist(int *fdlist,
1061 unsigned long long *offsets,
1062 int size)
1063 {
1064 int i;
1065
1066 for (i = 0; i < size; i++)
1067 if (fdlist[i] >= 0)
1068 close(fdlist[i]);
1069
1070 free(fdlist);
1071 free(offsets);
1072 }
1073
1074 int reshape_prepare_fdlist(char *devname,
1075 struct mdinfo *sra,
1076 int raid_disks,
1077 int nrdisks,
1078 unsigned long blocks,
1079 char *backup_file,
1080 int *fdlist,
1081 unsigned long long *offsets)
1082 {
1083 int d = 0;
1084 struct mdinfo *sd;
1085
1086 enable_fds(nrdisks);
1087 for (d = 0; d <= nrdisks; d++)
1088 fdlist[d] = -1;
1089 d = raid_disks;
1090 for (sd = sra->devs; sd; sd = sd->next) {
1091 if (sd->disk.state & (1<<MD_DISK_FAULTY))
1092 continue;
1093 if (sd->disk.state & (1<<MD_DISK_SYNC) &&
1094 sd->disk.raid_disk < raid_disks) {
1095 char *dn = map_dev(sd->disk.major,
1096 sd->disk.minor, 1);
1097 fdlist[sd->disk.raid_disk]
1098 = dev_open(dn, O_RDONLY);
1099 offsets[sd->disk.raid_disk] = sd->data_offset*512;
1100 if (fdlist[sd->disk.raid_disk] < 0) {
1101 pr_err("%s: cannot open component %s\n",
1102 devname, dn ? dn : "-unknown-");
1103 d = -1;
1104 goto release;
1105 }
1106 } else if (backup_file == NULL) {
1107 /* spare */
1108 char *dn = map_dev(sd->disk.major,
1109 sd->disk.minor, 1);
1110 fdlist[d] = dev_open(dn, O_RDWR);
1111 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
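			/* the backup area on a spare sits near the end of its
			 * data space; the extra 8 sectors (4K) are reserved
			 * for the backup super_block described above */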
1112 if (fdlist[d] < 0) {
1113 pr_err("%s: cannot open component %s\n",
1114 devname, dn ? dn : "-unknown-");
1115 d = -1;
1116 goto release;
1117 }
1118 d++;
1119 }
1120 }
1121 release:
1122 return d;
1123 }
1124
1125 int reshape_open_backup_file(char *backup_file,
1126 int fd,
1127 char *devname,
1128 long blocks,
1129 int *fdlist,
1130 unsigned long long *offsets,
1131 char *sys_name,
1132 int restart)
1133 {
1134 /* Return 1 on success, 0 on any form of failure */
1135 /* need to check backup file is large enough */
1136 char buf[512];
1137 struct stat stb;
1138 unsigned int dev;
1139 int i;
1140
1141 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
1142 S_IRUSR | S_IWUSR);
1143 *offsets = 8 * 512;
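	/* backup data in the file starts 4K in; the blocks + 8 sectors zeroed
	 * below presumably leave the same 4K of room for the backup
	 * super_block */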
1144 if (*fdlist < 0) {
1145 pr_err("%s: cannot create backup file %s: %s\n",
1146 devname, backup_file, strerror(errno));
1147 return 0;
1148 }
1149 /* Guard against backup file being on array device.
1150 * If array is partitioned or if LVM etc is in the
1151 * way this will not notice, but it is better than
1152 * nothing.
1153 */
1154 fstat(*fdlist, &stb);
1155 dev = stb.st_dev;
1156 fstat(fd, &stb);
1157 if (stb.st_rdev == dev) {
1158 pr_err("backup file must NOT be on the array being reshaped.\n");
1159 close(*fdlist);
1160 return 0;
1161 }
1162
1163 memset(buf, 0, 512);
1164 for (i=0; i < blocks + 8 ; i++) {
1165 if (write(*fdlist, buf, 512) != 512) {
1166 pr_err("%s: cannot create backup file %s: %s\n",
1167 devname, backup_file, strerror(errno));
1168 return 0;
1169 }
1170 }
1171 if (fsync(*fdlist) != 0) {
1172 pr_err("%s: cannot create backup file %s: %s\n",
1173 devname, backup_file, strerror(errno));
1174 return 0;
1175 }
1176
1177 if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) {
1178 char *bu = make_backup(sys_name);
1179 if (symlink(backup_file, bu))
1180 pr_err("Recording backup file in " MAP_DIR " failed: %s\n",
1181 strerror(errno));
1182 free(bu);
1183 }
1184
1185 return 1;
1186 }
1187
1188 unsigned long compute_backup_blocks(int nchunk, int ochunk,
1189 unsigned int ndata, unsigned int odata)
1190 {
1191 unsigned long a, b, blocks;
1192 	/* So how much do we need to back up?
1193 * We need an amount of data which is both a whole number of
1194 * old stripes and a whole number of new stripes.
1195 * So LCM for (chunksize*datadisks).
1196 */
1197 a = (ochunk/512) * odata;
1198 b = (nchunk/512) * ndata;
1199 /* Find GCD */
1200 a = GCD(a, b);
1201 /* LCM == product / GCD */
1202 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
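	/* e.g. growing from 3 data disks with 64K chunks to 4 data disks with
	 * 512K chunks: a = 128*3 = 384, b = 1024*4 = 4096, GCD = 128, so
	 * blocks = 128*1024*3*4/128 = 12288 sectors (32 old stripes == 3 new
	 * stripes == 6MiB).
	 */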
1203
1204 return blocks;
1205 }
1206
1207 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
1208 {
1209 /* Based on the current array state in info->array and
1210 * the changes in info->new_* etc, determine:
1211 * - whether the change is possible
1212 * - Intermediate level/raid_disks/layout
1213 * - whether a restriping reshape is needed
1214 * - number of sectors in minimum change unit. This
1215 * will cover a whole number of stripes in 'before' and
1216 * 'after'.
1217 *
1218 	 * Return a message if the change should be rejected,
1219 	 * or NULL if the change can be achieved.
1220 *
1221 * This can be called as part of starting a reshape, or
1222 * when assembling an array that is undergoing reshape.
1223 */
1224 int near, far, offset, copies;
1225 int new_disks;
1226 int old_chunk, new_chunk;
1227 /* delta_parity records change in number of devices
1228 * caused by level change
1229 */
1230 int delta_parity = 0;
1231
1232 memset(re, 0, sizeof(*re));
1233
1234 	/* If a new level is not explicitly given, we assume no change */
1235 if (info->new_level == UnSet)
1236 info->new_level = info->array.level;
1237
1238 if (info->new_chunk)
1239 switch (info->new_level) {
1240 case 0:
1241 case 4:
1242 case 5:
1243 case 6:
1244 case 10:
1245 /* chunk size is meaningful, must divide component_size
1246 * evenly
1247 */
1248 if (info->component_size % (info->new_chunk/512)) {
1249 unsigned long long shrink = info->component_size;
1250 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1251 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1252 info->new_chunk/1024, info->component_size/2);
1253 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1254 devname, shrink/2);
1255 pr_err("will shrink the array so the given chunk size would work.\n");
1256 return "";
1257 }
1258 break;
1259 default:
1260 return "chunk size not meaningful for this level";
1261 }
1262 else
1263 info->new_chunk = info->array.chunk_size;
1264
1265 switch (info->array.level) {
1266 default:
1267 		return "No reshape is possible for this RAID level";
1268 case LEVEL_LINEAR:
1269 if (info->delta_disks != UnSet)
1270 return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
1271 else
1272 return "Only --add is supported for LINEAR, other --grow options are not meaningful";
1273 case 1:
1274 /* RAID1 can convert to RAID1 with different disks, or
1275 * raid5 with 2 disks, or
1276 * raid0 with 1 disk
1277 */
1278 if (info->new_level > 1 && (info->component_size & 7))
1279 return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
1280 if (info->new_level == 0) {
1281 if (info->delta_disks != UnSet &&
1282 info->delta_disks != 0)
1283 return "Cannot change number of disks with RAID1->RAID0 conversion";
1284 re->level = 0;
1285 re->before.data_disks = 1;
1286 re->after.data_disks = 1;
1287 return NULL;
1288 }
1289 if (info->new_level == 1) {
1290 if (info->delta_disks == UnSet)
1291 /* Don't know what to do */
1292 return "no change requested for Growing RAID1";
1293 re->level = 1;
1294 return NULL;
1295 }
1296 if (info->array.raid_disks != 2 && info->new_level == 5)
1297 return "Can only convert a 2-device array to RAID5";
1298 if (info->array.raid_disks == 2 && info->new_level == 5) {
1299 re->level = 5;
1300 re->before.data_disks = 1;
1301 if (info->delta_disks != UnSet &&
1302 info->delta_disks != 0)
1303 re->after.data_disks = 1 + info->delta_disks;
1304 else
1305 re->after.data_disks = 1;
1306 if (re->after.data_disks < 1)
1307 return "Number of disks too small for RAID5";
1308
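			/* the RAID1 has no layout or chunk size of its own; use
			 * left-symmetric and a 64K chunk for the RAID5 takeover */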
1309 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1310 info->array.chunk_size = 65536;
1311 break;
1312 }
1313 /* Could do some multi-stage conversions, but leave that to
1314 * later.
1315 */
1316 		return "Impossible level change request for RAID1";
1317
1318 case 10:
1319 /* RAID10 can be converted from near mode to
1320 * RAID0 by removing some devices.
1321 * It can also be reshaped if the kernel supports
1322 * new_data_offset.
1323 */
1324 switch (info->new_level) {
1325 case 0:
1326 if ((info->array.layout & ~0xff) != 0x100)
1327 return "Cannot Grow RAID10 with far/offset layout";
1328 			/* number of devices must be a multiple of the number of copies */
1329 if (info->array.raid_disks % (info->array.layout & 0xff))
1330 return "RAID10 layout too complex for Grow operation";
1331
1332 new_disks = (info->array.raid_disks
1333 / (info->array.layout & 0xff));
1334 if (info->delta_disks == UnSet)
1335 info->delta_disks = (new_disks
1336 - info->array.raid_disks);
1337
1338 if (info->delta_disks != new_disks - info->array.raid_disks)
1339 return "New number of raid-devices impossible for RAID10";
1340 if (info->new_chunk &&
1341 info->new_chunk != info->array.chunk_size)
1342 return "Cannot change chunk-size with RAID10 Grow";
1343
1344 /* looks good */
1345 re->level = 0;
1346 re->before.data_disks = new_disks;
1347 re->after.data_disks = re->before.data_disks;
1348 return NULL;
1349
1350 case 10:
1351 near = info->array.layout & 0xff;
1352 far = (info->array.layout >> 8) & 0xff;
1353 offset = info->array.layout & 0x10000;
1354 if (far > 1 && !offset)
1355 return "Cannot reshape RAID10 in far-mode";
1356 copies = near * far;
1357
1358 old_chunk = info->array.chunk_size * far;
1359
1360 if (info->new_layout == UnSet)
1361 info->new_layout = info->array.layout;
1362 else {
1363 near = info->new_layout & 0xff;
1364 far = (info->new_layout >> 8) & 0xff;
1365 offset = info->new_layout & 0x10000;
1366 if (far > 1 && !offset)
1367 return "Cannot reshape RAID10 to far-mode";
1368 if (near * far != copies)
1369 return "Cannot change number of copies when reshaping RAID10";
1370 }
1371 if (info->delta_disks == UnSet)
1372 info->delta_disks = 0;
1373 new_disks = (info->array.raid_disks +
1374 info->delta_disks);
1375
1376 new_chunk = info->new_chunk * far;
1377
1378 re->level = 10;
1379 re->before.layout = info->array.layout;
1380 re->before.data_disks = info->array.raid_disks;
1381 re->after.layout = info->new_layout;
1382 re->after.data_disks = new_disks;
1383 /* For RAID10 we don't do backup but do allow reshape,
1384 * so set backup_blocks to INVALID_SECTORS rather than
1385 * zero.
1386 * And there is no need to synchronise stripes on both
1387 * 'old' and 'new'. So the important
1388 * number is the minimum data_offset difference
1389 * which is the larger of (offset copies * chunk).
1390 */
1391 re->backup_blocks = INVALID_SECTORS;
1392 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1393 if (new_disks < re->before.data_disks &&
1394 info->space_after < re->min_offset_change)
1395 /* Reduce component size by one chunk */
1396 re->new_size = (info->component_size -
1397 re->min_offset_change);
1398 else
1399 re->new_size = info->component_size;
1400 re->new_size = re->new_size * new_disks / copies;
1401 return NULL;
1402
1403 default:
1404 return "RAID10 can only be changed to RAID0";
1405 }
1406 case 0:
1407 /* RAID0 can be converted to RAID10, or to RAID456 */
1408 if (info->new_level == 10) {
1409 if (info->new_layout == UnSet &&
1410 info->delta_disks == UnSet) {
1411 /* Assume near=2 layout */
1412 info->new_layout = 0x102;
1413 info->delta_disks = info->array.raid_disks;
1414 }
1415 if (info->new_layout == UnSet) {
1416 int copies = 1 + (info->delta_disks
1417 / info->array.raid_disks);
1418 if (info->array.raid_disks * (copies-1) !=
1419 info->delta_disks)
1420 return "Impossible number of devices for RAID0->RAID10";
1421 info->new_layout = 0x100 + copies;
1422 }
1423 if (info->delta_disks == UnSet) {
1424 int copies = info->new_layout & 0xff;
1425 if (info->new_layout != 0x100 + copies)
1426 				return "New layout impossible for RAID0->RAID10";
1427 info->delta_disks = (copies - 1) *
1428 info->array.raid_disks;
1429 }
1430 if (info->new_chunk &&
1431 info->new_chunk != info->array.chunk_size)
1432 return "Cannot change chunk-size with RAID0->RAID10";
1433 /* looks good */
1434 re->level = 10;
1435 re->before.data_disks = (info->array.raid_disks +
1436 info->delta_disks);
1437 re->after.data_disks = re->before.data_disks;
1438 re->before.layout = info->new_layout;
1439 return NULL;
1440 }
1441
1442 		/* RAID0 can also convert to RAID0/4/5/6 by first converting to
1443 * a raid4 style layout of the final level.
1444 */
1445 switch (info->new_level) {
1446 case 4:
1447 delta_parity = 1;
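		/* fall through: both RAID0->RAID4 and a plain RAID0 reshape go
		 * via a RAID4-style intermediate */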
1448 case 0:
1449 re->level = 4;
1450 re->before.layout = 0;
1451 break;
1452 case 5:
1453 delta_parity = 1;
1454 re->level = 5;
1455 re->before.layout = ALGORITHM_PARITY_N;
1456 if (info->new_layout == UnSet)
1457 info->new_layout = map_name(r5layout, "default");
1458 break;
1459 case 6:
1460 delta_parity = 2;
1461 re->level = 6;
1462 re->before.layout = ALGORITHM_PARITY_N;
1463 if (info->new_layout == UnSet)
1464 info->new_layout = map_name(r6layout, "default");
1465 break;
1466 default:
1467 return "Impossible level change requested";
1468 }
1469 re->before.data_disks = info->array.raid_disks;
1470 /* determining 'after' layout happens outside this 'switch' */
1471 break;
1472
1473 case 4:
1474 info->array.layout = ALGORITHM_PARITY_N;
1475 case 5:
1476 switch (info->new_level) {
1477 case 0:
1478 delta_parity = -1;
1479 case 4:
1480 re->level = info->array.level;
1481 re->before.data_disks = info->array.raid_disks - 1;
1482 re->before.layout = info->array.layout;
1483 break;
1484 case 5:
1485 re->level = 5;
1486 re->before.data_disks = info->array.raid_disks - 1;
1487 re->before.layout = info->array.layout;
1488 break;
1489 case 6:
1490 delta_parity = 1;
1491 re->level = 6;
1492 re->before.data_disks = info->array.raid_disks - 1;
1493 switch (info->array.layout) {
1494 case ALGORITHM_LEFT_ASYMMETRIC:
1495 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1496 break;
1497 case ALGORITHM_RIGHT_ASYMMETRIC:
1498 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1499 break;
1500 case ALGORITHM_LEFT_SYMMETRIC:
1501 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1502 break;
1503 case ALGORITHM_RIGHT_SYMMETRIC:
1504 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1505 break;
1506 case ALGORITHM_PARITY_0:
1507 re->before.layout = ALGORITHM_PARITY_0_6;
1508 break;
1509 case ALGORITHM_PARITY_N:
1510 re->before.layout = ALGORITHM_PARITY_N_6;
1511 break;
1512 default:
1513 return "Cannot convert an array with this layout";
1514 }
1515 break;
1516 case 1:
1517 if (info->array.raid_disks != 2)
1518 return "Can only convert a 2-device array to RAID1";
1519 if (info->delta_disks != UnSet &&
1520 info->delta_disks != 0)
1521 return "Cannot set raid_disk when converting RAID5->RAID1";
1522 re->level = 1;
1523 info->new_chunk = 0;
1524 return NULL;
1525 default:
1526 return "Impossible level change requested";
1527 }
1528 break;
1529 case 6:
1530 switch (info->new_level) {
1531 case 4:
1532 case 5:
1533 delta_parity = -1;
1534 case 6:
1535 re->level = 6;
1536 re->before.data_disks = info->array.raid_disks - 2;
1537 re->before.layout = info->array.layout;
1538 break;
1539 default:
1540 return "Impossible level change requested";
1541 }
1542 break;
1543 }
1544
1545 /* If we reached here then it looks like a re-stripe is
1546 * happening. We have determined the intermediate level
1547 * and initial raid_disks/layout and stored these in 're'.
1548 *
1549 * We need to deduce the final layout that can be atomically
1550 * converted to the end state.
1551 */
1552 switch (info->new_level) {
1553 case 0:
1554 /* We can only get to RAID0 from RAID4 or RAID5
1555 * with appropriate layout and one extra device
1556 */
1557 if (re->level != 4 && re->level != 5)
1558 			return "Cannot convert to RAID0 from this level";
1559
1560 switch (re->level) {
1561 case 4:
1562 re->before.layout = 0;
1563 re->after.layout = 0;
1564 break;
1565 case 5:
1566 re->after.layout = ALGORITHM_PARITY_N;
1567 break;
1568 }
1569 break;
1570
1571 case 4:
1572 /* We can only get to RAID4 from RAID5 */
1573 if (re->level != 4 && re->level != 5)
1574 return "Cannot convert to RAID4 from this level";
1575
1576 switch (re->level) {
1577 case 4:
1578 re->after.layout = 0;
1579 break;
1580 case 5:
1581 re->after.layout = ALGORITHM_PARITY_N;
1582 break;
1583 }
1584 break;
1585
1586 case 5:
1587 /* We get to RAID5 from RAID5 or RAID6 */
1588 if (re->level != 5 && re->level != 6)
1589 return "Cannot convert to RAID5 from this level";
1590
1591 switch (re->level) {
1592 case 5:
1593 if (info->new_layout == UnSet)
1594 re->after.layout = re->before.layout;
1595 else
1596 re->after.layout = info->new_layout;
1597 break;
1598 case 6:
1599 if (info->new_layout == UnSet)
1600 info->new_layout = re->before.layout;
1601
1602 /* after.layout needs to be raid6 version of new_layout */
1603 if (info->new_layout == ALGORITHM_PARITY_N)
1604 re->after.layout = ALGORITHM_PARITY_N;
1605 else {
1606 char layout[40];
1607 char *ls = map_num(r5layout, info->new_layout);
1608 int l;
1609 if (ls) {
1610 /* Current RAID6 layout has a RAID5
1611 * equivalent - good
1612 */
1613 strcat(strcpy(layout, ls), "-6");
1614 l = map_name(r6layout, layout);
1615 if (l == UnSet)
1616 return "Cannot find RAID6 layout to convert to";
1617 } else {
1618 /* Current RAID6 has no equivalent.
1619 * If it is already a '-6' layout we
1620 * can leave it unchanged, else we must
1621 * fail
1622 */
1623 ls = map_num(r6layout, info->new_layout);
1624 if (!ls ||
1625 strcmp(ls+strlen(ls)-2, "-6") != 0)
1626 return "Please specify new layout";
1627 l = info->new_layout;
1628 }
1629 re->after.layout = l;
1630 }
1631 }
1632 break;
1633
1634 case 6:
1635 /* We must already be at level 6 */
1636 if (re->level != 6)
1637 return "Impossible level change";
1638 if (info->new_layout == UnSet)
1639 re->after.layout = info->array.layout;
1640 else
1641 re->after.layout = info->new_layout;
1642 break;
1643 default:
1644 return "Impossible level change requested";
1645 }
1646 if (info->delta_disks == UnSet)
1647 info->delta_disks = delta_parity;
1648
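	/* delta_disks is the requested change in total devices; subtracting
	 * delta_parity (devices gained or lost to parity by the level change)
	 * leaves the change in data disks.
	 */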
1649 re->after.data_disks =
1650 (re->before.data_disks + info->delta_disks - delta_parity);
1651
1652 switch (re->level) {
1653 case 6:
1654 re->parity = 2;
1655 break;
1656 case 4:
1657 case 5:
1658 re->parity = 1;
1659 break;
1660 default:
1661 re->parity = 0;
1662 break;
1663 }
1664 	/* So we have a restripe operation; we need to calculate the number
1665 * of blocks per reshape operation.
1666 */
1667 re->new_size = info->component_size * re->before.data_disks;
1668 if (info->new_chunk == 0)
1669 info->new_chunk = info->array.chunk_size;
1670 if (re->after.data_disks == re->before.data_disks &&
1671 re->after.layout == re->before.layout &&
1672 info->new_chunk == info->array.chunk_size) {
1673 /* Nothing to change, can change level immediately. */
1674 re->level = info->new_level;
1675 re->backup_blocks = 0;
1676 return NULL;
1677 }
1678 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1679 /* chunk and layout changes make no difference */
1680 re->level = info->new_level;
1681 re->backup_blocks = 0;
1682 return NULL;
1683 }
1684
1685 if (re->after.data_disks == re->before.data_disks &&
1686 get_linux_version() < 2006032)
1687 return "in-place reshape is not safe before 2.6.32 - sorry.";
1688
1689 if (re->after.data_disks < re->before.data_disks &&
1690 get_linux_version() < 2006030)
1691 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1692
1693 re->backup_blocks = compute_backup_blocks(
1694 info->new_chunk, info->array.chunk_size,
1695 re->after.data_disks,
1696 re->before.data_disks);
1697 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1698
1699 re->new_size = info->component_size * re->after.data_disks;
1700 return NULL;
1701 }
1702
1703 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1704 char *text_version)
1705 {
1706 struct mdinfo *info;
1707 char *subarray;
1708 int ret_val = -1;
1709
1710 if ((st == NULL) || (sra == NULL))
1711 return ret_val;
1712
1713 if (text_version == NULL)
1714 text_version = sra->text_version;
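	/* text_version for an external subarray typically looks like
	 * "/md127/0"; the name after the second '/' selects the subarray
	 * within the container.
	 */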
1715 subarray = strchr(text_version + 1, '/')+1;
1716 info = st->ss->container_content(st, subarray);
1717 if (info) {
1718 unsigned long long current_size = 0;
1719 unsigned long long new_size =
1720 info->custom_array_size/2;
1721
1722 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1723 new_size > current_size) {
1724 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1725 < 0)
1726 dprintf("Error: Cannot set array size");
1727 else {
1728 ret_val = 0;
1729 dprintf("Array size changed");
1730 }
1731 dprintf_cont(" from %llu to %llu.\n",
1732 current_size, new_size);
1733 }
1734 sysfs_free(info);
1735 } else
1736 		dprintf("Error: set_array_size(): info pointer is NULL\n");
1737
1738 return ret_val;
1739 }
1740
1741 static int reshape_array(char *container, int fd, char *devname,
1742 struct supertype *st, struct mdinfo *info,
1743 int force, struct mddev_dev *devlist,
1744 unsigned long long data_offset,
1745 char *backup_file, int verbose, int forked,
1746 int restart, int freeze_reshape);
1747 static int reshape_container(char *container, char *devname,
1748 int mdfd,
1749 struct supertype *st,
1750 struct mdinfo *info,
1751 int force,
1752 char *backup_file, int verbose,
1753 int forked, int restart, int freeze_reshape);
1754
1755 int Grow_reshape(char *devname, int fd,
1756 struct mddev_dev *devlist,
1757 unsigned long long data_offset,
1758 struct context *c, struct shape *s)
1759 {
1760 /* Make some changes in the shape of an array.
1761 * The kernel must support the change.
1762 *
1763 * There are three different changes. Each can trigger
1764 * a resync or recovery so we freeze that until we have
1765 * requested everything (if kernel supports freezing - 2.6.30).
1766 * The steps are:
1767 * - change size (i.e. component_size)
1768 * - change level
1769 * - change layout/chunksize/ndisks
1770 *
1771 * The last can require a reshape. It is different on different
1772 * levels so we need to check the level before actioning it.
1773 	 * Sometimes the level change needs to be requested after the
1774 * reshape (e.g. raid6->raid5, raid5->raid0)
1775 *
1776 */
1777 struct mdu_array_info_s array;
1778 int rv = 0;
1779 struct supertype *st;
1780 char *subarray = NULL;
1781
1782 int frozen;
1783 int changed = 0;
1784 char *container = NULL;
1785 int cfd = -1;
1786
1787 struct mddev_dev *dv;
1788 int added_disks;
1789
1790 struct mdinfo info;
1791 struct mdinfo *sra;
1792
1793 if (md_get_array_info(fd, &array) < 0) {
1794 pr_err("%s is not an active md array - aborting\n",
1795 devname);
1796 return 1;
1797 }
1798 if (data_offset != INVALID_SECTORS && array.level != 10 &&
1799 (array.level < 4 || array.level > 6)) {
1800 pr_err("--grow --data-offset not yet supported\n");
1801 return 1;
1802 }
1803
1804 if (s->size > 0 &&
1805 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1806 pr_err("cannot change component size at the same time as other changes.\n"
1807 " Change size first, then check data is intact before making other changes.\n");
1808 return 1;
1809 }
1810
1811 if (s->raiddisks && s->raiddisks < array.raid_disks &&
1812 array.level > 1 && get_linux_version() < 2006032 &&
1813 !check_env("MDADM_FORCE_FEWER")) {
1814 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1815 " Please use a newer kernel\n");
1816 return 1;
1817 }
1818
1819 if (array.level > 1 &&
1820 (array.chunk_size / 1024) > (int)s->size) {
1821 pr_err("component size must be larger than chunk size.\n");
1822 return 1;
1823 }
1824
1825 st = super_by_fd(fd, &subarray);
1826 if (!st) {
1827 pr_err("Unable to determine metadata format for %s\n", devname);
1828 return 1;
1829 }
1830 if (s->raiddisks > st->max_devs) {
1831 pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs);
1832 return 1;
1833 }
1834 if (s->level == 0 &&
1835 (array.state & (1<<MD_SB_BITMAP_PRESENT)) &&
1836 !(array.state & (1<<MD_SB_CLUSTERED))) {
1837 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
1838 if (md_set_array_info(fd, &array)!= 0) {
1839 pr_err("failed to remove internal bitmap.\n");
1840 return 1;
1841 }
1842 }
1843
1844 /* in the external case we need to check that the requested reshape is
1845 * supported, and perform an initial check that the container holds the
1846 * pre-requisite spare devices (mdmon owns final validation)
1847 */
1848 if (st->ss->external) {
1849 int retval;
1850
1851 if (subarray) {
1852 container = st->container_devnm;
1853 cfd = open_dev_excl(st->container_devnm);
1854 } else {
1855 container = st->devnm;
1856 close(fd);
1857 cfd = open_dev_excl(st->devnm);
1858 fd = cfd;
1859 }
1860 if (cfd < 0) {
1861 pr_err("Unable to open container for %s\n",
1862 devname);
1863 free(subarray);
1864 return 1;
1865 }
1866
1867 retval = st->ss->load_container(st, cfd, NULL);
1868
1869 if (retval) {
1870 pr_err("Cannot read superblock for %s\n",
1871 devname);
1872 free(subarray);
1873 return 1;
1874 }
1875
1876 /* check if operation is supported for metadata handler */
1877 if (st->ss->container_content) {
1878 struct mdinfo *cc = NULL;
1879 struct mdinfo *content = NULL;
1880
1881 cc = st->ss->container_content(st, subarray);
1882 for (content = cc; content ; content = content->next) {
1883 int allow_reshape = 1;
1884
1885 /* check if reshape is allowed based on metadata
1886 			 * indications stored in content->array.state
1887 */
1888 if (content->array.state &
1889 (1 << MD_SB_BLOCK_VOLUME))
1890 allow_reshape = 0;
1891 if (content->array.state &
1892 (1 << MD_SB_BLOCK_CONTAINER_RESHAPE))
1893 allow_reshape = 0;
1894 if (!allow_reshape) {
1895 pr_err("cannot reshape arrays in container with unsupported metadata: %s(%s)\n",
1896 devname, container);
1897 sysfs_free(cc);
1898 free(subarray);
1899 return 1;
1900 }
1901 if (content->consistency_policy ==
1902 CONSISTENCY_POLICY_PPL) {
1903 pr_err("Operation not supported when ppl consistency policy is enabled\n");
1904 sysfs_free(cc);
1905 free(subarray);
1906 return 1;
1907 }
1908 }
1909 sysfs_free(cc);
1910 }
1911 if (mdmon_running(container))
1912 st->update_tail = &st->updates;
1913 }
1914
1915 added_disks = 0;
1916 for (dv = devlist; dv; dv = dv->next)
1917 added_disks++;
1918 if (s->raiddisks > array.raid_disks &&
1919 array.spare_disks + added_disks < (s->raiddisks - array.raid_disks) &&
1920 !c->force) {
1921 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
1922 " Use --force to over-ride this check.\n",
1923 s->raiddisks - array.raid_disks,
1924 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1925 array.spare_disks + added_disks);
1926 return 1;
1927 }
1928
1929 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS |
1930 GET_STATE | GET_VERSION);
1931 if (sra) {
1932 if (st->ss->external && subarray == NULL) {
1933 array.level = LEVEL_CONTAINER;
1934 sra->array.level = LEVEL_CONTAINER;
1935 }
1936 } else {
1937 pr_err("failed to read sysfs parameters for %s\n",
1938 devname);
1939 return 1;
1940 }
1941 frozen = freeze(st);
1942 if (frozen < -1) {
1943 /* freeze() already spewed the reason */
1944 sysfs_free(sra);
1945 return 1;
1946 } else if (frozen < 0) {
1947 pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname);
1948 sysfs_free(sra);
1949 return 1;
1950 }
1951
1952 /* ========= set size =============== */
1953 if (s->size > 0 &&
1954 (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1955 unsigned long long orig_size = get_component_size(fd)/2;
1956 unsigned long long min_csize;
1957 struct mdinfo *mdi;
1958 int raid0_takeover = 0;
1959
1960 if (orig_size == 0)
1961 orig_size = (unsigned) array.size;
1962
1963 if (orig_size == 0) {
1964 pr_err("Cannot set device size in this type of array.\n");
1965 rv = 1;
1966 goto release;
1967 }
1968
1969 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1970 devname, APPLY_METADATA_CHANGES,
1971 c->verbose > 0)) {
1972 rv = 1;
1973 goto release;
1974 }
1975 sync_metadata(st);
1976 if (st->ss->external) {
1977 			/* metadata can have size limitations;
1978 			 * update the size value according to metadata information
1979 */
1980 struct mdinfo *sizeinfo =
1981 st->ss->container_content(st, subarray);
1982 if (sizeinfo) {
1983 unsigned long long new_size =
1984 sizeinfo->custom_array_size/2;
1985 int data_disks = get_data_disks(
1986 sizeinfo->array.level,
1987 sizeinfo->array.layout,
1988 sizeinfo->array.raid_disks);
1989 new_size /= data_disks;
1990 dprintf("Metadata size correction from %llu to %llu (%llu)\n",
1991 orig_size, new_size,
1992 new_size * data_disks);
1993 s->size = new_size;
1994 sysfs_free(sizeinfo);
1995 }
1996 }
1997
1998 /* Update the size of each member device in case
1999 * they have been resized. This will never reduce
2000 * below the current used-size. The "size" attribute
2001 * understands '0' to mean 'max'.
2002 */
2003 min_csize = 0;
2004 for (mdi = sra->devs; mdi; mdi = mdi->next) {
2005 sysfs_set_num(sra, mdi, "size", s->size == MAX_SIZE ? 0
2006 : s->size);
2007 if (array.not_persistent == 0 &&
2008 array.major_version == 0 &&
2009 get_linux_version() < 3001000) {
2010 /* Dangerous to allow size to exceed 2TB */
2011 unsigned long long csize;
2012 if (sysfs_get_ll(sra, mdi, "size", &csize) == 0) {
2013 if (csize >= 2ULL*1024*1024*1024)
2014 csize = 2ULL*1024*1024*1024;
2015 if (min_csize == 0 ||
2016     min_csize > csize)
2017 min_csize = csize;
2018 }
2019 }
2020 }
2021 if (min_csize && s->size > min_csize) {
2022 pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n");
2023 rv = 1;
2024 goto size_change_error;
2025 }
2026 if (min_csize && s->size == MAX_SIZE) {
2027 /* Don't let the kernel choose a size - it will get
2028 * it wrong
2029 */
2030 pr_err("Limited v0.90 array to 2TB per device\n");
2031 s->size = min_csize;
2032 }
2033 if (st->ss->external) {
2034 if (sra->array.level == 0) {
2035 rv = sysfs_set_str(sra, NULL, "level",
2036 "raid5");
2037 if (!rv) {
2038 raid0_takeover = 1;
2039 /* get array parameters after takeover
2040 * to change one parameter at a time only
2041 */
2042 rv = md_get_array_info(fd, &array);
2043 }
2044 }
2045 /* make sure mdmon is
2046 * aware of the new level */
2047 if (!mdmon_running(st->container_devnm))
2048 start_mdmon(st->container_devnm);
2049 ping_monitor(container);
2050 if (mdmon_running(st->container_devnm) &&
2051 st->update_tail == NULL)
2052 st->update_tail = &st->updates;
2053 }
2054
2055 if (s->size == MAX_SIZE)
2056 s->size = 0;
2057 array.size = s->size;
2058 if (s->size & ~INT32_MAX) {
2059 /* got truncated to 32bit, write to
2060 * component_size instead
2061 */
2062 if (sra)
2063 rv = sysfs_set_num(sra, NULL,
2064 "component_size", s->size);
2065 else
2066 rv = -1;
2067 } else {
2068 rv = md_set_array_info(fd, &array);
2069
2070 /* manage array size when it is managed externally
2071 */
2072 if ((rv == 0) && st->ss->external)
2073 rv = set_array_size(st, sra, sra->text_version);
2074 }
2075
2076 if (raid0_takeover) {
2077 /* do not resync non-existing parity,
2078 * we will drop it anyway
2079 */
2080 sysfs_set_str(sra, NULL, "sync_action", "frozen");
2081 /* go back to raid0, drop parity disk
2082 */
2083 sysfs_set_str(sra, NULL, "level", "raid0");
2084 md_get_array_info(fd, &array);
2085 }
2086
2087 size_change_error:
2088 if (rv != 0) {
2089 int err = errno;
2090
2091 /* restore metadata */
2092 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
2093 UnSet, NULL, devname,
2094 ROLLBACK_METADATA_CHANGES,
2095 c->verbose) == 0)
2096 sync_metadata(st);
2097 pr_err("Cannot set device size for %s: %s\n",
2098 devname, strerror(err));
2099 if (err == EBUSY &&
2100 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2101 cont_err("Bitmap must be removed before size can be changed\n");
2102 rv = 1;
2103 goto release;
2104 }
2105 if (s->assume_clean) {
2106 /* This will fail on kernels older than 3.0 unless
2107 * a backport has been arranged.
2108 */
2109 if (sra == NULL ||
2110 sysfs_set_str(sra, NULL, "resync_start", "none") < 0)
2111 pr_err("--assume-clean not supported with --grow on this kernel\n");
2112 }
2113 md_get_array_info(fd, &array);
2114 s->size = get_component_size(fd)/2;
2115 if (s->size == 0)
2116 s->size = array.size;
2117 if (c->verbose >= 0) {
2118 if (s->size == orig_size)
2119 pr_err("component size of %s unchanged at %lluK\n",
2120 devname, s->size);
2121 else
2122 pr_err("component size of %s has been set to %lluK\n",
2123 devname, s->size);
2124 }
2125 changed = 1;
2126 } else if (array.level != LEVEL_CONTAINER) {
2127 s->size = get_component_size(fd)/2;
2128 if (s->size == 0)
2129 s->size = array.size;
2130 }
2131
2132 /* See if there is anything else to do */
2133 if ((s->level == UnSet || s->level == array.level) &&
2134 (s->layout_str == NULL) &&
2135 (s->chunk == 0 || s->chunk == array.chunk_size) &&
2136 data_offset == INVALID_SECTORS &&
2137 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
2138 /* Nothing more to do */
2139 if (!changed && c->verbose >= 0)
2140 pr_err("%s: no change requested\n",
2141 devname);
2142 goto release;
2143 }
2144
2145 /* ========= check for Raid10/Raid1 -> Raid0 conversion ===============
2146 * the current implementation assumes that the following conditions must be met:
2147 * - RAID10:
2148 * - far_copies == 1
2149 * - near_copies == 2
2150 */
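/*
 * For example, a 4-disk RAID10 created with layout "near=2, far=1" is
 * encoded as (1 << 8) + 2 == 0x102 and has an even raid_disks count,
 * so the test below accepts it; remove_disks_for_takeover() then drops
 * the redundant copies so the array can be taken over to RAID0.
 */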
2151 if ((s->level == 0 && array.level == 10 && sra &&
2152 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
2153 (s->level == 0 && array.level == 1 && sra)) {
2154 int err;
2155 err = remove_disks_for_takeover(st, sra, array.layout);
2156 if (err) {
2157 dprintf("Array cannot be reshaped\n");
2158 if (cfd > -1)
2159 close(cfd);
2160 rv = 1;
2161 goto release;
2162 }
2163 /* Make sure mdmon has seen the device removal
2164 * and updated metadata before we continue with
2165 * level change
2166 */
2167 if (container)
2168 ping_monitor(container);
2169 }
2170
2171 memset(&info, 0, sizeof(info));
2172 info.array = array;
2173 if (sysfs_init(&info, fd, NULL)) {
2174 pr_err("failed to intialize sysfs.\n");
2175 rv = 1;
2176 goto release;
2177 }
2178 strcpy(info.text_version, sra->text_version);
2179 info.component_size = s->size*2;
2180 info.new_level = s->level;
2181 info.new_chunk = s->chunk * 1024;
2182 if (info.array.level == LEVEL_CONTAINER) {
2183 info.delta_disks = UnSet;
2184 info.array.raid_disks = s->raiddisks;
2185 } else if (s->raiddisks)
2186 info.delta_disks = s->raiddisks - info.array.raid_disks;
2187 else
2188 info.delta_disks = UnSet;
2189 if (s->layout_str == NULL) {
2190 info.new_layout = UnSet;
2191 if (info.array.level == 6 &&
2192 (info.new_level == 6 || info.new_level == UnSet) &&
2193 info.array.layout >= 16) {
2194 pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname);
2195 cont_err("during the reshape, please specify --layout=preserve\n");
2196 cont_err("If you want to change it, specify a layout or use --layout=normalise\n");
2197 rv = 1;
2198 goto release;
2199 }
2200 } else if (strcmp(s->layout_str, "normalise") == 0 ||
2201 strcmp(s->layout_str, "normalize") == 0) {
2202 /* If we have a -6 RAID6 layout, remove the '-6'. */
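/*
 * For example, an array that was converted from RAID5 may report a
 * layout such as "left-symmetric-6"; the code below strips the
 * trailing "-6" so the array is re-laid-out as plain "left-symmetric".
 */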
2203 info.new_layout = UnSet;
2204 if (info.array.level == 6 && info.new_level == UnSet) {
2205 char l[40], *h;
2206 strcpy(l, map_num(r6layout, info.array.layout));
2207 h = strrchr(l, '-');
2208 if (h && strcmp(h, "-6") == 0) {
2209 *h = 0;
2210 info.new_layout = map_name(r6layout, l);
2211 }
2212 } else {
2213 pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str);
2214 rv = 1;
2215 goto release;
2216 }
2217 } else if (strcmp(s->layout_str, "preserve") == 0) {
2218 /* This means that a non-standard RAID6 layout
2219 * is OK.
2220 * In particular:
2221 * - When reshaping a RAID6 (e.g. adding a device)
2222 * which is in a non-standard layout, it is OK
2223 * to preserve that layout.
2224 * - When converting a RAID5 to RAID6, leave it in
2225 * the XXX-6 layout, don't re-layout.
2226 */
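/*
 * For example, converting a RAID5 using "left-symmetric" with
 * --layout=preserve yields a RAID6 using "left-symmetric-6": the
 * existing data and parity stay where they are and only the extra
 * Q parity is written to the added device.
 */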
2227 if (info.array.level == 6 && info.new_level == UnSet)
2228 info.new_layout = info.array.layout;
2229 else if (info.array.level == 5 && info.new_level == 6) {
2230 char l[40];
2231 strcpy(l, map_num(r5layout, info.array.layout));
2232 strcat(l, "-6");
2233 info.new_layout = map_name(r6layout, l);
2234 } else {
2235 pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str);
2236 rv = 1;
2237 goto release;
2238 }
2239 } else {
2240 int l = info.new_level;
2241 if (l == UnSet)
2242 l = info.array.level;
2243 switch (l) {
2244 case 5:
2245 info.new_layout = map_name(r5layout, s->layout_str);
2246 break;
2247 case 6:
2248 info.new_layout = map_name(r6layout, s->layout_str);
2249 break;
2250 case 10:
2251 info.new_layout = parse_layout_10(s->layout_str);
2252 break;
2253 case LEVEL_FAULTY:
2254 info.new_layout = parse_layout_faulty(s->layout_str);
2255 break;
2256 default:
2257 pr_err("layout not meaningful with this level\n");
2258 rv = 1;
2259 goto release;
2260 }
2261 if (info.new_layout == UnSet) {
2262 pr_err("layout %s not understood for this level\n",
2263 s->layout_str);
2264 rv = 1;
2265 goto release;
2266 }
2267 }
2268
2269 if (array.level == LEVEL_FAULTY) {
2270 if (s->level != UnSet && s->level != array.level) {
2271 pr_err("cannot change level of Faulty device\n");
2272 rv = 1;
2273 }
2274 if (s->chunk) {
2275 pr_err("cannot set chunksize of Faulty device\n");
2276 rv = 1;
2277 }
2278 if (s->raiddisks && s->raiddisks != 1) {
2279 pr_err("cannot set raid_disks of Faulty device\n");
2280 rv = 1;
2281 }
2282 if (s->layout_str) {
2283 if (md_get_array_info(fd, &array) != 0) {
2284 dprintf("Cannot get array information.\n");
2285 goto release;
2286 }
2287 array.layout = info.new_layout;
2288 if (md_set_array_info(fd, &array) != 0) {
2289 pr_err("failed to set new layout\n");
2290 rv = 1;
2291 } else if (c->verbose >= 0)
2292 printf("layout for %s set to %d\n",
2293 devname, array.layout);
2294 }
2295 } else if (array.level == LEVEL_CONTAINER) {
2296 /* This change is to be applied to every array in the
2297 * container. This is only needed when the metadata imposes
2298 * restraints on the various arrays in the container.
2299 * Currently we only know that IMSM requires all arrays
2300 * to have the same number of devices so changing the
2301 * number of devices (On-Line Capacity Expansion) must be
2302 * performed at the level of the container
2303 */
2304 if (fd > 0) {
2305 close(fd);
2306 fd = -1;
2307 }
2308 rv = reshape_container(container, devname, -1, st, &info,
2309 c->force, c->backup_file, c->verbose, 0, 0, 0);
2310 frozen = 0;
2311 } else {
2312 /* get spare devices from external metadata
2313 */
2314 if (st->ss->external) {
2315 struct mdinfo *info2;
2316
2317 info2 = st->ss->container_content(st, subarray);
2318 if (info2) {
2319 info.array.spare_disks =
2320 info2->array.spare_disks;
2321 sysfs_free(info2);
2322 }
2323 }
2324
2325 /* Impose these changes on a single array. First
2326 * check that the metadata is OK with the change. */
2327
2328 if (reshape_super(st, 0, info.new_level,
2329 info.new_layout, info.new_chunk,
2330 info.array.raid_disks, info.delta_disks,
2331 c->backup_file, devname, APPLY_METADATA_CHANGES,
2332 c->verbose)) {
2333 rv = 1;
2334 goto release;
2335 }
2336 sync_metadata(st);
2337 rv = reshape_array(container, fd, devname, st, &info, c->force,
2338 devlist, data_offset, c->backup_file, c->verbose,
2339 0, 0, 0);
2340 frozen = 0;
2341 }
2342 release:
2343 sysfs_free(sra);
2344 if (frozen > 0)
2345 unfreeze(st);
2346 return rv;
2347 }
2348
2349 /* verify_reshape_position()
2350 * Checks that the reshape position recorded in the metadata is not
2351 * farther along than the position in md.
2352 * Return value:
2353 * 0 : no valid sysfs entry
2354 * this can be caused by a reshape that has not started yet (it should
2355 * be started by reshape_array()), or by a raid0 array before takeover
2356 * -1 : error, reshape position is obviously wrong
2357 * 1 : success, reshape progress correct or updated
2358 */
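/*
 * Worked example (hypothetical numbers): sync_max is a per-device
 * sector count and is scaled by the number of data disks below. If
 * sync_max reads 1000 and the level/layout give 3 data disks, the md
 * position is 3000; a metadata reshape_progress of 2500 is corrected
 * up to 3000 (return 1), while 3500 would be reported as a fatal
 * inconsistency (return -1).
 */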
2359 static int verify_reshape_position(struct mdinfo *info, int level)
2360 {
2361 int ret_val = 0;
2362 char buf[40];
2363 int rv;
2364
2365 /* read sync_max, failure can mean raid0 array */
2366 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2367
2368 if (rv > 0) {
2369 char *ep;
2370 unsigned long long position = strtoull(buf, &ep, 0);
2371
2372 dprintf("Read sync_max sysfs entry is: %s\n", buf);
2373 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2374 position *= get_data_disks(level,
2375 info->new_layout,
2376 info->array.raid_disks);
2377 if (info->reshape_progress < position) {
2378 dprintf("Corrected reshape progress (%llu) to md position (%llu)\n",
2379 info->reshape_progress, position);
2380 info->reshape_progress = position;
2381 ret_val = 1;
2382 } else if (info->reshape_progress > position) {
2383 pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n",
2384 position, info->reshape_progress);
2385 ret_val = -1;
2386 } else {
2387 dprintf("Reshape position in md and metadata are the same;");
2388 ret_val = 1;
2389 }
2390 }
2391 } else if (rv == 0) {
2392 /* for a valid sysfs entry, zero-length content
2393 * should be treated as an error
2394 */
2395 ret_val = -1;
2396 }
2397
2398 return ret_val;
2399 }
2400
2401 static unsigned long long choose_offset(unsigned long long lo,
2402 unsigned long long hi,
2403 unsigned long long min,
2404 unsigned long long max)
2405 {
2406 /* Choose a new offset between hi and lo.
2407 * It must be between min and max, but
2408 * we would prefer something near the middle of hi/lo, and also
2409 * prefer to be aligned to a big power of 2.
2410 *
2411 * So we start with the middle, then for each bit,
2412 * starting at '1' and increasing, if it is set, we either
2413 * add it or subtract it if possible, preferring the option
2414 * which is furthest from the boundary.
2415 *
2416 * We stop once we get a 1MB alignment. As units are in sectors,
2417 * 1MB = 2*1024 sectors.
2418 */
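/*
 * Worked example (hypothetical numbers): lo=1000, hi=3000, min=1100,
 * max=2900 starts at the midpoint 2000; the bit walk moves the choice
 * to 1984 and then 2048, after which no bits below 2048 are set, so
 * 2048 (a 1MB-aligned offset) is returned.
 */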
2419 unsigned long long choice = (lo + hi) / 2;
2420 unsigned long long bit = 1;
2421
2422 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2423 unsigned long long bigger, smaller;
2424 if (! (bit & choice))
2425 continue;
2426 bigger = choice + bit;
2427 smaller = choice - bit;
2428 if (bigger > max && smaller < min)
2429 break;
2430 if (bigger > max)
2431 choice = smaller;
2432 else if (smaller < min)
2433 choice = bigger;
2434 else if (hi - bigger > smaller - lo)
2435 choice = bigger;
2436 else
2437 choice = smaller;
2438 }
2439 return choice;
2440 }
2441
2442 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2443 char *devname, int delta_disks,
2444 unsigned long long data_offset,
2445 unsigned long long min,
2446 int can_fallback)
2447 {
2448 struct mdinfo *sd;
2449 int dir = 0;
2450 int err = 0;
2451 unsigned long long before, after;
2452
2453 /* Need to find the minimum space before and after so the same value
2454 * is used on all devices
2455 */
2456 before = UINT64_MAX;
2457 after = UINT64_MAX;
2458 for (sd = sra->devs; sd; sd = sd->next) {
2459 char *dn;
2460 int dfd;
2461 int rv;
2462 struct supertype *st2;
2463 struct mdinfo info2;
2464
2465 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2466 continue;
2467 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2468 dfd = dev_open(dn, O_RDONLY);
2469 if (dfd < 0) {
2470 pr_err("%s: cannot open component %s\n",
2471 devname, dn ? dn : "-unknown-");
2472 goto release;
2473 }
2474 st2 = dup_super(st);
2475 rv = st2->ss->load_super(st2, dfd, NULL);
2476 close(dfd);
2477 if (rv) {
2478 free(st2);
2479 pr_err("%s: cannot get superblock from %s\n",
2480 devname, dn);
2481 goto release;
2482 }
2483 st2->ss->getinfo_super(st2, &info2, NULL);
2484 st2->ss->free_super(st2);
2485 free(st2);
2486 if (info2.space_before == 0 &&
2487 info2.space_after == 0) {
2488 /* Metadata doesn't support data_offset changes */
2489 if (!can_fallback)
2490 pr_err("%s: Metadata version doesn't support data_offset changes\n",
2491 devname);
2492 goto fallback;
2493 }
2494 if (before > info2.space_before)
2495 before = info2.space_before;
2496 if (after > info2.space_after)
2497 after = info2.space_after;
2498
2499 if (data_offset != INVALID_SECTORS) {
2500 if (dir == 0) {
2501 if (info2.data_offset == data_offset) {
2502 pr_err("%s: already has that data_offset\n",
2503 dn);
2504 goto release;
2505 }
2506 if (data_offset < info2.data_offset)
2507 dir = -1;
2508 else
2509 dir = 1;
2510 } else if ((data_offset <= info2.data_offset && dir == 1) ||
2511 (data_offset >= info2.data_offset && dir == -1)) {
2512 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2513 dn);
2514 goto release;
2515 }
2516 }
2517 }
2518 if (before == UINT64_MAX)
2519 /* impossible really, there must be no devices */
2520 return 1;
2521
2522 for (sd = sra->devs; sd; sd = sd->next) {
2523 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2524 unsigned long long new_data_offset;
2525
2526 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2527 continue;
2528 if (delta_disks < 0) {
2529 /* Don't need any space as the array is shrinking;
2530 * just move data_offset up by min
2531 */
2532 if (data_offset == INVALID_SECTORS)
2533 new_data_offset = sd->data_offset + min;
2534 else {
2535 if (data_offset < sd->data_offset + min) {
2536 pr_err("--data-offset too small for %s\n",
2537 dn);
2538 goto release;
2539 }
2540 new_data_offset = data_offset;
2541 }
2542 } else if (delta_disks > 0) {
2543 /* need space before */
2544 if (before < min) {
2545 if (can_fallback)
2546 goto fallback;
2547 pr_err("Insufficient head-space for reshape on %s\n",
2548 dn);
2549 goto release;
2550 }
2551 if (data_offset == INVALID_SECTORS)
2552 new_data_offset = sd->data_offset - min;
2553 else {
2554 if (data_offset > sd->data_offset - min) {
2555 pr_err("--data-offset too large for %s\n",
2556 dn);
2557 goto release;
2558 }
2559 new_data_offset = data_offset;
2560 }
2561 } else {
2562 if (dir == 0) {
2563 /* can move up or down. If 'data_offset'
2564 * was set we would have already decided,
2565 * so just choose the direction with the most space.
2566 */
2567 if (before > after)
2568 dir = -1;
2569 else
2570 dir = 1;
2571 }
2572 sysfs_set_str(sra, NULL, "reshape_direction",
2573 dir == 1 ? "backwards" : "forwards");
2574 if (dir > 0) {
2575 /* Increase data offset */
2576 if (after < min) {
2577 if (can_fallback)
2578 goto fallback;
2579 pr_err("Insufficient tail-space for reshape on %s\n",
2580 dn);
2581 goto release;
2582 }
2583 if (data_offset != INVALID_SECTORS &&
2584 data_offset < sd->data_offset + min) {
2585 pr_err("--data-offset too small on %s\n",
2586 dn);
2587 goto release;
2588 }
2589 if (data_offset != INVALID_SECTORS)
2590 new_data_offset = data_offset;
2591 else
2592 new_data_offset = choose_offset(sd->data_offset,
2593 sd->data_offset + after,
2594 sd->data_offset + min,
2595 sd->data_offset + after);
2596 } else {
2597 /* Decrease data offset */
2598 if (before < min) {
2599 if (can_fallback)
2600 goto fallback;
2601 pr_err("insufficient head-room on %s\n",
2602 dn);
2603 goto release;
2604 }
2605 if (data_offset != INVALID_SECTORS &&
2606 data_offset < sd->data_offset - min) {
2607 pr_err("--data-offset too small on %s\n",
2608 dn);
2609 goto release;
2610 }
2611 if (data_offset != INVALID_SECTORS)
2612 new_data_offset = data_offset;
2613 else
2614 new_data_offset = choose_offset(sd->data_offset - before,
2615 sd->data_offset,
2616 sd->data_offset - before,
2617 sd->data_offset - min);
2618 }
2619 }
2620 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2621 if (err < 0 && errno == E2BIG) {
2622 /* try again after increasing data size to max */
2623 err = sysfs_set_num(sra, sd, "size", 0);
2624 if (err < 0 && errno == EINVAL &&
2625 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2626 /* some kernels have a bug where you cannot
2627 * use '0' on spare devices. */
2628 sysfs_set_num(sra, sd, "size",
2629 (sra->component_size + after)/2);
2630 }
2631 err = sysfs_set_num(sra, sd, "new_offset",
2632 new_data_offset);
2633 }
2634 if (err < 0) {
2635 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2636 pr_err("data-offset is too big for %s\n",
2637 dn);
2638 goto release;
2639 }
2640 if (sd == sra->devs &&
2641 (errno == ENOENT || errno == E2BIG))
2642 /* Early kernel, no 'new_offset' file,
2643 * or kernel doesn't like us.
2644 * For RAID5/6 this is not fatal
2645 */
2646 return 1;
2647 pr_err("Cannot set new_offset for %s\n",
2648 dn);
2649 break;
2650 }
2651 }
2652 return err;
2653 release:
2654 return -1;
2655 fallback:
2656 /* Just use a backup file */
2657 return 1;
2658 }
2659
2660 static int raid10_reshape(char *container, int fd, char *devname,
2661 struct supertype *st, struct mdinfo *info,
2662 struct reshape *reshape,
2663 unsigned long long data_offset,
2664 int force, int verbose)
2665 {
2666 /* Changing raid_disks, layout, chunksize or possibly
2667 * just data_offset for a RAID10.
2668 * We must always change data_offset. We change by at least
2669 * ->min_offset_change which is the largest of the old and new
2670 * chunk sizes.
2671 * If raid_disks is increasing, then data_offset must decrease
2672 * by at least this copy size.
2673 * If raid_disks is unchanged, data_offset must increase or
2674 * decrease by at least min_offset_change but preferably by much more.
2675 * We choose half of the available space.
2676 * If raid_disks is decreasing, data_offset must increase by
2677 * at least min_offset_change. To allow for this, component_size
2678 * must be decreased by the same amount.
2679 *
2680 * So we calculate the required minimum and direction, possibly
2681 * reduce the component_size, then iterate through the devices
2682 * and set the new_data_offset.
2683 * If that all works, we set chunk_size, layout, raid_disks, and start
2684 * 'reshape'
2685 */
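/*
 * For example (hypothetical geometry), growing a near-2 RAID10 from 4
 * to 6 devices with 512K chunks gives min_offset_change = 1024
 * sectors, so every member's data_offset must drop by at least that
 * much; no user-space backup is needed because the old and new data
 * never overlap.
 */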
2686 struct mdinfo *sra;
2687 unsigned long long min;
2688 int err = 0;
2689
2690 sra = sysfs_read(fd, NULL,
2691 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2692 );
2693 if (!sra) {
2694 pr_err("%s: Cannot get array details from sysfs\n",
2695 devname);
2696 goto release;
2697 }
2698 min = reshape->min_offset_change;
2699
2700 if (info->delta_disks)
2701 sysfs_set_str(sra, NULL, "reshape_direction",
2702 info->delta_disks < 0 ? "backwards" : "forwards");
2703 if (info->delta_disks < 0 &&
2704 info->space_after < min) {
2705 int rv = sysfs_set_num(sra, NULL, "component_size",
2706 (sra->component_size -
2707 min)/2);
2708 if (rv) {
2709 pr_err("cannot reduce component size\n");
2710 goto release;
2711 }
2712 }
2713 err = set_new_data_offset(sra, st, devname, info->delta_disks, data_offset,
2714 min, 0);
2715 if (err == 1) {
2716 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2717 cont_err("supported on this kernel\n");
2718 err = -1;
2719 }
2720 if (err < 0)
2721 goto release;
2722
2723 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2724 err = errno;
2725 if (!err && sysfs_set_num(sra, NULL, "layout", reshape->after.layout) < 0)
2726 err = errno;
2727 if (!err && sysfs_set_num(sra, NULL, "raid_disks",
2728 info->array.raid_disks + info->delta_disks) < 0)
2729 err = errno;
2730 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2731 err = errno;
2732 if (err) {
2733 pr_err("Cannot set array shape for %s\n",
2734 devname);
2735 if (err == EBUSY &&
2736 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2737 cont_err(" Bitmap must be removed before shape can be changed\n");
2738 goto release;
2739 }
2740 sysfs_free(sra);
2741 return 0;
2742 release:
2743 sysfs_free(sra);
2744 return 1;
2745 }
2746
2747 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2748 {
2749 struct mdinfo *sra, *sd;
2750 /* Initialisation to silence compiler warning */
2751 unsigned long long min_space_before = 0, min_space_after = 0;
2752 int first = 1;
2753
2754 sra = sysfs_read(fd, NULL, GET_DEVS);
2755 if (!sra)
2756 return;
2757 for (sd = sra->devs; sd; sd = sd->next) {
2758 char *dn;
2759 int dfd;
2760 struct supertype *st2;
2761 struct mdinfo info2;
2762
2763 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2764 continue;
2765 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2766 dfd = dev_open(dn, O_RDONLY);
2767 if (dfd < 0)
2768 break;
2769 st2 = dup_super(st);
2770 if (st2->ss->load_super(st2, dfd, NULL)) {
2771 close(dfd);
2772 free(st2);
2773 break;
2774 }
2775 close(dfd);
2776 st2->ss->getinfo_super(st2, &info2, NULL);
2777 st2->ss->free_super(st2);
2778 free(st2);
2779 if (first ||
2780 min_space_before > info2.space_before)
2781 min_space_before = info2.space_before;
2782 if (first ||
2783 min_space_after > info2.space_after)
2784 min_space_after = info2.space_after;
2785 first = 0;
2786 }
2787 if (sd == NULL && !first) {
2788 info->space_after = min_space_after;
2789 info->space_before = min_space_before;
2790 }
2791 sysfs_free(sra);
2792 }
2793
2794 static void update_cache_size(char *container, struct mdinfo *sra,
2795 struct mdinfo *info,
2796 int disks, unsigned long long blocks)
2797 {
2798 /* Check that the internal stripe cache is
2799 * large enough, or it won't work.
2800 * It must hold at least 4 stripes of the larger
2801 * chunk size
2802 */
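/*
 * Worked example (hypothetical numbers): with 512K chunks before and
 * after, cache = 512K * 4 = 2M bytes = 4096 sectors; with blocks =
 * 8192 and disks = 4, 16 + 8192/4 = 2064 is smaller so cache stays at
 * 4096 sectors, i.e. 512 pages; if the current stripe_cache_size is
 * below that, it is raised to 513.
 */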
2803 unsigned long cache;
2804 cache = max(info->array.chunk_size, info->new_chunk);
2805 cache *= 4; /* 4 stripes minimum */
2806 cache /= 512; /* convert to sectors */
2807 /* make sure there is room for 'blocks' with a bit to spare */
2808 if (cache < 16 + blocks / disks)
2809 cache = 16 + blocks / disks;
2810 cache /= (4096/512); /* Convert from sectors to pages */
2811
2812 if (sra->cache_size < cache)
2813 subarray_set_num(container, sra, "stripe_cache_size",
2814 cache+1);
2815 }
2816
2817 static int impose_reshape(struct mdinfo *sra,
2818 struct mdinfo *info,
2819 struct supertype *st,
2820 int fd,
2821 int restart,
2822 char *devname, char *container,
2823 struct reshape *reshape)
2824 {
2825 struct mdu_array_info_s array;
2826
2827 sra->new_chunk = info->new_chunk;
2828
2829 if (restart) {
2830 /* for external metadata the checkpoint saved by mdmon can be lost
2831 * or missed (due to e.g. a crash). Check whether md has progressed
2832 * farther during the restart than the metadata points to.
2833 * If so, the metadata information is obsolete.
2834 */
2835 if (st->ss->external)
2836 verify_reshape_position(info, reshape->level);
2837 sra->reshape_progress = info->reshape_progress;
2838 } else {
2839 sra->reshape_progress = 0;
2840 if (reshape->after.data_disks < reshape->before.data_disks)
2841 /* start from the end of the new array */
2842 sra->reshape_progress = (sra->component_size
2843 * reshape->after.data_disks);
2844 }
2845
2846 md_get_array_info(fd, &array);
2847 if (info->array.chunk_size == info->new_chunk &&
2848 reshape->before.layout == reshape->after.layout &&
2849 st->ss->external == 0) {
2850 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2851 array.raid_disks = reshape->after.data_disks + reshape->parity;
2852 if (!restart && md_set_array_info(fd, &array) != 0) {
2853 int err = errno;
2854
2855 pr_err("Cannot set device shape for %s: %s\n",
2856 devname, strerror(errno));
2857
2858 if (err == EBUSY &&
2859 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2860 cont_err("Bitmap must be removed before shape can be changed\n");
2861
2862 goto release;
2863 }
2864 } else if (!restart) {
2865 /* set them all just in case some old 'new_*' value
2866 * persists from some earlier problem.
2867 */
2868 int err = 0;
2869 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2870 err = errno;
2871 if (!err && sysfs_set_num(sra, NULL, "layout",
2872 reshape->after.layout) < 0)
2873 err = errno;
2874 if (!err && subarray_set_num(container, sra, "raid_disks",
2875 reshape->after.data_disks +
2876 reshape->parity) < 0)
2877 err = errno;
2878 if (err) {
2879 pr_err("Cannot set device shape for %s\n",
2880 devname);
2881
2882 if (err == EBUSY &&
2883 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2884 cont_err("Bitmap must be removed before shape can be changed\n");
2885 goto release;
2886 }
2887 }
2888 return 0;
2889 release:
2890 return -1;
2891 }
2892
2893 static int impose_level(int fd, int level, char *devname, int verbose)
2894 {
2895 char *c;
2896 struct mdu_array_info_s array;
2897 struct mdinfo info;
2898
2899 if (sysfs_init(&info, fd, NULL)) {
2900 pr_err("failed to intialize sysfs.\n");
2901 return 1;
2902 }
2903
2904 md_get_array_info(fd, &array);
2905 if (level == 0 &&
2906 (array.level >= 4 && array.level <= 6)) {
2907 /* To convert to RAID0 we need to fail and
2908 * remove any non-data devices. */
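/*
 * For example, a 4-disk RAID5 in the parity-last (ALGORITHM_PARITY_N)
 * layout has 3 data disks; the loops below first remove spares, then
 * fail and remove the member that holds only parity, so the array can
 * become a 3-disk RAID0.
 */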
2909 int found = 0;
2910 int d;
2911 int data_disks = array.raid_disks - 1;
2912 if (array.level == 6)
2913 data_disks -= 1;
2914 if (array.level == 5 &&
2915 array.layout != ALGORITHM_PARITY_N)
2916 return -1;
2917 if (array.level == 6 &&
2918 array.layout != ALGORITHM_PARITY_N_6)
2919 return -1;
2920 sysfs_set_str(&info, NULL, "sync_action", "idle");
2921 /* First remove any spares so no recovery starts */
2922 for (d = 0, found = 0;
2923 d < MAX_DISKS && found < array.nr_disks;
2924 d++) {
2925 mdu_disk_info_t disk;
2926 disk.number = d;
2927 if (md_get_disk_info(fd, &disk) < 0)
2928 continue;
2929 if (disk.major == 0 && disk.minor == 0)
2930 continue;
2931 found++;
2932 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2933 disk.raid_disk < data_disks)
2934 /* keep this */
2935 continue;
2936 ioctl(fd, HOT_REMOVE_DISK,
2937 makedev(disk.major, disk.minor));
2938 }
2939 /* Now fail anything left */
2940 md_get_array_info(fd, &array);
2941 for (d = 0, found = 0;
2942 d < MAX_DISKS && found < array.nr_disks;
2943 d++) {
2944 mdu_disk_info_t disk;
2945 disk.number = d;
2946 if (md_get_disk_info(fd, &disk) < 0)
2947 continue;
2948 if (disk.major == 0 && disk.minor == 0)
2949 continue;
2950 found++;
2951 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2952 disk.raid_disk < data_disks)
2953 /* keep this */
2954 continue;
2955 ioctl(fd, SET_DISK_FAULTY,
2956 makedev(disk.major, disk.minor));
2957 hot_remove_disk(fd, makedev(disk.major, disk.minor), 1);
2958 }
2959 }
2960 c = map_num(pers, level);
2961 if (c) {
2962 int err = sysfs_set_str(&info, NULL, "level", c);
2963 if (err) {
2964 err = errno;
2965 pr_err("%s: could not set level to %s\n",
2966 devname, c);
2967 if (err == EBUSY &&
2968 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2969 cont_err("Bitmap must be removed before level can be changed\n");
2970 return err;
2971 }
2972 if (verbose >= 0)
2973 pr_err("level of %s changed to %s\n",
2974 devname, c);
2975 }
2976 return 0;
2977 }
2978
2979 int sigterm = 0;
2980 static void catch_term(int sig)
2981 {
2982 sigterm = 1;
2983 }
2984
2985 static int continue_via_systemd(char *devnm)
2986 {
2987 int skipped, i, pid, status;
2988 char pathbuf[1024];
2989 /* In a systemd/udev world, it is best to get systemd to
2990 * run "mdadm --grow --continue" rather than running in the
2991 * background.
2992 */
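/*
 * For example, for an array whose kernel name is md127 (hypothetical),
 * the child runs "systemctl restart mdadm-grow-continue@md127.service";
 * if the unit starts, the parent sees status 0, returns 1, and the
 * caller skips forking its own monitor process.
 */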
2993 switch(fork()) {
2994 case 0:
2995 /* FIXME yuk. CLOSE_EXEC?? */
2996 skipped = 0;
2997 for (i = 3; skipped < 20; i++)
2998 if (close(i) < 0)
2999 skipped++;
3000 else
3001 skipped = 0;
3002
3003 /* Don't want to see error messages from
3004 * systemctl. If the service doesn't exist,
3005 * we fork ourselves.
3006 */
3007 close(2);
3008 open("/dev/null", O_WRONLY);
3009 snprintf(pathbuf, sizeof(pathbuf), "mdadm-grow-continue@%s.service",
3010 devnm);
3011 status = execl("/usr/bin/systemctl", "systemctl",
3012 "restart",
3013 pathbuf, NULL);
3014 status = execl("/bin/systemctl", "systemctl", "restart",
3015 pathbuf, NULL);
3016 exit(1);
3017 case -1: /* Just do it ourselves. */
3018 break;
3019 default: /* parent - good */
3020 pid = wait(&status);
3021 if (pid >= 0 && status == 0)
3022 return 1;
3023 }
3024 return 0;
3025 }
3026
3027 static int reshape_array(char *container, int fd, char *devname,
3028 struct supertype *st, struct mdinfo *info,
3029 int force, struct mddev_dev *devlist,
3030 unsigned long long data_offset,
3031 char *backup_file, int verbose, int forked,
3032 int restart, int freeze_reshape)
3033 {
3034 struct reshape reshape;
3035 int spares_needed;
3036 char *msg;
3037 int orig_level = UnSet;
3038 int odisks;
3039 int delayed;
3040
3041 struct mdu_array_info_s array;
3042 char *c;
3043
3044 struct mddev_dev *dv;
3045 int added_disks;
3046
3047 int *fdlist = NULL;
3048 unsigned long long *offsets = NULL;
3049 int d;
3050 int nrdisks;
3051 int err;
3052 unsigned long blocks;
3053 unsigned long long array_size;
3054 int done;
3055 struct mdinfo *sra = NULL;
3056 char buf[20];
3057
3058 /* when reshaping a RAID0, the component_size might be zero.
3059 * So try to fix that up.
3060 */
3061 if (md_get_array_info(fd, &array) != 0) {
3062 dprintf("Cannot get array information.\n");
3063 goto release;
3064 }
3065 if (array.level == 0 && info->component_size == 0) {
3066 get_dev_size(fd, NULL, &array_size);
3067 info->component_size = array_size / array.raid_disks;
3068 }
3069
3070 if (array.level == 10)
3071 /* Need space_after info */
3072 get_space_after(fd, st, info);
3073
3074 if (info->reshape_active) {
3075 int new_level = info->new_level;
3076 info->new_level = UnSet;
3077 if (info->delta_disks > 0)
3078 info->array.raid_disks -= info->delta_disks;
3079 msg = analyse_change(devname, info, &reshape);
3080 info->new_level = new_level;
3081 if (info->delta_disks > 0)
3082 info->array.raid_disks += info->delta_disks;
3083 if (!restart)
3084 /* Make sure the array isn't read-only */
3085 ioctl(fd, RESTART_ARRAY_RW, 0);
3086 } else
3087 msg = analyse_change(devname, info, &reshape);
3088 if (msg) {
3089 /* if msg == "", error has already been printed */
3090 if (msg[0])
3091 pr_err("%s\n", msg);
3092 goto release;
3093 }
3094 if (restart &&
3095 (reshape.level != info->array.level ||
3096 reshape.before.layout != info->array.layout ||
3097 reshape.before.data_disks + reshape.parity !=
3098 info->array.raid_disks - max(0, info->delta_disks))) {
3099 pr_err("reshape info is not in native format - cannot continue.\n");
3100 goto release;
3101 }
3102
3103 if (st->ss->external && restart && (info->reshape_progress == 0) &&
3104 !((sysfs_get_str(info, NULL, "sync_action", buf, sizeof(buf)) > 0) &&
3105 (strncmp(buf, "reshape", 7) == 0))) {
3106 /* When a reshape is restarted from '0' (the very beginning of the
3107 * array), it is possible that for external metadata the reshape and
3108 * array configuration has not happened yet.
3109 * Check whether md has the same opinion and the reshape really
3110 * restarts from 0. If so, this is a regular reshape start after the
3111 * reshape has only been switched to the next array in the metadata.
3112 */
3113 if ((verify_reshape_position(info, reshape.level) >= 0) &&
3114 (info->reshape_progress == 0))
3115 restart = 0;
3116 }
3117 if (restart) {
3118 /* reshape already started. just skip to monitoring the reshape */
3119 if (reshape.backup_blocks == 0)
3120 return 0;
3121 if (restart & RESHAPE_NO_BACKUP)
3122 return 0;
3123
3124 /* Need 'sra' down at 'started:' */
3125 sra = sysfs_read(fd, NULL,
3126 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3127 GET_CACHE);
3128 if (!sra) {
3129 pr_err("%s: Cannot get array details from sysfs\n",
3130 devname);
3131 goto release;
3132 }
3133
3134 if (!backup_file)
3135 backup_file = locate_backup(sra->sys_name);
3136
3137 goto started;
3138 }
3139 /* The container is frozen but the array may not be.
3140 * So freeze the array so spares don't get put to the wrong use
3141 * FIXME there should probably be a cleaner separation between
3142 * freeze_array and freeze_container.
3143 */
3144 sysfs_freeze_array(info);
3145 /* Check we have enough spares to not be degraded */
3146 added_disks = 0;
3147 for (dv = devlist; dv ; dv=dv->next)
3148 added_disks++;
3149 spares_needed = max(reshape.before.data_disks,
3150 reshape.after.data_disks)
3151 + reshape.parity - array.raid_disks;
3152
3153 if (!force &&
3154 info->new_level > 1 && info->array.level > 1 &&
3155 spares_needed > info->array.spare_disks + added_disks) {
3156 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
3157 " Use --force to over-ride this check.\n",
3158 spares_needed,
3159 spares_needed == 1 ? "" : "s",
3160 info->array.spare_disks + added_disks);
3161 goto release;
3162 }
3163 /* Check we have enough spares to not fail */
3164 spares_needed = max(reshape.before.data_disks,
3165 reshape.after.data_disks)
3166 - array.raid_disks;
3167 if ((info->new_level > 1 || info->new_level == 0) &&
3168 spares_needed > info->array.spare_disks +added_disks) {
3169 pr_err("Need %d spare%s to create working array, and only have %d.\n",
3170 spares_needed,
3171 spares_needed == 1 ? "" : "s",
3172 info->array.spare_disks + added_disks);
3173 goto release;
3174 }
3175
3176 if (reshape.level != array.level) {
3177 int err = impose_level(fd, reshape.level, devname, verbose);
3178 if (err)
3179 goto release;
3180 info->new_layout = UnSet; /* after level change,
3181 * layout is meaningless */
3182 orig_level = array.level;
3183 sysfs_freeze_array(info);
3184
3185 if (reshape.level > 0 && st->ss->external) {
3186 /* make sure mdmon is aware of the new level */
3187 if (mdmon_running(container))
3188 flush_mdmon(container);
3189
3190 if (!mdmon_running(container))
3191 start_mdmon(container);
3192 ping_monitor(container);
3193 if (mdmon_running(container) &&
3194 st->update_tail == NULL)
3195 st->update_tail = &st->updates;
3196 }
3197 }
3198 /* ->reshape_super might have chosen some spares from the
3199 * container that it wants to be part of the new array.
3200 * We can collect them with ->container_content and give
3201 * them to the kernel.
3202 */
3203 if (st->ss->reshape_super && st->ss->container_content) {
3204 char *subarray = strchr(info->text_version+1, '/')+1;
3205 struct mdinfo *info2 =
3206 st->ss->container_content(st, subarray);
3207 struct mdinfo *d;
3208
3209 if (info2) {
3210 if (sysfs_init(info2, fd, st->devnm)) {
3211 pr_err("unable to initialize sysfs for %s\n",
3212 st->devnm);
3213 free(info2);
3214 goto release;
3215 }
3216 /* When increasing number of devices, we need to set
3217 * new raid_disks before adding these, or they might
3218 * be rejected.
3219 */
3220 if (reshape.backup_blocks &&
3221 reshape.after.data_disks > reshape.before.data_disks)
3222 subarray_set_num(container, info2, "raid_disks",
3223 reshape.after.data_disks +
3224 reshape.parity);
3225 for (d = info2->devs; d; d = d->next) {
3226 if (d->disk.state == 0 &&
3227 d->disk.raid_disk >= 0) {
3228 /* This is a spare that wants to
3229 * be part of the array.
3230 */
3231 add_disk(fd, st, info2, d);
3232 }
3233 }
3234 sysfs_free(info2);
3235 }
3236 }
3237 /* We might have been given some devices to add to the
3238 * array. Now that the array has been changed to the right
3239 * level and frozen, we can safely add them.
3240 */
3241 if (devlist) {
3242 if (Manage_subdevs(devname, fd, devlist, verbose,
3243 0, NULL, 0))
3244 goto release;
3245 }
3246
3247 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
3248 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
3249 if (reshape.backup_blocks == 0) {
3250 /* No restriping needed, but we might need to impose
3251 * some more changes: layout, raid_disks, chunk_size
3252 */
3253 /* read current array info */
3254 if (md_get_array_info(fd, &array) != 0) {
3255 dprintf("Cannot get array information.\n");
3256 goto release;
3257 }
3258 /* compare current array info with new values and if
3259 * it is different update them to new */
3260 if (info->new_layout != UnSet &&
3261 info->new_layout != array.layout) {
3262 array.layout = info->new_layout;
3263 if (md_set_array_info(fd, &array) != 0) {
3264 pr_err("failed to set new layout\n");
3265 goto release;
3266 } else if (verbose >= 0)
3267 printf("layout for %s set to %d\n",
3268 devname, array.layout);
3269 }
3270 if (info->delta_disks != UnSet &&
3271 info->delta_disks != 0 &&
3272 array.raid_disks != (info->array.raid_disks + info->delta_disks)) {
3273 array.raid_disks += info->delta_disks;
3274 if (md_set_array_info(fd, &array) != 0) {
3275 pr_err("failed to set raid disks\n");
3276 goto release;
3277 } else if (verbose >= 0) {
3278 printf("raid_disks for %s set to %d\n",
3279 devname, array.raid_disks);
3280 }
3281 }
3282 if (info->new_chunk != 0 &&
3283 info->new_chunk != array.chunk_size) {
3284 if (sysfs_set_num(info, NULL,
3285 "chunk_size", info->new_chunk) != 0) {
3286 pr_err("failed to set chunk size\n");
3287 goto release;
3288 } else if (verbose >= 0)
3289 printf("chunk size for %s set to %d\n",
3290 devname, array.chunk_size);
3291 }
3292 unfreeze(st);
3293 return 0;
3294 }
3295
3296 /*
3297 * There are three possibilities.
3298 * 1/ The array will shrink.
3299 * We need to ensure the reshape will pause before reaching
3300 * the 'critical section'. We also need to fork and wait for
3301 * that to happen. When it does we
3302 * suspend/backup/complete/unfreeze
3303 *
3304 * 2/ The array will not change size.
3305 * This requires that we keep a backup of a sliding window
3306 * so that we can restore data after a crash. So we need
3307 * to fork and monitor progress.
3308 * In future we will allow the data_offset to change, so
3309 * a sliding backup becomes unnecessary.
3310 *
3311 * 3/ The array will grow. This is relatively easy.
3312 * However the kernel's restripe routines will cheerfully
3313 * overwrite some early data before it is safe. So we
3314 * need to make a backup of the early parts of the array
3315 * and be ready to restore it if rebuild aborts very early.
3316 * For externally managed metadata, we still need a forked
3317 * child to monitor the reshape and suspend IO over the region
3318 * that is being reshaped.
3319 *
3320 * We backup data by writing it to one spare, or to a
3321 * file which was given on command line.
3322 *
3323 * In each case, we first make sure that storage is available
3324 * for the required backup.
3325 * Then we:
3326 * - request the shape change.
3327 * - fork to handle backup etc.
3328 */
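/*
 * For example (case 3), growing a RAID5 from 4 to 5 devices: the
 * kernel's restripe would overwrite the first stripes of the array
 * before that data is safe, so the initial reshape.backup_blocks
 * sectors are copied to a spare or to the --backup-file first and can
 * be restored if the reshape aborts very early.
 */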
3329 /* Check that we can hold all the data */
3330 get_dev_size(fd, NULL, &array_size);
3331 if (reshape.new_size < (array_size/512)) {
3332 pr_err("this change will reduce the size of the array.\n"
3333 " use --grow --array-size first to truncate array.\n"
3334 " e.g. mdadm --grow %s --array-size %llu\n",
3335 devname, reshape.new_size/2);
3336 goto release;
3337 }
3338
3339 if (array.level == 10) {
3340 /* Reshaping RAID10 does not require any data backup by
3341 * user-space. Instead it requires that the data_offset
3342 * is changed to avoid the need for backup.
3343 * So this is handled very separately
3344 */
3345 if (restart)
3346 /* Nothing to do. */
3347 return 0;
3348 return raid10_reshape(container, fd, devname, st, info,
3349 &reshape, data_offset,
3350 force, verbose);
3351 }
3352 sra = sysfs_read(fd, NULL,
3353 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3354 GET_CACHE);
3355 if (!sra) {
3356 pr_err("%s: Cannot get array details from sysfs\n",
3357 devname);
3358 goto release;
3359 }
3360
3361 if (!backup_file)
3362 switch(set_new_data_offset(sra, st, devname,
3363 reshape.after.data_disks - reshape.before.data_disks,
3364 data_offset,
3365 reshape.min_offset_change, 1)) {
3366 case -1:
3367 goto release;
3368 case 0:
3369 /* Updated data_offset, so it's easy now */
3370 update_cache_size(container, sra, info,
3371 min(reshape.before.data_disks,
3372 reshape.after.data_disks),
3373 reshape.backup_blocks);
3374
3375 /* Right, everything seems fine. Let's kick things off.
3376 */
3377 sync_metadata(st);
3378
3379 if (impose_reshape(sra, info, st, fd, restart,
3380 devname, container, &reshape) < 0)
3381 goto release;
3382 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3383 struct mdinfo *sd;
3384 if (errno != EINVAL) {
3385 pr_err("Failed to initiate reshape!\n");
3386 goto release;
3387 }
3388 /* revert data_offset and try the old way */
3389 for (sd = sra->devs; sd; sd = sd->next) {
3390 sysfs_set_num(sra, sd, "new_offset",
3391 sd->data_offset);
3392 sysfs_set_str(sra, NULL, "reshape_direction",
3393 "forwards");
3394 }
3395 break;
3396 }
3397 if (info->new_level == reshape.level)
3398 return 0;
3399 /* need to adjust level when reshape completes */
3400 switch(fork()) {
3401 case -1: /* ignore error, but don't wait */
3402 return 0;
3403 default: /* parent */
3404 return 0;
3405 case 0:
3406 map_fork();
3407 break;
3408 }
3409 close(fd);
3410 wait_reshape(sra);
3411 fd = open_dev(sra->sys_name);
3412 if (fd >= 0)
3413 impose_level(fd, info->new_level, devname, verbose);
3414 return 0;
3415 case 1: /* Couldn't set data_offset, try the old way */
3416 if (data_offset != INVALID_SECTORS) {
3417 pr_err("Cannot update data_offset on this array\n");
3418 goto release;
3419 }
3420 break;
3421 }
3422
3423 started:
3424 /* Decide how many blocks (sectors) for a reshape
3425 * unit. The number we have so far is just a minimum
3426 */
3427 blocks = reshape.backup_blocks;
3428 if (reshape.before.data_disks ==
3429 reshape.after.data_disks) {
3430 /* Make 'blocks' bigger for better throughput, but
3431 * not so big that we reject it below.
3432 * Try for 16 megabytes
3433 */
3434 while (blocks * 32 < sra->component_size &&
3435 blocks < 16*1024*2)
3436 blocks *= 2;
3437 } else
3438 pr_err("Need to backup %luK of critical section..\n", blocks/2);
3439
3440 if (blocks >= sra->component_size/2) {
3441 pr_err("%s: Something wrong - reshape aborted\n",
3442 devname);
3443 goto release;
3444 }
3445
3446 /* Now we need to open all these devices so we can read/write.
3447 */
3448 nrdisks = max(reshape.before.data_disks,
3449 reshape.after.data_disks) + reshape.parity
3450 + sra->array.spare_disks;
3451 fdlist = xcalloc((1+nrdisks), sizeof(int));
3452 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3453
3454 odisks = reshape.before.data_disks + reshape.parity;
3455 d = reshape_prepare_fdlist(devname, sra, odisks,
3456 nrdisks, blocks, backup_file,
3457 fdlist, offsets);
3458 if (d < odisks) {
3459 goto release;
3460 }
3461 if ((st->ss->manage_reshape == NULL) ||
3462 (st->ss->recover_backup == NULL)) {
3463 if (backup_file == NULL) {
3464 if (reshape.after.data_disks <=
3465 reshape.before.data_disks) {
3466 pr_err("%s: Cannot grow - need backup-file\n",
3467 devname);
3468 pr_err(" Please provide one with \"--backup=...\"\n");
3469 goto release;
3470 } else if (d == odisks) {
3471 pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname);
3472 goto release;
3473 }
3474 } else {
3475 if (!reshape_open_backup_file(backup_file, fd, devname,
3476 (signed)blocks,
3477 fdlist+d, offsets+d,
3478 sra->sys_name,
3479 restart)) {
3480 goto release;
3481 }
3482 d++;
3483 }
3484 }
3485
3486 update_cache_size(container, sra, info,
3487 min(reshape.before.data_disks, reshape.after.data_disks),
3488 blocks);
3489
3490 /* Right, everything seems fine. Let's kick things off.
3491 * If only changing raid_disks, use ioctl, else use
3492 * sysfs.
3493 */
3494 sync_metadata(st);
3495
3496 if (impose_reshape(sra, info, st, fd, restart,
3497 devname, container, &reshape) < 0)
3498 goto release;
3499
3500 err = start_reshape(sra, restart, reshape.before.data_disks,
3501 reshape.after.data_disks);
3502 if (err) {
3503 pr_err("Cannot %s reshape for %s\n",
3504 restart ? "continue" : "start",
3505 devname);
3506 goto release;
3507 }
3508 if (restart)
3509 sysfs_set_str(sra, NULL, "array_state", "active");
3510 if (freeze_reshape) {
3511 free(fdlist);
3512 free(offsets);
3513 sysfs_free(sra);
3514 pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n",
3515 sra->reshape_progress);
3516 return 1;
3517 }
3518
3519 if (!forked && !check_env("MDADM_NO_SYSTEMCTL"))
3520 if (continue_via_systemd(container ?: sra->sys_name)) {
3521 free(fdlist);
3522 free(offsets);
3523 sysfs_free(sra);
3524 return 0;
3525 }
3526
3527 /* Now we just need to kick off the reshape and watch, while
3528 * handling backups of the data...
3529 * This is all done by a forked background process.
3530 */
3531 switch(forked ? 0 : fork()) {
3532 case -1:
3533 pr_err("Cannot run child to monitor reshape: %s\n",
3534 strerror(errno));
3535 abort_reshape(sra);
3536 goto release;
3537 default:
3538 free(fdlist);
3539 free(offsets);
3540 sysfs_free(sra);
3541 return 0;
3542 case 0:
3543 map_fork();
3544 break;
3545 }
3546
3547 /* If another array on the same devices is busy, the
3548 * reshape will wait for it. This would mean that
3549 * the first section that we suspend will stay suspended
3550 * for a long time. So check on that possibility
3551 * by looking for "DELAYED" in /proc/mdstat, and if found,
3552 * wait a while
3553 */
3554 do {
3555 struct mdstat_ent *mds, *m;
3556 delayed = 0;
3557 mds = mdstat_read(1, 0);
3558 for (m = mds; m; m = m->next)
3559 if (strcmp(m->devnm, sra->sys_name) == 0) {
3560 if (m->resync &&
3561 m->percent == RESYNC_DELAYED)
3562 delayed = 1;
3563 if (m->resync == 0)
3564 /* Haven't started the reshape thread
3565 * yet, wait a bit
3566 */
3567 delayed = 2;
3568 break;
3569 }
3570 free_mdstat(mds);
3571 if (delayed == 1 && get_linux_version() < 3007000) {
3572 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3573 " You might experience problems until other reshapes complete.\n");
3574 delayed = 0;
3575 }
3576 if (delayed)
3577 mdstat_wait(30 - (delayed-1) * 25);
3578 } while (delayed);
3579 mdstat_close();
3580 close(fd);
3581 if (check_env("MDADM_GROW_VERIFY"))
3582 fd = open(devname, O_RDONLY | O_DIRECT);
3583 else
3584 fd = -1;
3585 mlockall(MCL_FUTURE);
3586
3587 signal(SIGTERM, catch_term);
3588
3589 if (st->ss->external) {
3590 /* metadata handler takes it from here */
3591 done = st->ss->manage_reshape(
3592 fd, sra, &reshape, st, blocks,
3593 fdlist, offsets,
3594 d - odisks, fdlist+odisks,
3595 offsets+odisks);
3596 } else
3597 done = child_monitor(
3598 fd, sra, &reshape, st, blocks,
3599 fdlist, offsets,
3600 d - odisks, fdlist+odisks,
3601 offsets+odisks);
3602
3603 free(fdlist);
3604 free(offsets);
3605
3606 if (backup_file && done) {
3607 char *bul;
3608 bul = make_backup(sra->sys_name);
3609 if (bul) {
3610 char buf[1024];
3611 int l = readlink(bul, buf, sizeof(buf) - 1);
3612 if (l > 0) {
3613 buf[l]=0;
3614 unlink(buf);
3615 }
3616 unlink(bul);
3617 free(bul);
3618 }
3619 unlink(backup_file);
3620 }
3621 if (!done) {
3622 abort_reshape(sra);
3623 goto out;
3624 }
3625
3626 if (!st->ss->external &&
3627 !(reshape.before.data_disks != reshape.after.data_disks &&
3628 info->custom_array_size) && info->new_level == reshape.level &&
3629 !forked) {
3630 /* no need to wait for the reshape to finish as
3631 * there is nothing more to do.
3632 */
3633 sysfs_free(sra);
3634 exit(0);
3635 }
3636 wait_reshape(sra);
3637
3638 if (st->ss->external) {
3639 /* Re-load the metadata as much could have changed */
3640 int cfd = open_dev(st->container_devnm);
3641 if (cfd >= 0) {
3642 flush_mdmon(container);
3643 st->ss->free_super(st);
3644 st->ss->load_container(st, cfd, container);
3645 close(cfd);
3646 }
3647 }
3648
3649 /* set the new array size if required; custom_array_size is used
3650 * by this metadata.
3651 */
3652 if (reshape.before.data_disks !=
3653 reshape.after.data_disks &&
3654 info->custom_array_size)
3655 set_array_size(st, info, info->text_version);
3656
3657 if (info->new_level != reshape.level) {
3658 if (fd < 0)
3659 fd = open(devname, O_RDONLY);
3660 impose_level(fd, info->new_level, devname, verbose);
3661 close(fd);
3662 if (info->new_level == 0)
3663 st->update_tail = NULL;
3664 }
3665 out:
3666 sysfs_free(sra);
3667 if (forked)
3668 return 0;
3669 unfreeze(st);
3670 exit(0);
3671
3672 release:
3673 free(fdlist);
3674 free(offsets);
3675 if (orig_level != UnSet && sra) {
3676 c = map_num(pers, orig_level);
3677 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3678 pr_err("aborting level change\n");
3679 }
3680 sysfs_free(sra);
3681 if (!forked)
3682 unfreeze(st);
3683 return 1;
3684 }
3685
3686 /* mdfd handle is passed to be closed in child process (after fork).
3687 */
3688 int reshape_container(char *container, char *devname,
3689 int mdfd,
3690 struct supertype *st,
3691 struct mdinfo *info,
3692 int force,
3693 char *backup_file, int verbose,
3694 int forked, int restart, int freeze_reshape)
3695 {
3696 struct mdinfo *cc = NULL;
3697 int rv = restart;
3698 char last_devnm[32] = "";
3699
3700 /* component_size is not meaningful for a container,
3701 * so pass '0' meaning 'no change'
3702 */
3703 if (!restart &&
3704 reshape_super(st, 0, info->new_level,
3705 info->new_layout, info->new_chunk,
3706 info->array.raid_disks, info->delta_disks,
3707 backup_file, devname, APPLY_METADATA_CHANGES,
3708 verbose)) {
3709 unfreeze(st);
3710 return 1;
3711 }
3712
3713 sync_metadata(st);
3714
3715 /* ping monitor to be sure that update is on disk
3716 */
3717 ping_monitor(container);
3718
3719 if (!forked && !freeze_reshape && !check_env("MDADM_NO_SYSTEMCTL"))
3720 if (continue_via_systemd(container))
3721 return 0;
3722
3723 switch (forked ? 0 : fork()) {
3724 case -1: /* error */
3725 perror("Cannot fork to complete reshape\n");
3726 unfreeze(st);
3727 return 1;
3728 default: /* parent */
3729 if (!freeze_reshape)
3730 printf("%s: multi-array reshape continues in background\n", Name);
3731 return 0;
3732 case 0: /* child */
3733 map_fork();
3734 break;
3735 }
3736
3737 /* close unused handle in child process
3738 */
3739 if (mdfd > -1)
3740 close(mdfd);
3741
3742 while(1) {
3743 /* For each member array with reshape_active,
3744 * we need to perform the reshape.
3745 * We pick the first array that needs reshaping and
3746 * reshape it. reshape_array() will re-read the metadata
3747 * so the next time through a different array should be
3748 * ready for reshape.
3749 * It is possible that the 'different' array will not
3750 * be assembled yet. In that case we simply exit.
3751 * When it is assembled, the mdadm which assembles it
3752 * will take over the reshape.
3753 */
3754 struct mdinfo *content;
3755 int fd;
3756 struct mdstat_ent *mdstat;
3757 char *adev;
3758 dev_t devid;
3759
3760 sysfs_free(cc);
3761
3762 cc = st->ss->container_content(st, NULL);
3763
3764 for (content = cc; content ; content = content->next) {
3765 char *subarray;
3766 if (!content->reshape_active)
3767 continue;
3768
3769 subarray = strchr(content->text_version+1, '/')+1;
3770 mdstat = mdstat_by_subdev(subarray, container);
3771 if (!mdstat)
3772 continue;
3773 if (mdstat->active == 0) {
3774 pr_err("Skipping inactive array %s.\n",
3775 mdstat->devnm);
3776 free_mdstat(mdstat);
3777 mdstat = NULL;
3778 continue;
3779 }
3780 break;
3781 }
3782 if (!content)
3783 break;
3784
3785 devid = devnm2devid(mdstat->devnm);
3786 adev = map_dev(major(devid), minor(devid), 0);
3787 if (!adev)
3788 adev = content->text_version;
3789
3790 fd = open_dev(mdstat->devnm);
3791 if (fd < 0) {
3792 pr_err("Device %s cannot be opened for reshape.\n", adev);
3793 break;
3794 }
3795
3796 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3797 /* Do not allow for multiple reshape_array() calls for
3798 * the same array.
3799 * It can happen when reshape_array() returns without
3800 * error, when reshape is not finished (wrong reshape
3801 * starting/continuation conditions). Mdmon doesn't
3802 * switch to next array in container and reentry
3803 * conditions for the same array occur.
3804 * This is possibly interim until the behaviour of
3805 * reshape_array() is resolved.
3806 */
3807 printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev);
3808 close(fd);
3809 break;
3810 }
3811 strcpy(last_devnm, mdstat->devnm);
3812
3813 if (sysfs_init(content, fd, mdstat->devnm)) {
3814 pr_err("Unable to initialize sysfs for %s\n",
3815 mdstat->devnm);
3816 rv = 1;
3817 break;
3818 }
3819
3820 if (mdmon_running(container))
3821 flush_mdmon(container);
3822
3823 rv = reshape_array(container, fd, adev, st,
3824 content, force, NULL, INVALID_SECTORS,
3825 backup_file, verbose, 1, restart,
3826 freeze_reshape);
3827 close(fd);
3828
3829 if (freeze_reshape) {
3830 sysfs_free(cc);
3831 exit(0);
3832 }
3833
3834 restart = 0;
3835 if (rv)
3836 break;
3837
3838 if (mdmon_running(container))
3839 flush_mdmon(container);
3840 }
3841 if (!rv)
3842 unfreeze(st);
3843 sysfs_free(cc);
3844 exit(0);
3845 }
3846
3847 /*
3848 * We run a child process in the background which performs the following
3849 * steps:
3850 * - wait for resync to reach a certain point
3851 * - suspend io to the following section
3852 * - backup that section
3853 * - allow resync to proceed further
3854 * - resume io
3855 * - discard the backup.
3856 *
3857 * These are combined in slightly different ways in the three cases.
3858 * Grow:
3859 * - suspend/backup/allow/wait/resume/discard
3860 * Shrink:
3861 * - allow/wait/suspend/backup/allow/wait/resume/discard
3862 * same-size:
3863 * - wait/resume/discard/suspend/backup/allow
3864 *
3865 * suspend/backup/allow always come together
3866 * wait/resume/discard do too.
3867 * For the same-size case we have two backups to improve flow.
3868 *
3869 */
3870
3871 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3872 unsigned long long backup_point,
3873 unsigned long long wait_point,
3874 unsigned long long *suspend_point,
3875 unsigned long long *reshape_completed, int *frozen)
3876 {
3877 /* This function is called repeatedly by the reshape manager.
3878 * It determines how much progress can safely be made and allows
3879 * that progress.
3880 * - 'info' identifies the array and particularly records in
3881 * ->reshape_progress the metadata's knowledge of progress
3882 * This is a sector offset from the start of the array
3883 * of the next array block to be relocated. This number
3884 * may increase from 0 or decrease from array_size, depending
3885 * on the type of reshape that is happening.
3886 * Note that in contrast, 'sync_completed' is a block count of the
3887 * reshape so far. It gives the distance between the start point
3888 * (head or tail of device) and the next place that data will be
3889 * written. It always increases.
3890 * - 'reshape' is the structure created by analyse_change
3891 * - 'backup_point' shows how much the metadata manager has backed-up
3892 * data. For reshapes with increasing progress, it is the next address
3893 * to be backed up, previous addresses have been backed-up. For
3894 * decreasing progress, it is the earliest address that has been
3895 * backed up - later addresses are also backed up.
3896 * So addresses between reshape_progress and backup_point are
3897 * backed up providing those are in the 'correct' order.
3898 * - 'wait_point' is an array address. When reshape_completed
3899 * passes this point, progress_reshape should return. It might
3900 * return earlier if it determines that ->reshape_progress needs
3901 * to be updated or further backup is needed.
3902 * - suspend_point is maintained by progress_reshape and the caller
3903 * should not touch it except to initialise to zero.
3904 * It is an array address and it only increases in 2.6.37 and earlier.
3905 * This makes it difficult to handle reducing reshapes with
3906 * external metadata.
3907 * However: it is similar to backup_point in that it records the
3908 * other end of a suspended region from reshape_progress.
3909 * it is moved to extend the region that is safe to backup and/or
3910 * reshape
3911 * - reshape_completed is read from sysfs and returned. The caller
3912 * should copy this into ->reshape_progress when it has reason to
3913 * believe that the metadata knows this, and any backup outside this
3914 * has been erased.
3915 *
3916 * Return value is:
3917 * 1 if more data from backup_point - but only as far as suspend_point -
3918 * should be backed up
3919 * 0 if things are progressing smoothly
3920 * -1 if the reshape is finished because it is all done,
3921 * -2 if the reshape is finished due to an error.
3922 */
3923
3924 int advancing = (reshape->after.data_disks
3925 >= reshape->before.data_disks);
3926 unsigned long long need_backup; /* All data between start of array and
3927 * here will at some point need to
3928 * be backed up.
3929 */
3930 unsigned long long read_offset, write_offset;
3931 unsigned long long write_range;
3932 unsigned long long max_progress, target, completed;
3933 unsigned long long array_size = (info->component_size
3934 * reshape->before.data_disks);
3935 int fd;
3936 char buf[20];
3937
3938 /* First, we unsuspend any region that is now known to be safe.
3939 * If suspend_point is on the 'wrong' side of reshape_progress, then
3940 * we don't have or need suspension at the moment. This is true for
3941 * native metadata when we don't need to back-up.
3942 */
3943 if (advancing) {
3944 if (info->reshape_progress <= *suspend_point)
3945 sysfs_set_num(info, NULL, "suspend_lo",
3946 info->reshape_progress);
3947 } else {
3948 /* Note: this won't work in 2.6.37 and before.
3949 * Something somewhere should make sure we don't need it!
3950 */
3951 if (info->reshape_progress >= *suspend_point)
3952 sysfs_set_num(info, NULL, "suspend_hi",
3953 info->reshape_progress);
3954 }
3955
3956 /* Now work out how far it is safe to progress.
3957 * If the read_offset for ->reshape_progress is less than
3958 * 'blocks' beyond the write_offset, we can only progress as far
3959 * as a backup.
3960 * Otherwise we can progress until the write_offset for the new location
3961 * reaches (within 'blocks' of) the read_offset at the current location.
3962 * However that region must be suspended unless we are using native
3963 * metadata.
3964 * If we need to suspend more, we limit it to 128M per device, which is
3965 * rather arbitrary and should be some time-based calculation.
3966 */
3967 read_offset = info->reshape_progress / reshape->before.data_disks;
3968 write_offset = info->reshape_progress / reshape->after.data_disks;
3969 write_range = info->new_chunk/512;
3970 if (reshape->before.data_disks == reshape->after.data_disks)
3971 need_backup = array_size;
3972 else
3973 need_backup = reshape->backup_blocks;
3974 if (advancing) {
3975 if (read_offset < write_offset + write_range)
3976 max_progress = backup_point;
3977 else
3978 max_progress =
3979 read_offset *
3980 reshape->after.data_disks;
3981 } else {
3982 if (read_offset > write_offset - write_range)
3983 /* Can only progress as far as has been backed up,
3984 * which must be suspended */
3985 max_progress = backup_point;
3986 else if (info->reshape_progress <= need_backup)
3987 max_progress = backup_point;
3988 else {
3989 if (info->array.major_version >= 0)
3990 /* Can progress until backup is needed */
3991 max_progress = need_backup;
3992 else {
3993 /* Can progress until metadata update is required */
3994 max_progress =
3995 read_offset *
3996 reshape->after.data_disks;
3997 /* but data must be suspended */
3998 if (max_progress < *suspend_point)
3999 max_progress = *suspend_point;
4000 }
4001 }
4002 }
4003
4004 /* We know it is safe to progress to 'max_progress' providing
4005 * it is suspended or we are using native metadata.
4006 * Consider extending suspend_point 128M per device if it
4007 * is less than 64M per device beyond reshape_progress.
4008 * But always do a multiple of 'blocks'
4009 * FIXME this is too big - it takes too long to complete
4010 * this much.
4011 */
4012 target = 64*1024*2 * min(reshape->before.data_disks,
4013 reshape->after.data_disks);
4014 target /= reshape->backup_blocks;
4015 if (target < 2)
4016 target = 2;
4017 target *= reshape->backup_blocks;
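/* Worked example (hypothetical numbers): growing from 3 to 4 data disks
 * with reshape->backup_blocks == 5120 sectors:
 *   64*1024*2 * 3 = 393216 sectors (192M of array data)
 *   393216 / 5120 = 76      (integer division)
 *   76 * 5120     = 389120 sectors
 * i.e. about 190M, rounded down to a whole number of backup blocks.
 */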
4018
4019 /* For externally managed metadata we always need to suspend IO to
4020 * the area being reshaped so we regularly push suspend_point forward.
4021 * For native metadata we only need the suspend if we are going to do
4022 * a backup.
4023 */
4024 if (advancing) {
4025 if ((need_backup > info->reshape_progress ||
4026 info->array.major_version < 0) &&
4027 *suspend_point < info->reshape_progress + target) {
4028 if (need_backup < *suspend_point + 2 * target)
4029 *suspend_point = need_backup;
4030 else if (*suspend_point + 2 * target < array_size)
4031 *suspend_point += 2 * target;
4032 else
4033 *suspend_point = array_size;
4034 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
4035 if (max_progress > *suspend_point)
4036 max_progress = *suspend_point;
4037 }
4038 } else {
4039 if (info->array.major_version >= 0) {
4040 /* Only need to suspend when about to backup */
4041 if (info->reshape_progress < need_backup * 2 &&
4042 *suspend_point > 0) {
4043 *suspend_point = 0;
4044 sysfs_set_num(info, NULL, "suspend_lo", 0);
4045 sysfs_set_num(info, NULL, "suspend_hi", need_backup);
4046 }
4047 } else {
4048 /* Need to suspend continually */
4049 if (info->reshape_progress < *suspend_point)
4050 *suspend_point = info->reshape_progress;
4051 if (*suspend_point + target < info->reshape_progress)
4052 /* No need to move suspend region yet */;
4053 else {
4054 if (*suspend_point >= 2 * target)
4055 *suspend_point -= 2 * target;
4056 else
4057 *suspend_point = 0;
4058 sysfs_set_num(info, NULL, "suspend_lo",
4059 *suspend_point);
4060 }
4061 if (max_progress < *suspend_point)
4062 max_progress = *suspend_point;
4063 }
4064 }
4065
4066 /* now set sync_max to allow that progress. sync_max, like
4067 * sync_completed is a count of sectors written per device, so
4068 * we find the difference between max_progress and the start point,
4069 * and divide that by after.data_disks to get a sync_max
4070 * number.
4071 * At the same time we convert wait_point to a similar number
4072 * for comparing against sync_completed.
4073 */
4074 /* scale down max_progress to per_disk */
4075 max_progress /= reshape->after.data_disks;
4076 /* Round to chunk size as some kernels give an erroneously high number */
4077 max_progress /= info->new_chunk/512;
4078 max_progress *= info->new_chunk/512;
4079 /* And round to old chunk size as the kernel wants that */
4080 max_progress /= info->array.chunk_size/512;
4081 max_progress *= info->array.chunk_size/512;
4082 /* Limit progress to the whole device */
4083 if (max_progress > info->component_size)
4084 max_progress = info->component_size;
4085 wait_point /= reshape->after.data_disks;
4086 if (!advancing) {
4087 /* switch from 'device offset' to 'processed block count' */
4088 max_progress = info->component_size - max_progress;
4089 wait_point = info->component_size - wait_point;
4090 }
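/* Worked example (hypothetical numbers), advancing case: with
 * after.data_disks == 4, old and new chunk both 512K (1024 sectors),
 * and an array-address max_progress of 10000000 sectors:
 *   10000000 / 4          = 2500000 sectors per device
 *   2500000 / 1024 * 1024 = 2499584 (rounded down to a chunk boundary)
 * so sync_max is set to 2499584; for a shrinking reshape the value is
 * then flipped to component_size - max_progress just above.
 */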
4091
4092 if (!*frozen)
4093 sysfs_set_num(info, NULL, "sync_max", max_progress);
4094
4095 /* Now wait. If we have already reached the point that we were
4096 * asked to wait to, don't wait at all, else wait for any change.
4097 * We need to select on 'sync_completed' as that is the place that
4098 * notifications happen, but we are really interested in
4099 * 'reshape_position'
4100 */
4101 fd = sysfs_get_fd(info, NULL, "sync_completed");
4102 if (fd < 0)
4103 goto check_progress;
4104
4105 if (sysfs_fd_get_ll(fd, &completed) < 0)
4106 goto check_progress;
4107
4108 while (completed < max_progress && completed < wait_point) {
4109 /* Check that sync_action is still 'reshape' to avoid
4110 * waiting forever on a dead array
4111 */
4112 char action[20];
4113 if (sysfs_get_str(info, NULL, "sync_action",
4114 action, 20) <= 0 ||
4115 strncmp(action, "reshape", 7) != 0)
4116 break;
4117 /* Some kernels reset 'sync_completed' to zero
4118 * before setting 'sync_action' to 'idle'.
4119 * So we need these extra tests.
4120 */
4121 if (completed == 0 && advancing &&
4122 strncmp(action, "idle", 4) == 0 &&
4123 info->reshape_progress > 0)
4124 break;
4125 if (completed == 0 && !advancing &&
4126 strncmp(action, "idle", 4) == 0 &&
4127 info->reshape_progress < (info->component_size
4128 * reshape->after.data_disks))
4129 break;
4130 sysfs_wait(fd, NULL);
4131 if (sysfs_fd_get_ll(fd, &completed) < 0)
4132 goto check_progress;
4133 }
4134 /* Some kernels reset 'sync_completed' to zero,
4135 * but we need to know the real point the reshape has reached in md.
4136 * So in that case, read 'reshape_position' from sysfs.
4137 */
4138 if (completed == 0) {
4139 unsigned long long reshapep;
4140 char action[20];
4141 if (sysfs_get_str(info, NULL, "sync_action",
4142 action, 20) > 0 &&
4143 strncmp(action, "idle", 4) == 0 &&
4144 sysfs_get_ll(info, NULL,
4145 "reshape_position", &reshapep) == 0)
4146 *reshape_completed = reshapep;
4147 } else {
4148 /* some kernels can give an incorrectly high
4149 * 'completed' number, so round down */
4150 completed /= (info->new_chunk/512);
4151 completed *= (info->new_chunk/512);
4152 /* Convert 'completed' back in to a 'progress' number */
4153 completed *= reshape->after.data_disks;
4154 if (!advancing)
4155 completed = (info->component_size
4156 * reshape->after.data_disks
4157 - completed);
4158 *reshape_completed = completed;
4159 }
4160
4161 close(fd);
4162
4163 /* We return the need_backup flag. Caller will decide
4164 * how much - a multiple of ->backup_blocks up to *suspend_point
4165 */
4166 if (advancing)
4167 return need_backup > info->reshape_progress;
4168 else
4169 return need_backup >= info->reshape_progress;
4170
4171 check_progress:
4172 /* if we couldn't read a number from sync_completed, then
4173 * either the reshape did complete, or it aborted.
4174 * We can tell which by checking for 'none' in reshape_position.
4175 * If it did abort, then it might immediately restart if
4176 * it was just a device failure that leaves us degraded but
4177 * functioning.
4178 */
4179 if (sysfs_get_str(info, NULL, "reshape_position", buf,
4180 sizeof(buf)) < 0 ||
4181 strncmp(buf, "none", 4) != 0) {
4182 /* The abort might only be temporary. Wait up to 10
4183 * seconds for fd to contain a valid number again.
4184 */
4185 int wait = 10000;
4186 int rv = -2;
4187 unsigned long long new_sync_max;
4188 while (fd >= 0 && rv < 0 && wait > 0) {
4189 if (sysfs_wait(fd, &wait) != 1)
4190 break;
4191 switch (sysfs_fd_get_ll(fd, &completed)) {
4192 case 0:
4193 /* all good again */
4194 rv = 1;
4195 /* If "sync_max" is no longer max_progress
4196 * we need to freeze things
4197 */
4198 sysfs_get_ll(info, NULL, "sync_max", &new_sync_max);
4199 *frozen = (new_sync_max != max_progress);
4200 break;
4201 case -2: /* read error - abort */
4202 wait = 0;
4203 break;
4204 }
4205 }
4206 if (fd >= 0)
4207 close(fd);
4208 return rv; /* abort */
4209 } else {
4210 /* Maybe racing with array shutdown - check state */
4211 if (fd >= 0)
4212 close(fd);
4213 if (sysfs_get_str(info, NULL, "array_state", buf,
4214 sizeof(buf)) < 0 ||
4215 strncmp(buf, "inactive", 8) == 0 ||
4216 strncmp(buf, "clear",5) == 0)
4217 return -2; /* abort */
4218 return -1; /* complete */
4219 }
4220 }
4221
4222 /* FIXME return status is never checked */
4223 static int grow_backup(struct mdinfo *sra,
4224 unsigned long long offset, /* per device */
4225 unsigned long stripes, /* per device, in old chunks */
4226 int *sources, unsigned long long *offsets,
4227 int disks, int chunk, int level, int layout,
4228 int dests, int *destfd, unsigned long long *destoffsets,
4229 int part, int *degraded,
4230 char *buf)
4231 {
4232 /* Backup 'blocks' sectors at 'offset' on each device of the array,
4233 * to storage 'destfd' (offset 'destoffsets'), after first
4234 * suspending IO. Then allow resync to continue
4235 * over the suspended section.
4236 * Use part 'part' of the backup-super-block.
4237 */
4238 int odata = disks;
4239 int rv = 0;
4240 int i;
4241 unsigned long long ll;
4242 int new_degraded;
4243 //printf("offset %llu\n", offset);
4244 if (level >= 4)
4245 odata--;
4246 if (level == 6)
4247 odata--;
4248
4249 /* Check that array hasn't become degraded, else we might backup the wrong data */
4250 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
4251 return -1; /* FIXME this error is ignored */
4252 new_degraded = (int)ll;
4253 if (new_degraded != *degraded) {
4254 /* check each device to ensure it is still working */
4255 struct mdinfo *sd;
4256 for (sd = sra->devs ; sd ; sd = sd->next) {
4257 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4258 continue;
4259 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
4260 char sbuf[100];
4261
4262 if (sysfs_get_str(sra, sd, "state",
4263 sbuf, sizeof(sbuf)) < 0 ||
4264 strstr(sbuf, "faulty") ||
4265 strstr(sbuf, "in_sync") == NULL) {
4266 /* this device is dead */
4267 sd->disk.state = (1<<MD_DISK_FAULTY);
4268 if (sd->disk.raid_disk >= 0 &&
4269 sources[sd->disk.raid_disk] >= 0) {
4270 close(sources[sd->disk.raid_disk]);
4271 sources[sd->disk.raid_disk] = -1;
4272 }
4273 }
4274 }
4275 }
4276 *degraded = new_degraded;
4277 }
4278 if (part) {
4279 bsb.arraystart2 = __cpu_to_le64(offset * odata);
4280 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
4281 } else {
4282 bsb.arraystart = __cpu_to_le64(offset * odata);
4283 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
4284 }
4285 if (part)
4286 bsb.magic[15] = '2';
4287 for (i = 0; i < dests; i++)
4288 if (part)
4289 lseek64(destfd[i], destoffsets[i] + __le64_to_cpu(bsb.devstart2)*512, 0);
4290 else
4291 lseek64(destfd[i], destoffsets[i], 0);
4292
4293 rv = save_stripes(sources, offsets,
4294 disks, chunk, level, layout,
4295 dests, destfd,
4296 offset*512*odata, stripes * chunk * odata,
4297 buf);
4298
4299 if (rv)
4300 return rv;
4301 bsb.mtime = __cpu_to_le64(time(0));
4302 for (i = 0; i < dests; i++) {
4303 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4304
4305 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4306 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4307 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4308 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4309
4310 rv = -1;
4311 if ((unsigned long long)lseek64(destfd[i],
4312 destoffsets[i] - 4096, 0) !=
4313 destoffsets[i] - 4096)
4314 break;
4315 if (write(destfd[i], &bsb, 512) != 512)
4316 break;
4317 if (destoffsets[i] > 4096) {
4318 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
4319 destoffsets[i]+stripes*chunk*odata)
4320 break;
4321 if (write(destfd[i], &bsb, 512) != 512)
4322 break;
4323 }
4324 fsync(destfd[i]);
4325 rv = 0;
4326 }
4327
4328 return rv;
4329 }
4330
4331 /* in 2.6.30, the value reported by sync_completed can be
4332 * less than it should be by one stripe.
4333 * This only happens when reshape hits sync_max and pauses.
4334 * So allow wait_backup to either extend sync_max further
4335 * than strictly necessary, or return before the
4336 * sync has got quite as far as we would really like.
4337 * This is what 'blocks2' is for.
4338 * The various callers give appropriate values so that
4339 * everything works.
4340 */
4341 /* FIXME return value is often ignored */
4342 static int forget_backup(int dests, int *destfd,
4343 unsigned long long *destoffsets,
4344 int part)
4345 {
4346 /*
4347 * Erase backup 'part' (which is 0 or 1)
4348 */
4349 int i;
4350 int rv;
4351
4352 if (part) {
4353 bsb.arraystart2 = __cpu_to_le64(0);
4354 bsb.length2 = __cpu_to_le64(0);
4355 } else {
4356 bsb.arraystart = __cpu_to_le64(0);
4357 bsb.length = __cpu_to_le64(0);
4358 }
4359 bsb.mtime = __cpu_to_le64(time(0));
4360 rv = 0;
4361 for (i = 0; i < dests; i++) {
4362 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4363 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4364 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4365 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4366 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4367 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4368 destoffsets[i]-4096)
4369 rv = -1;
4370 if (rv == 0 &&
4371 write(destfd[i], &bsb, 512) != 512)
4372 rv = -1;
4373 fsync(destfd[i]);
4374 }
4375 return rv;
4376 }
4377
4378 static void fail(char *msg)
4379 {
4380 int rv;
4381 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4382 rv |= (write(2, "\n", 1) != 1);
4383 exit(rv ? 1 : 2);
4384 }
4385
4386 static char *abuf, *bbuf;
4387 static unsigned long long abuflen;
4388 static void validate(int afd, int bfd, unsigned long long offset)
4389 {
4390 /* check the data in the backup against the array.
4391 * This is only used for regression testing and should not
4392 * be used while the array is active
4393 */
4394 if (afd < 0)
4395 return;
4396 lseek64(bfd, offset - 4096, 0);
4397 if (read(bfd, &bsb2, 512) != 512)
4398 fail("cannot read bsb");
4399 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4400 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4401 fail("first csum bad");
4402 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4403 fail("magic is bad");
4404 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4405 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4406 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4407 fail("second csum bad");
4408
4409 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4410 fail("devstart is wrong");
4411
4412 if (bsb2.length) {
4413 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4414
4415 if (abuflen < len) {
4416 free(abuf);
4417 free(bbuf);
4418 abuflen = len;
4419 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4420 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4421 abuflen = 0;
4422 /* just stop validating on mem-alloc failure */
4423 return;
4424 }
4425 }
4426
4427 lseek64(bfd, offset, 0);
4428 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4429 //printf("len %llu\n", len);
4430 fail("read first backup failed");
4431 }
4432 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4433 if ((unsigned long long)read(afd, abuf, len) != len)
4434 fail("read first from array failed");
4435 if (memcmp(bbuf, abuf, len) != 0) {
4436 #if 0
4437 int i;
4438 printf("offset=%llu len=%llu\n",
4439 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4440 for (i=0; i<len; i++)
4441 if (bbuf[i] != abuf[i]) {
4442 printf("first diff byte %d\n", i);
4443 break;
4444 }
4445 #endif
4446 fail("data1 compare failed");
4447 }
4448 }
4449 if (bsb2.length2) {
4450 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4451
4452 if (abuflen < len) {
4453 free(abuf);
4454 free(bbuf);
4455 abuflen = len;
4456 abuf = xmalloc(abuflen);
4457 bbuf = xmalloc(abuflen);
4458 }
4459
4460 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4461 if ((unsigned long long)read(bfd, bbuf, len) != len)
4462 fail("read second backup failed");
4463 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4464 if ((unsigned long long)read(afd, abuf, len) != len)
4465 fail("read second from array failed");
4466 if (memcmp(bbuf, abuf, len) != 0)
4467 fail("data2 compare failed");
4468 }
4469 }
4470
4471 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4472 struct supertype *st, unsigned long blocks,
4473 int *fds, unsigned long long *offsets,
4474 int dests, int *destfd, unsigned long long *destoffsets)
4475 {
4476 /* Monitor a reshape where backup is being performed using
4477 * 'native' mechanism - either to a backup file, or
4478 * to some space in a spare.
4479 */
4480 char *buf;
4481 int degraded = -1;
4482 unsigned long long speed;
4483 unsigned long long suspend_point, array_size;
4484 unsigned long long backup_point, wait_point;
4485 unsigned long long reshape_completed;
4486 int done = 0;
4487 int increasing = reshape->after.data_disks >= reshape->before.data_disks;
4488 int part = 0; /* The next part of the backup area to fill. It may already
4489 * be full, so we need to check */
4490 int level = reshape->level;
4491 int layout = reshape->before.layout;
4492 int data = reshape->before.data_disks;
4493 int disks = reshape->before.data_disks + reshape->parity;
4494 int chunk = sra->array.chunk_size;
4495 struct mdinfo *sd;
4496 unsigned long stripes;
4497 int uuid[4];
4498 int frozen = 0;
4499
4500 /* set up the backup-super-block. This requires the
4501 * uuid from the array.
4502 */
4503 /* Find a superblock */
4504 for (sd = sra->devs; sd; sd = sd->next) {
4505 char *dn;
4506 int devfd;
4507 int ok;
4508 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4509 continue;
4510 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4511 devfd = dev_open(dn, O_RDONLY);
4512 if (devfd < 0)
4513 continue;
4514 ok = st->ss->load_super(st, devfd, NULL);
4515 close(devfd);
4516 if (ok == 0)
4517 break;
4518 }
4519 if (!sd) {
4520 pr_err("Cannot find a superblock\n");
4521 return 0;
4522 }
4523
4524 memset(&bsb, 0, 512);
4525 memcpy(bsb.magic, "md_backup_data-1", 16);
4526 st->ss->uuid_from_super(st, uuid);
4527 memcpy(bsb.set_uuid, uuid, 16);
4528 bsb.mtime = __cpu_to_le64(time(0));
4529 bsb.devstart2 = blocks;
4530
4531 stripes = blocks / (sra->array.chunk_size/512) /
4532 reshape->before.data_disks;
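/* e.g. (hypothetical numbers) with blocks == 6144 sectors, a 512K chunk
 * (1024 sectors) and 3 data disks: stripes = 6144 / 1024 / 3 = 2.
 */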
4533
4534 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4535 /* Don't start the 'reshape' */
4536 return 0;
4537 if (reshape->before.data_disks == reshape->after.data_disks) {
4538 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4539 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4540 }
4541
4542 if (increasing) {
4543 array_size = sra->component_size * reshape->after.data_disks;
4544 backup_point = sra->reshape_progress;
4545 suspend_point = 0;
4546 } else {
4547 array_size = sra->component_size * reshape->before.data_disks;
4548 backup_point = reshape->backup_blocks;
4549 suspend_point = array_size;
4550 }
4551
4552 while (!done) {
4553 int rv;
4554
4555 /* Want to return as soon as the oldest backup slot can
4556 * be released as that allows us to start backing up
4557 * some more, providing suspend_point has been
4558 * advanced, which it should have.
4559 */
4560 if (increasing) {
4561 wait_point = array_size;
4562 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4563 wait_point = (__le64_to_cpu(bsb.arraystart) +
4564 __le64_to_cpu(bsb.length));
4565 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4566 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4567 __le64_to_cpu(bsb.length2));
4568 } else {
4569 wait_point = 0;
4570 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4571 wait_point = __le64_to_cpu(bsb.arraystart);
4572 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4573 wait_point = __le64_to_cpu(bsb.arraystart2);
4574 }
4575
4576 reshape_completed = sra->reshape_progress;
4577 rv = progress_reshape(sra, reshape,
4578 backup_point, wait_point,
4579 &suspend_point, &reshape_completed,
4580 &frozen);
4581 /* external metadata would need to ping_monitor here */
4582 sra->reshape_progress = reshape_completed;
4583
4584 /* Clear any backup region that is before 'here' */
4585 if (increasing) {
4586 if (__le64_to_cpu(bsb.length) > 0 &&
4587 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4588 __le64_to_cpu(bsb.length)))
4589 forget_backup(dests, destfd,
4590 destoffsets, 0);
4591 if (__le64_to_cpu(bsb.length2) > 0 &&
4592 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4593 __le64_to_cpu(bsb.length2)))
4594 forget_backup(dests, destfd,
4595 destoffsets, 1);
4596 } else {
4597 if (__le64_to_cpu(bsb.length) > 0 &&
4598 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4599 forget_backup(dests, destfd,
4600 destoffsets, 0);
4601 if (__le64_to_cpu(bsb.length2) > 0 &&
4602 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4603 forget_backup(dests, destfd,
4604 destoffsets, 1);
4605 }
4606 if (sigterm)
4607 rv = -2;
4608 if (rv < 0) {
4609 if (rv == -1)
4610 done = 1;
4611 break;
4612 }
4613 if (rv == 0 && increasing && !st->ss->external) {
4614 /* No longer need to monitor this reshape */
4615 sysfs_set_str(sra, NULL, "sync_max", "max");
4616 done = 1;
4617 break;
4618 }
4619
4620 while (rv) {
4621 unsigned long long offset;
4622 unsigned long actual_stripes;
4623 /* Need to backup some data.
4624 * If 'part' is not used and the desired
4625 * backup size is suspended, do a backup,
4626 * then consider the next part.
4627 */
4628 /* Check that 'part' is unused */
4629 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4630 break;
4631 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4632 break;
4633
4634 offset = backup_point / data;
4635 actual_stripes = stripes;
4636 if (increasing) {
4637 if (offset + actual_stripes * (chunk/512) >
4638 sra->component_size)
4639 actual_stripes = ((sra->component_size - offset)
4640 / (chunk/512));
4641 if (offset + actual_stripes * (chunk/512) >
4642 suspend_point/data)
4643 break;
4644 } else {
4645 if (offset < actual_stripes * (chunk/512))
4646 actual_stripes = offset / (chunk/512);
4647 offset -= actual_stripes * (chunk/512);
4648 if (offset < suspend_point/data)
4649 break;
4650 }
4651 if (actual_stripes == 0)
4652 break;
4653 grow_backup(sra, offset, actual_stripes,
4654 fds, offsets,
4655 disks, chunk, level, layout,
4656 dests, destfd, destoffsets,
4657 part, &degraded, buf);
4658 validate(afd, destfd[0], destoffsets[0]);
4659 /* record where 'part' is up to */
4660 part = !part;
4661 if (increasing)
4662 backup_point += actual_stripes * (chunk/512) * data;
4663 else
4664 backup_point -= actual_stripes * (chunk/512) * data;
4665 }
4666 }
4667
4668 /* FIXME maybe call progress_reshape one more time instead */
4669 /* remove any remaining suspension */
4670 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4671 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4672 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4673 sysfs_set_num(sra, NULL, "sync_min", 0);
4674
4675 if (reshape->before.data_disks == reshape->after.data_disks)
4676 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4677 free(buf);
4678 return done;
4679 }
4680
4681 /*
4682 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4683 * write that data into the array and update the super blocks with
4684 * the new reshape_progress
4685 */
4686 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, int cnt,
4687 char *backup_file, int verbose)
4688 {
4689 int i, j;
4690 int old_disks;
4691 unsigned long long *offsets;
4692 unsigned long long nstripe, ostripe;
4693 int ndata, odata;
4694
4695 odata = info->array.raid_disks - info->delta_disks - 1;
4696 if (info->array.level == 6) odata--; /* number of data disks */
4697 ndata = info->array.raid_disks - 1;
4698 if (info->new_level == 6) ndata--;
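/* e.g. (hypothetical numbers) a RAID6 grown from 6 to 8 devices:
 * raid_disks == 8, delta_disks == 2, so odata = 8 - 2 - 1 - 1 = 4
 * and ndata = 8 - 1 - 1 = 6.
 */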
4699
4700 old_disks = info->array.raid_disks - info->delta_disks;
4701
4702 if (info->delta_disks <= 0)
4703 /* Didn't grow, so the backup file must have
4704 * been used
4705 */
4706 old_disks = cnt;
4707 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4708 struct mdinfo dinfo;
4709 int fd;
4710 int bsbsize;
4711 char *devname, namebuf[20];
4712 unsigned long long lo, hi;
4713
4714 /* This was a spare and may have some saved data on it.
4715 * Load the superblock, find and load the
4716 * backup_super_block.
4717 * If either fail, go on to next device.
4718 * If the backup contains no new info, just return
4719 * else restore data and update all superblocks
4720 */
4721 if (i == old_disks-1) {
4722 fd = open(backup_file, O_RDONLY);
4723 if (fd<0) {
4724 pr_err("backup file %s inaccessible: %s\n",
4725 backup_file, strerror(errno));
4726 continue;
4727 }
4728 devname = backup_file;
4729 } else {
4730 fd = fdlist[i];
4731 if (fd < 0)
4732 continue;
4733 if (st->ss->load_super(st, fd, NULL))
4734 continue;
4735
4736 st->ss->getinfo_super(st, &dinfo, NULL);
4737 st->ss->free_super(st);
4738
4739 if (lseek64(fd,
4740 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4741 0) < 0) {
4742 pr_err("Cannot seek on device %d\n", i);
4743 continue; /* Cannot seek */
4744 }
4745 sprintf(namebuf, "device-%d", i);
4746 devname = namebuf;
4747 }
4748 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4749 if (verbose)
4750 pr_err("Cannot read from %s\n", devname);
4751 continue; /* Cannot read */
4752 }
4753 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4754 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4755 if (verbose)
4756 pr_err("No backup metadata on %s\n", devname);
4757 continue;
4758 }
4759 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4760 if (verbose)
4761 pr_err("Bad backup-metadata checksum on %s\n", devname);
4762 continue; /* bad checksum */
4763 }
4764 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4765 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4766 if (verbose)
4767 pr_err("Bad backup-metadata checksum2 on %s\n", devname);
4768 continue; /* Bad second checksum */
4769 }
4770 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4771 if (verbose)
4772 pr_err("Wrong uuid on backup-metadata on %s\n", devname);
4773 continue; /* Wrong uuid */
4774 }
4775
4776 /* array utime and backup-mtime should be updated at much the same time, but it seems that
4777 * sometimes they aren't... So allow considerable flexibility in matching, and allow
4778 * this test to be overridden by an environment variable.
4779 */
4780 if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
4781 time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
4782 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4783 pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
4784 (unsigned long)__le64_to_cpu(bsb.mtime),
4785 (unsigned long)info->array.utime);
4786 } else {
4787 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4788 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4789 continue; /* time stamp is too bad */
4790 }
4791 }
4792
4793 if (bsb.magic[15] == '1') {
4794 if (bsb.length == 0)
4795 continue;
4796 if (info->delta_disks >= 0) {
4797 /* reshape_progress is increasing */
4798 if (__le64_to_cpu(bsb.arraystart)
4799 + __le64_to_cpu(bsb.length)
4800 < info->reshape_progress) {
4801 nonew:
4802 if (verbose)
4803 pr_err("backup-metadata found on %s but is not needed\n", devname);
4804 continue; /* No new data here */
4805 }
4806 } else {
4807 /* reshape_progress is decreasing */
4808 if (__le64_to_cpu(bsb.arraystart) >=
4809 info->reshape_progress)
4810 goto nonew; /* No new data here */
4811 }
4812 } else {
4813 if (bsb.length == 0 && bsb.length2 == 0)
4814 continue;
4815 if (info->delta_disks >= 0) {
4816 /* reshape_progress is increasing */
4817 if ((__le64_to_cpu(bsb.arraystart)
4818 + __le64_to_cpu(bsb.length)
4819 < info->reshape_progress) &&
4820 (__le64_to_cpu(bsb.arraystart2)
4821 + __le64_to_cpu(bsb.length2)
4822 < info->reshape_progress))
4823 goto nonew; /* No new data here */
4824 } else {
4825 /* reshape_progress is decreasing */
4826 if (__le64_to_cpu(bsb.arraystart) >=
4827 info->reshape_progress &&
4828 __le64_to_cpu(bsb.arraystart2) >=
4829 info->reshape_progress)
4830 goto nonew; /* No new data here */
4831 }
4832 }
4833 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4834 second_fail:
4835 if (verbose)
4836 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4837 devname);
4838 continue; /* Cannot seek */
4839 }
4840 /* There should be a duplicate backup superblock 4k before here */
4841 if (lseek64(fd, -4096, 1) < 0 ||
4842 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4843 goto second_fail; /* Cannot find leading superblock */
4844 if (bsb.magic[15] == '1')
4845 bsbsize = offsetof(struct mdp_backup_super, pad1);
4846 else
4847 bsbsize = offsetof(struct mdp_backup_super, pad);
4848 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4849 goto second_fail; /* Cannot find leading superblock */
4850
4851 /* Now need the data offsets for all devices. */
4852 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4853 for(j=0; j<info->array.raid_disks; j++) {
4854 if (fdlist[j] < 0)
4855 continue;
4856 if (st->ss->load_super(st, fdlist[j], NULL))
4857 /* FIXME should this be an error */
4858 continue;
4859 st->ss->getinfo_super(st, &dinfo, NULL);
4860 st->ss->free_super(st);
4861 offsets[j] = dinfo.data_offset * 512;
4862 }
4863 printf("%s: restoring critical section\n", Name);
4864
4865 if (restore_stripes(fdlist, offsets,
4866 info->array.raid_disks,
4867 info->new_chunk,
4868 info->new_level,
4869 info->new_layout,
4870 fd, __le64_to_cpu(bsb.devstart)*512,
4871 __le64_to_cpu(bsb.arraystart)*512,
4872 __le64_to_cpu(bsb.length)*512, NULL)) {
4873 /* didn't succeed, so give up */
4874 if (verbose)
4875 pr_err("Error restoring backup from %s\n",
4876 devname);
4877 free(offsets);
4878 return 1;
4879 }
4880
4881 if (bsb.magic[15] == '2' &&
4882 restore_stripes(fdlist, offsets,
4883 info->array.raid_disks,
4884 info->new_chunk,
4885 info->new_level,
4886 info->new_layout,
4887 fd, __le64_to_cpu(bsb.devstart)*512 +
4888 __le64_to_cpu(bsb.devstart2)*512,
4889 __le64_to_cpu(bsb.arraystart2)*512,
4890 __le64_to_cpu(bsb.length2)*512, NULL)) {
4891 /* didn't succeed, so give up */
4892 if (verbose)
4893 pr_err("Error restoring second backup from %s\n",
4894 devname);
4895 free(offsets);
4896 return 1;
4897 }
4898
4899 free(offsets);
4900
4901 /* Ok, so the data is restored. Let's update those superblocks. */
4902
4903 lo = hi = 0;
4904 if (bsb.length) {
4905 lo = __le64_to_cpu(bsb.arraystart);
4906 hi = lo + __le64_to_cpu(bsb.length);
4907 }
4908 if (bsb.magic[15] == '2' && bsb.length2) {
4909 unsigned long long lo1, hi1;
4910 lo1 = __le64_to_cpu(bsb.arraystart2);
4911 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4912 if (lo == hi) {
4913 lo = lo1;
4914 hi = hi1;
4915 } else if (lo < lo1)
4916 hi = hi1;
4917 else
4918 lo = lo1;
4919 }
4920 if (lo < hi &&
4921 (info->reshape_progress < lo ||
4922 info->reshape_progress > hi))
4923 /* backup does not affect reshape_progress */ ;
4924 else if (info->delta_disks >= 0) {
4925 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4926 __le64_to_cpu(bsb.length);
4927 if (bsb.magic[15] == '2') {
4928 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2) +
4929 __le64_to_cpu(bsb.length2);
4930 if (p2 > info->reshape_progress)
4931 info->reshape_progress = p2;
4932 }
4933 } else {
4934 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4935 if (bsb.magic[15] == '2') {
4936 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2);
4937 if (p2 < info->reshape_progress)
4938 info->reshape_progress = p2;
4939 }
4940 }
4941 for (j=0; j<info->array.raid_disks; j++) {
4942 if (fdlist[j] < 0)
4943 continue;
4944 if (st->ss->load_super(st, fdlist[j], NULL))
4945 continue;
4946 st->ss->getinfo_super(st, &dinfo, NULL);
4947 dinfo.reshape_progress = info->reshape_progress;
4948 st->ss->update_super(st, &dinfo,
4949 "_reshape_progress",
4950 NULL,0, 0, NULL);
4951 st->ss->store_super(st, fdlist[j]);
4952 st->ss->free_super(st);
4953 }
4954 return 0;
4955 }
4956 /* Didn't find any backup data, try to see if any
4957 * was needed.
4958 */
4959 if (info->delta_disks < 0) {
4960 /* When shrinking, the critical section is at the end.
4961 * So see if we are before the critical section.
4962 */
4963 unsigned long long first_block;
4964 nstripe = ostripe = 0;
4965 first_block = 0;
4966 while (ostripe >= nstripe) {
4967 ostripe += info->array.chunk_size / 512;
4968 first_block = ostripe * odata;
4969 nstripe = first_block / ndata / (info->new_chunk/512) *
4970 (info->new_chunk/512);
4971 }
4972
4973 if (info->reshape_progress >= first_block)
4974 return 0;
4975 }
4976 if (info->delta_disks > 0) {
4977 /* See if we are beyond the critical section. */
4978 unsigned long long last_block;
4979 nstripe = ostripe = 0;
4980 last_block = 0;
4981 while (nstripe >= ostripe) {
4982 nstripe += info->new_chunk / 512;
4983 last_block = nstripe * ndata;
4984 ostripe = last_block / odata / (info->array.chunk_size/512) *
4985 (info->array.chunk_size/512);
4986 }
4987
4988 if (info->reshape_progress >= last_block)
4989 return 0;
4990 }
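/* Worked example for the loop above (hypothetical numbers): growing from
 * 3 to 4 data disks with old and new chunk both 256K (512 sectors):
 *   nstripe  512 -> last_block 2048, ostripe  512   (keep going)
 *   nstripe 1024 -> last_block 4096, ostripe 1024   (keep going)
 *   nstripe 1536 -> last_block 6144, ostripe 2048   (loop ends)
 * so the critical section ends at array address 6144 sectors; any
 * reshape_progress at or beyond that needs no backup.
 */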
4991 /* needed to recover critical section! */
4992 if (verbose)
4993 pr_err("Failed to find backup of critical section\n");
4994 return 1;
4995 }
4996
4997 int Grow_continue_command(char *devname, int fd,
4998 char *backup_file, int verbose)
4999 {
5000 int ret_val = 0;
5001 struct supertype *st = NULL;
5002 struct mdinfo *content = NULL;
5003 struct mdinfo array;
5004 char *subarray = NULL;
5005 struct mdinfo *cc = NULL;
5006 struct mdstat_ent *mdstat = NULL;
5007 int cfd = -1;
5008 int fd2;
5009
5010 dprintf("Grow continue from command line called for %s\n",
5011 devname);
5012
5013 st = super_by_fd(fd, &subarray);
5014 if (!st || !st->ss) {
5015 pr_err("Unable to determine metadata format for %s\n",
5016 devname);
5017 return 1;
5018 }
5019 dprintf("Grow continue is run for ");
5020 if (st->ss->external == 0) {
5021 int d;
5022 int cnt = 5;
5023 dprintf_cont("native array (%s)\n", devname);
5024 if (md_get_array_info(fd, &array.array) < 0) {
5025 pr_err("%s is not an active md array - aborting\n",
5026 devname);
5027 ret_val = 1;
5028 goto Grow_continue_command_exit;
5029 }
5030 content = &array;
5031 sysfs_init(content, fd, NULL);
5032 /* Need to load a superblock.
5033 * FIXME we should really get what we need from
5034 * sysfs
5035 */
5036 do {
5037 for (d = 0; d < MAX_DISKS; d++) {
5038 mdu_disk_info_t disk;
5039 char *dv;
5040 int err;
5041 disk.number = d;
5042 if (md_get_disk_info(fd, &disk) < 0)
5043 continue;
5044 if (disk.major == 0 && disk.minor == 0)
5045 continue;
5046 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
5047 continue;
5048 dv = map_dev(disk.major, disk.minor, 1);
5049 if (!dv)
5050 continue;
5051 fd2 = dev_open(dv, O_RDONLY);
5052 if (fd2 < 0)
5053 continue;
5054 err = st->ss->load_super(st, fd2, NULL);
5055 close(fd2);
5056 if (err)
5057 continue;
5058 break;
5059 }
5060 if (d == MAX_DISKS) {
5061 pr_err("Unable to load metadata for %s\n",
5062 devname);
5063 ret_val = 1;
5064 goto Grow_continue_command_exit;
5065 }
5066 st->ss->getinfo_super(st, content, NULL);
5067 if (!content->reshape_active)
5068 sleep(3);
5069 else
5070 break;
5071 } while (cnt-- > 0);
5072 } else {
5073 char *container;
5074
5075 if (subarray) {
5076 dprintf_cont("subarray (%s)\n", subarray);
5077 container = st->container_devnm;
5078 cfd = open_dev_excl(st->container_devnm);
5079 } else {
5080 container = st->devnm;
5081 close(fd);
5082 cfd = open_dev_excl(st->devnm);
5083 dprintf_cont("container (%s)\n", container);
5084 fd = cfd;
5085 }
5086 if (cfd < 0) {
5087 pr_err("Unable to open container for %s\n", devname);
5088 ret_val = 1;
5089 goto Grow_continue_command_exit;
5090 }
5091
5092 /* find the array under reshape in the container
5093 */
5094 ret_val = st->ss->load_container(st, cfd, NULL);
5095 if (ret_val) {
5096 pr_err("Cannot read superblock for %s\n",
5097 devname);
5098 ret_val = 1;
5099 goto Grow_continue_command_exit;
5100 }
5101
5102 cc = st->ss->container_content(st, subarray);
5103 for (content = cc; content ; content = content->next) {
5104 char *array_name;
5105 int allow_reshape = 1;
5106
5107 if (content->reshape_active == 0)
5108 continue;
5109 /* The decision about array or container wide
5110 * reshape is taken in Grow_continue based on
5111 * content->reshape_active state, therefore we
5112 * need to check_reshape based on
5113 * reshape_active and subarray name
5114 */
5115 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
5116 allow_reshape = 0;
5117 if (content->reshape_active == CONTAINER_RESHAPE &&
5118 (content->array.state
5119 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
5120 allow_reshape = 0;
5121
5122 if (!allow_reshape) {
5123 pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
5124 devname, container);
5125 ret_val = 1;
5126 goto Grow_continue_command_exit;
5127 }
5128
5129 array_name = strchr(content->text_version+1, '/')+1;
5130 mdstat = mdstat_by_subdev(array_name, container);
5131 if (!mdstat)
5132 continue;
5133 if (mdstat->active == 0) {
5134 pr_err("Skipping inactive array %s.\n",
5135 mdstat->devnm);
5136 free_mdstat(mdstat);
5137 mdstat = NULL;
5138 continue;
5139 }
5140 break;
5141 }
5142 if (!content) {
5143 pr_err("Unable to determine reshaped array for %s\n", devname);
5144 ret_val = 1;
5145 goto Grow_continue_command_exit;
5146 }
5147 fd2 = open_dev(mdstat->devnm);
5148 if (fd2 < 0) {
5149 pr_err("cannot open (%s)\n", mdstat->devnm);
5150 ret_val = 1;
5151 goto Grow_continue_command_exit;
5152 }
5153
5154 if (sysfs_init(content, fd2, mdstat->devnm)) {
5155 pr_err("Unable to initialize sysfs for %s, Grow cannot continue.\n",
5156 mdstat->devnm);
5157 ret_val = 1;
5158 close(fd2);
5159 goto Grow_continue_command_exit;
5160 }
5161
5162 close(fd2);
5163
5164 /* start mdmon in case it is not running
5165 */
5166 if (!mdmon_running(container))
5167 start_mdmon(container);
5168 ping_monitor(container);
5169
5170 if (mdmon_running(container))
5171 st->update_tail = &st->updates;
5172 else {
5173 pr_err("No mdmon found. Grow cannot continue.\n");
5174 ret_val = 1;
5175 goto Grow_continue_command_exit;
5176 }
5177 }
5178
5179 /* verify that array under reshape is started from
5180 * correct position
5181 */
5182 if (verify_reshape_position(content, content->array.level) < 0) {
5183 ret_val = 1;
5184 goto Grow_continue_command_exit;
5185 }
5186
5187 /* continue reshape
5188 */
5189 ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);
5190
5191 Grow_continue_command_exit:
5192 if (cfd > -1)
5193 close(cfd);
5194 st->ss->free_super(st);
5195 free_mdstat(mdstat);
5196 sysfs_free(cc);
5197 free(subarray);
5198
5199 return ret_val;
5200 }
5201
5202 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
5203 char *backup_file, int forked, int freeze_reshape)
5204 {
5205 int ret_val = 2;
5206
5207 if (!info->reshape_active)
5208 return ret_val;
5209
5210 if (st->ss->external) {
5211 int cfd = open_dev(st->container_devnm);
5212
5213 if (cfd < 0)
5214 return 1;
5215
5216 st->ss->load_container(st, cfd, st->container_devnm);
5217 close(cfd);
5218 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
5219 st, info, 0, backup_file,
5220 0, forked,
5221 1 | info->reshape_active,
5222 freeze_reshape);
5223 } else
5224 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
5225 NULL, INVALID_SECTORS,
5226 backup_file, 0, forked,
5227 1 | info->reshape_active,
5228 freeze_reshape);
5229
5230 return ret_val;
5231 }
5232
5233 char *make_backup(char *name)
5234 {
5235 char *base = "backup_file-";
5236 int len;
5237 char *fname;
5238
5239 len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1;
5240 fname = xmalloc(len);
5241 sprintf(fname, "%s/%s%s", MAP_DIR, base, name);
5242 return fname;
5243 }
5244
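/* The per-array backup file used when a reshape is restarted lives under
 * MAP_DIR and is named "backup_file-<devnm>" (for example, assuming a
 * MAP_DIR of /run/mdadm, something like /run/mdadm/backup_file-md127).
 * make_backup() above builds that path; locate_backup() returns it only
 * if a regular file already exists there.
 */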
5245 char *locate_backup(char *name)
5246 {
5247 char *fl = make_backup(name);
5248 struct stat stb;
5249
5250 if (stat(fl, &stb) == 0 &&
5251 S_ISREG(stb.st_mode))
5252 return fl;
5253
5254 free(fl);
5255 return NULL;
5256 }