]> git.ipfire.org Git - thirdparty/mdadm.git/blob - Grow.c
mdadm/Grow: Fix use after close bug by closing after fork
[thirdparty/mdadm.git] / Grow.c
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stddef.h>
28 #include <stdint.h>
29 #include <sys/wait.h>
30
31 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
32 #error no endian defined
33 #endif
34 #include "md_u.h"
35 #include "md_p.h"
36
37 int restore_backup(struct supertype *st,
38 struct mdinfo *content,
39 int working_disks,
40 int next_spare,
41 char **backup_filep,
42 int verbose)
43 {
44 int i;
45 int *fdlist;
46 struct mdinfo *dev;
47 int err;
48 int disk_count = next_spare + working_disks;
49 char *backup_file = *backup_filep;
50
51 dprintf("Called restore_backup()\n");
52 fdlist = xmalloc(sizeof(int) * disk_count);
53
54 enable_fds(next_spare);
55 for (i = 0; i < next_spare; i++)
56 fdlist[i] = -1;
57 for (dev = content->devs; dev; dev = dev->next) {
58 char buf[22];
59 int fd;
60
61 sprintf(buf, "%d:%d", dev->disk.major, dev->disk.minor);
62 fd = dev_open(buf, O_RDWR);
63
64 if (dev->disk.raid_disk >= 0)
65 fdlist[dev->disk.raid_disk] = fd;
66 else
67 fdlist[next_spare++] = fd;
68 }
69
70 if (!backup_file) {
71 backup_file = locate_backup(content->sys_name);
72 *backup_filep = backup_file;
73 }
74
75 if (st->ss->external && st->ss->recover_backup)
76 err = st->ss->recover_backup(st, content);
77 else
78 err = Grow_restart(st, content, fdlist, next_spare,
79 backup_file, verbose > 0);
80
81 while (next_spare > 0) {
82 next_spare--;
83 if (fdlist[next_spare] >= 0)
84 close(fdlist[next_spare]);
85 }
86 free(fdlist);
87 if (err) {
88 pr_err("Failed to restore critical section for reshape - sorry.\n");
89 if (!backup_file)
90 pr_err("Possibly you need to specify a --backup-file\n");
91 return 1;
92 }
93
94 dprintf("restore_backup() returns status OK.\n");
95 return 0;
96 }
97
98 int Grow_Add_device(char *devname, int fd, char *newdev)
99 {
100 /* Add a device to an active array.
101 * Currently, just extend a linear array.
102 * This requires writing a new superblock on the
103 * new device, calling the kernel to add the device,
104 * and if that succeeds, update the superblock on
105 * all other devices.
106 * This means that we need to *find* all other devices.
107 */
108 struct mdinfo info;
109
110 dev_t rdev;
111 int nfd, fd2;
112 int d, nd;
113 struct supertype *st = NULL;
114 char *subarray = NULL;
115
116 if (md_get_array_info(fd, &info.array) < 0) {
117 pr_err("cannot get array info for %s\n", devname);
118 return 1;
119 }
120
121 if (info.array.level != -1) {
122 pr_err("can only add devices to linear arrays\n");
123 return 1;
124 }
125
126 st = super_by_fd(fd, &subarray);
127 if (!st) {
128 pr_err("cannot handle arrays with superblock version %d\n",
129 info.array.major_version);
130 return 1;
131 }
132
133 if (subarray) {
134 pr_err("Cannot grow linear sub-arrays yet\n");
135 free(subarray);
136 free(st);
137 return 1;
138 }
139
140 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
141 if (nfd < 0) {
142 pr_err("cannot open %s\n", newdev);
143 free(st);
144 return 1;
145 }
146 if (!fstat_is_blkdev(nfd, newdev, &rdev)) {
147 close(nfd);
148 free(st);
149 return 1;
150 }
151 /* now check out all the devices and make sure we can read the
152 * superblock */
153 for (d=0 ; d < info.array.raid_disks ; d++) {
154 mdu_disk_info_t disk;
155 char *dv;
156
157 st->ss->free_super(st);
158
159 disk.number = d;
160 if (md_get_disk_info(fd, &disk) < 0) {
161 pr_err("cannot get device detail for device %d\n", d);
162 close(nfd);
163 free(st);
164 return 1;
165 }
166 dv = map_dev(disk.major, disk.minor, 1);
167 if (!dv) {
168 pr_err("cannot find device file for device %d\n", d);
169 close(nfd);
170 free(st);
171 return 1;
172 }
173 fd2 = dev_open(dv, O_RDWR);
174 if (fd2 < 0) {
175 pr_err("cannot open device file %s\n", dv);
176 close(nfd);
177 free(st);
178 return 1;
179 }
180
181 if (st->ss->load_super(st, fd2, NULL)) {
182 pr_err("cannot find super block on %s\n", dv);
183 close(nfd);
184 close(fd2);
185 free(st);
186 return 1;
187 }
188 close(fd2);
189 }
190 /* Ok, looks good. Lets update the superblock and write it out to
191 * newdev.
192 */
193
194 info.disk.number = d;
195 info.disk.major = major(rdev);
196 info.disk.minor = minor(rdev);
197 info.disk.raid_disk = d;
198 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
199 if (st->ss->update_super(st, &info, "linear-grow-new", newdev,
200 0, 0, NULL) != 0) {
201 pr_err("Preparing new metadata failed on %s\n", newdev);
202 close(nfd);
203 return 1;
204 }
205
206 if (st->ss->store_super(st, nfd)) {
207 pr_err("Cannot store new superblock on %s\n", newdev);
208 close(nfd);
209 return 1;
210 }
211 close(nfd);
212
213 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
214 pr_err("Cannot add new disk to this array\n");
215 return 1;
216 }
217 /* Well, that seems to have worked.
218 * Now go through and update all superblocks
219 */
220
221 if (md_get_array_info(fd, &info.array) < 0) {
222 pr_err("cannot get array info for %s\n", devname);
223 return 1;
224 }
225
226 nd = d;
227 for (d=0 ; d < info.array.raid_disks ; d++) {
228 mdu_disk_info_t disk;
229 char *dv;
230
231 disk.number = d;
232 if (md_get_disk_info(fd, &disk) < 0) {
233 pr_err("cannot get device detail for device %d\n", d);
234 return 1;
235 }
236 dv = map_dev(disk.major, disk.minor, 1);
237 if (!dv) {
238 pr_err("cannot find device file for device %d\n", d);
239 return 1;
240 }
241 fd2 = dev_open(dv, O_RDWR);
242 if (fd2 < 0) {
243 pr_err("cannot open device file %s\n", dv);
244 return 1;
245 }
246 if (st->ss->load_super(st, fd2, NULL)) {
247 pr_err("cannot find super block on %s\n", dv);
248 close(fd);
249 close(fd2);
250 return 1;
251 }
252 info.array.raid_disks = nd+1;
253 info.array.nr_disks = nd+1;
254 info.array.active_disks = nd+1;
255 info.array.working_disks = nd+1;
256
257 if (st->ss->update_super(st, &info, "linear-grow-update", dv,
258 0, 0, NULL) != 0) {
259 pr_err("Updating metadata failed on %s\n", dv);
260 close(fd2);
261 return 1;
262 }
263
264 if (st->ss->store_super(st, fd2)) {
265 pr_err("Cannot store new superblock on %s\n", dv);
266 close(fd2);
267 return 1;
268 }
269 close(fd2);
270 }
271
272 return 0;
273 }
274
275 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
276 {
277 /*
278 * First check that array doesn't have a bitmap
279 * Then create the bitmap
280 * Then add it
281 *
282 * For internal bitmaps, we need to check the version,
283 * find all the active devices, and write the bitmap block
284 * to all devices
285 */
286 mdu_bitmap_file_t bmf;
287 mdu_array_info_t array;
288 struct supertype *st;
289 char *subarray = NULL;
290 int major = BITMAP_MAJOR_HI;
291 unsigned long long bitmapsize, array_size;
292 struct mdinfo *mdi;
293
294 /*
295 * We only ever get called if s->bitmap_file is != NULL, so this check
296 * is just here to quiet down static code checkers.
297 */
298 if (!s->bitmap_file)
299 return 1;
300
301 if (strcmp(s->bitmap_file, "clustered") == 0)
302 major = BITMAP_MAJOR_CLUSTERED;
303
304 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
305 if (errno == ENOMEM)
306 pr_err("Memory allocation failure.\n");
307 else
308 pr_err("bitmaps not supported by this kernel.\n");
309 return 1;
310 }
311 if (bmf.pathname[0]) {
312 if (strcmp(s->bitmap_file,"none") == 0) {
313 if (ioctl(fd, SET_BITMAP_FILE, -1) != 0) {
314 pr_err("failed to remove bitmap %s\n",
315 bmf.pathname);
316 return 1;
317 }
318 return 0;
319 }
320 pr_err("%s already has a bitmap (%s)\n", devname, bmf.pathname);
321 return 1;
322 }
323 if (md_get_array_info(fd, &array) != 0) {
324 pr_err("cannot get array status for %s\n", devname);
325 return 1;
326 }
327 if (array.state & (1 << MD_SB_BITMAP_PRESENT)) {
328 if (strcmp(s->bitmap_file, "none")==0) {
329 array.state &= ~(1 << MD_SB_BITMAP_PRESENT);
330 if (md_set_array_info(fd, &array) != 0) {
331 if (array.state & (1 << MD_SB_CLUSTERED))
332 pr_err("failed to remove clustered bitmap.\n");
333 else
334 pr_err("failed to remove internal bitmap.\n");
335 return 1;
336 }
337 return 0;
338 }
339 pr_err("bitmap already present on %s\n", devname);
340 return 1;
341 }
342
343 if (strcmp(s->bitmap_file, "none") == 0) {
344 pr_err("no bitmap found on %s\n", devname);
345 return 1;
346 }
347 if (array.level <= 0) {
348 pr_err("Bitmaps not meaningful with level %s\n",
349 map_num(pers, array.level)?:"of this array");
350 return 1;
351 }
352 bitmapsize = array.size;
353 bitmapsize <<= 1;
354 if (get_dev_size(fd, NULL, &array_size) &&
355 array_size > (0x7fffffffULL << 9)) {
356 /* Array is big enough that we cannot trust array.size
357 * try other approaches
358 */
359 bitmapsize = get_component_size(fd);
360 }
361 if (bitmapsize == 0) {
362 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
363 return 1;
364 }
365
366 if (array.level == 10) {
367 int ncopies;
368
369 ncopies = (array.layout & 255) * ((array.layout >> 8) & 255);
370 bitmapsize = bitmapsize * array.raid_disks / ncopies;
371
372 if (strcmp(s->bitmap_file, "clustered") == 0 &&
373 !is_near_layout_10(array.layout)) {
374 pr_err("only near layout is supported with clustered raid10\n");
375 return 1;
376 }
377 }
378
379 st = super_by_fd(fd, &subarray);
380 if (!st) {
381 pr_err("Cannot understand version %d.%d\n",
382 array.major_version, array.minor_version);
383 return 1;
384 }
385 if (subarray) {
386 pr_err("Cannot add bitmaps to sub-arrays yet\n");
387 free(subarray);
388 free(st);
389 return 1;
390 }
391
392 mdi = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY);
393 if (mdi) {
394 if (mdi->consistency_policy == CONSISTENCY_POLICY_PPL) {
395 pr_err("Cannot add bitmap to array with PPL\n");
396 free(mdi);
397 free(st);
398 return 1;
399 }
400 free(mdi);
401 }
402
403 if (strcmp(s->bitmap_file, "internal") == 0 ||
404 strcmp(s->bitmap_file, "clustered") == 0) {
405 int rv;
406 int d;
407 int offset_setable = 0;
408 if (st->ss->add_internal_bitmap == NULL) {
409 pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name);
410 return 1;
411 }
412 st->nodes = c->nodes;
413 st->cluster_name = c->homecluster;
414 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
415 if (mdi)
416 offset_setable = 1;
417 for (d = 0; d < st->max_devs; d++) {
418 mdu_disk_info_t disk;
419 char *dv;
420 int fd2;
421
422 disk.number = d;
423 if (md_get_disk_info(fd, &disk) < 0)
424 continue;
425 if (disk.major == 0 && disk.minor == 0)
426 continue;
427 if ((disk.state & (1 << MD_DISK_SYNC)) == 0)
428 continue;
429 dv = map_dev(disk.major, disk.minor, 1);
430 if (!dv)
431 continue;
432 if (((disk.state & (1 << MD_DISK_WRITEMOSTLY)) == 0) &&
433 (strcmp(s->bitmap_file, "clustered") == 0)) {
434 pr_err("%s disks marked write-mostly are not supported with clustered bitmap\n",devname);
435 return 1;
436 }
437 fd2 = dev_open(dv, O_RDWR);
438 if (fd2 < 0)
439 continue;
440 rv = st->ss->load_super(st, fd2, NULL);
441 if (!rv) {
442 rv = st->ss->add_internal_bitmap(
443 st, &s->bitmap_chunk, c->delay,
444 s->write_behind, bitmapsize,
445 offset_setable, major);
446 if (!rv) {
447 st->ss->write_bitmap(st, fd2,
448 NodeNumUpdate);
449 } else {
450 pr_err("failed to create internal bitmap - chunksize problem.\n");
451 }
452 } else {
453 pr_err("failed to load super-block.\n");
454 }
455 close(fd2);
456 if (rv)
457 return 1;
458 }
459 if (offset_setable) {
460 st->ss->getinfo_super(st, mdi, NULL);
461 if (sysfs_init(mdi, fd, NULL)) {
462 pr_err("failed to initialize sysfs.\n");
463 free(mdi);
464 }
465 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
466 mdi->bitmap_offset);
467 free(mdi);
468 } else {
469 if (strcmp(s->bitmap_file, "clustered") == 0)
470 array.state |= (1 << MD_SB_CLUSTERED);
471 array.state |= (1 << MD_SB_BITMAP_PRESENT);
472 rv = md_set_array_info(fd, &array);
473 }
474 if (rv < 0) {
475 if (errno == EBUSY)
476 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
477 pr_err("failed to set internal bitmap.\n");
478 return 1;
479 }
480 } else {
481 int uuid[4];
482 int bitmap_fd;
483 int d;
484 int max_devs = st->max_devs;
485
486 /* try to load a superblock */
487 for (d = 0; d < max_devs; d++) {
488 mdu_disk_info_t disk;
489 char *dv;
490 int fd2;
491 disk.number = d;
492 if (md_get_disk_info(fd, &disk) < 0)
493 continue;
494 if ((disk.major==0 && disk.minor == 0) ||
495 (disk.state & (1 << MD_DISK_REMOVED)))
496 continue;
497 dv = map_dev(disk.major, disk.minor, 1);
498 if (!dv)
499 continue;
500 fd2 = dev_open(dv, O_RDONLY);
501 if (fd2 >= 0) {
502 if (st->ss->load_super(st, fd2, NULL) == 0) {
503 close(fd2);
504 st->ss->uuid_from_super(st, uuid);
505 break;
506 }
507 close(fd2);
508 }
509 }
510 if (d == max_devs) {
511 pr_err("cannot find UUID for array!\n");
512 return 1;
513 }
514 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid,
515 s->bitmap_chunk, c->delay, s->write_behind,
516 bitmapsize, major)) {
517 return 1;
518 }
519 bitmap_fd = open(s->bitmap_file, O_RDWR);
520 if (bitmap_fd < 0) {
521 pr_err("weird: %s cannot be opened\n", s->bitmap_file);
522 return 1;
523 }
524 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
525 int err = errno;
526 if (errno == EBUSY)
527 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
528 pr_err("Cannot set bitmap file for %s: %s\n",
529 devname, strerror(err));
530 return 1;
531 }
532 }
533
534 return 0;
535 }
536
537 int Grow_consistency_policy(char *devname, int fd, struct context *c, struct shape *s)
538 {
539 struct supertype *st;
540 struct mdinfo *sra;
541 struct mdinfo *sd;
542 char *subarray = NULL;
543 int ret = 0;
544 char container_dev[PATH_MAX];
545 char buf[20];
546
547 if (s->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
548 s->consistency_policy != CONSISTENCY_POLICY_PPL) {
549 pr_err("Operation not supported for consistency policy %s\n",
550 map_num_s(consistency_policies, s->consistency_policy));
551 return 1;
552 }
553
554 st = super_by_fd(fd, &subarray);
555 if (!st)
556 return 1;
557
558 sra = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY|GET_LEVEL|
559 GET_DEVS|GET_STATE);
560 if (!sra) {
561 ret = 1;
562 goto free_st;
563 }
564
565 if (s->consistency_policy == CONSISTENCY_POLICY_PPL &&
566 !st->ss->write_init_ppl) {
567 pr_err("%s metadata does not support PPL\n", st->ss->name);
568 ret = 1;
569 goto free_info;
570 }
571
572 if (sra->array.level != 5) {
573 pr_err("Operation not supported for array level %d\n",
574 sra->array.level);
575 ret = 1;
576 goto free_info;
577 }
578
579 if (sra->consistency_policy == (unsigned)s->consistency_policy) {
580 pr_err("Consistency policy is already %s\n",
581 map_num_s(consistency_policies, s->consistency_policy));
582 ret = 1;
583 goto free_info;
584 } else if (sra->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
585 sra->consistency_policy != CONSISTENCY_POLICY_PPL) {
586 pr_err("Current consistency policy is %s, cannot change to %s\n",
587 map_num_s(consistency_policies, sra->consistency_policy),
588 map_num_s(consistency_policies, s->consistency_policy));
589 ret = 1;
590 goto free_info;
591 }
592
593 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
594 if (sysfs_get_str(sra, NULL, "sync_action", buf, 20) <= 0) {
595 ret = 1;
596 goto free_info;
597 } else if (strcmp(buf, "reshape\n") == 0) {
598 pr_err("PPL cannot be enabled when reshape is in progress\n");
599 ret = 1;
600 goto free_info;
601 }
602 }
603
604 if (subarray) {
605 char *update;
606
607 if (s->consistency_policy == CONSISTENCY_POLICY_PPL)
608 update = "ppl";
609 else
610 update = "no-ppl";
611
612 sprintf(container_dev, "/dev/%s", st->container_devnm);
613
614 ret = Update_subarray(container_dev, subarray, update, NULL,
615 c->verbose);
616 if (ret)
617 goto free_info;
618 }
619
620 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
621 struct mdinfo info;
622
623 if (subarray) {
624 struct mdinfo *mdi;
625 int cfd;
626
627 cfd = open(container_dev, O_RDWR|O_EXCL);
628 if (cfd < 0) {
629 pr_err("Failed to open %s\n", container_dev);
630 ret = 1;
631 goto free_info;
632 }
633
634 ret = st->ss->load_container(st, cfd, st->container_devnm);
635 close(cfd);
636
637 if (ret) {
638 pr_err("Cannot read superblock for %s\n",
639 container_dev);
640 goto free_info;
641 }
642
643 mdi = st->ss->container_content(st, subarray);
644 info = *mdi;
645 free(mdi);
646 }
647
648 for (sd = sra->devs; sd; sd = sd->next) {
649 int dfd;
650 char *devpath;
651
652 devpath = map_dev(sd->disk.major, sd->disk.minor, 0);
653 dfd = dev_open(devpath, O_RDWR);
654 if (dfd < 0) {
655 pr_err("Failed to open %s\n", devpath);
656 ret = 1;
657 goto free_info;
658 }
659
660 if (!subarray) {
661 ret = st->ss->load_super(st, dfd, NULL);
662 if (ret) {
663 pr_err("Failed to load super-block.\n");
664 close(dfd);
665 goto free_info;
666 }
667
668 ret = st->ss->update_super(st, sra, "ppl",
669 devname,
670 c->verbose, 0, NULL);
671 if (ret) {
672 close(dfd);
673 st->ss->free_super(st);
674 goto free_info;
675 }
676 st->ss->getinfo_super(st, &info, NULL);
677 }
678
679 ret |= sysfs_set_num(sra, sd, "ppl_sector",
680 info.ppl_sector);
681 ret |= sysfs_set_num(sra, sd, "ppl_size",
682 info.ppl_size);
683
684 if (ret) {
685 pr_err("Failed to set PPL attributes for %s\n",
686 sd->sys_name);
687 close(dfd);
688 st->ss->free_super(st);
689 goto free_info;
690 }
691
692 ret = st->ss->write_init_ppl(st, &info, dfd);
693 if (ret)
694 pr_err("Failed to write PPL\n");
695
696 close(dfd);
697
698 if (!subarray)
699 st->ss->free_super(st);
700
701 if (ret)
702 goto free_info;
703 }
704 }
705
706 ret = sysfs_set_str(sra, NULL, "consistency_policy",
707 map_num_s(consistency_policies,
708 s->consistency_policy));
709 if (ret)
710 pr_err("Failed to change array consistency policy\n");
711
712 free_info:
713 sysfs_free(sra);
714 free_st:
715 free(st);
716 free(subarray);
717
718 return ret;
719 }
720
721 /*
722 * When reshaping an array we might need to backup some data.
723 * This is written to all spares with a 'super_block' describing it.
724 * The superblock goes 4K from the end of the used space on the
725 * device.
726 * It if written after the backup is complete.
727 * It has the following structure.
728 */
729
730 static struct mdp_backup_super {
731 char magic[16]; /* md_backup_data-1 or -2 */
732 __u8 set_uuid[16];
733 __u64 mtime;
734 /* start/sizes in 512byte sectors */
735 __u64 devstart; /* address on backup device/file of data */
736 __u64 arraystart;
737 __u64 length;
738 __u32 sb_csum; /* csum of preceeding bytes. */
739 __u32 pad1;
740 __u64 devstart2; /* offset in to data of second section */
741 __u64 arraystart2;
742 __u64 length2;
743 __u32 sb_csum2; /* csum of preceeding bytes. */
744 __u8 pad[512-68-32];
745 } __attribute__((aligned(512))) bsb, bsb2;
746
747 static __u32 bsb_csum(char *buf, int len)
748 {
749 int i;
750 int csum = 0;
751 for (i = 0; i < len; i++)
752 csum = (csum<<3) + buf[0];
753 return __cpu_to_le32(csum);
754 }
755
756 static int check_idle(struct supertype *st)
757 {
758 /* Check that all member arrays for this container, or the
759 * container of this array, are idle
760 */
761 char *container = (st->container_devnm[0]
762 ? st->container_devnm : st->devnm);
763 struct mdstat_ent *ent, *e;
764 int is_idle = 1;
765
766 ent = mdstat_read(0, 0);
767 for (e = ent ; e; e = e->next) {
768 if (!is_container_member(e, container))
769 continue;
770 /* frozen array is not idle*/
771 if (e->percent >= 0 || e->metadata_version[9] == '-') {
772 is_idle = 0;
773 break;
774 }
775 }
776 free_mdstat(ent);
777 return is_idle;
778 }
779
780 static int freeze_container(struct supertype *st)
781 {
782 char *container = (st->container_devnm[0]
783 ? st->container_devnm : st->devnm);
784
785 if (!check_idle(st))
786 return -1;
787
788 if (block_monitor(container, 1)) {
789 pr_err("failed to freeze container\n");
790 return -2;
791 }
792
793 return 1;
794 }
795
796 static void unfreeze_container(struct supertype *st)
797 {
798 char *container = (st->container_devnm[0]
799 ? st->container_devnm : st->devnm);
800
801 unblock_monitor(container, 1);
802 }
803
804 static int freeze(struct supertype *st)
805 {
806 /* Try to freeze resync/rebuild on this array/container.
807 * Return -1 if the array is busy,
808 * return -2 container cannot be frozen,
809 * return 0 if this kernel doesn't support 'frozen'
810 * return 1 if it worked.
811 */
812 if (st->ss->external)
813 return freeze_container(st);
814 else {
815 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
816 int err;
817 char buf[20];
818
819 if (!sra)
820 return -1;
821 /* Need to clear any 'read-auto' status */
822 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
823 strncmp(buf, "read-auto", 9) == 0)
824 sysfs_set_str(sra, NULL, "array_state", "clean");
825
826 err = sysfs_freeze_array(sra);
827 sysfs_free(sra);
828 return err;
829 }
830 }
831
832 static void unfreeze(struct supertype *st)
833 {
834 if (st->ss->external)
835 return unfreeze_container(st);
836 else {
837 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
838 char buf[20];
839
840 if (sra &&
841 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0 &&
842 strcmp(buf, "frozen\n") == 0)
843 sysfs_set_str(sra, NULL, "sync_action", "idle");
844 sysfs_free(sra);
845 }
846 }
847
848 static void wait_reshape(struct mdinfo *sra)
849 {
850 int fd = sysfs_get_fd(sra, NULL, "sync_action");
851 char action[20];
852
853 if (fd < 0)
854 return;
855
856 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
857 strncmp(action, "reshape", 7) == 0)
858 sysfs_wait(fd, NULL);
859 close(fd);
860 }
861
862 static int reshape_super(struct supertype *st, unsigned long long size,
863 int level, int layout, int chunksize, int raid_disks,
864 int delta_disks, char *backup_file, char *dev,
865 int direction, int verbose)
866 {
867 /* nothing extra to check in the native case */
868 if (!st->ss->external)
869 return 0;
870 if (!st->ss->reshape_super || !st->ss->manage_reshape) {
871 pr_err("%s metadata does not support reshape\n",
872 st->ss->name);
873 return 1;
874 }
875
876 return st->ss->reshape_super(st, size, level, layout, chunksize,
877 raid_disks, delta_disks, backup_file, dev,
878 direction, verbose);
879 }
880
881 static void sync_metadata(struct supertype *st)
882 {
883 if (st->ss->external) {
884 if (st->update_tail) {
885 flush_metadata_updates(st);
886 st->update_tail = &st->updates;
887 } else
888 st->ss->sync_metadata(st);
889 }
890 }
891
892 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
893 {
894 /* when dealing with external metadata subarrays we need to be
895 * prepared to handle EAGAIN. The kernel may need to wait for
896 * mdmon to mark the array active so the kernel can handle
897 * allocations/writeback when preparing the reshape action
898 * (md_allow_write()). We temporarily disable safe_mode_delay
899 * to close a race with the array_state going clean before the
900 * next write to raid_disks / stripe_cache_size
901 */
902 char safe[50];
903 int rc;
904
905 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
906 if (!container ||
907 (strcmp(name, "raid_disks") != 0 &&
908 strcmp(name, "stripe_cache_size") != 0))
909 return sysfs_set_num(sra, NULL, name, n);
910
911 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
912 if (rc <= 0)
913 return -1;
914 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
915 rc = sysfs_set_num(sra, NULL, name, n);
916 if (rc < 0 && errno == EAGAIN) {
917 ping_monitor(container);
918 /* if we get EAGAIN here then the monitor is not active
919 * so stop trying
920 */
921 rc = sysfs_set_num(sra, NULL, name, n);
922 }
923 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
924 return rc;
925 }
926
927 int start_reshape(struct mdinfo *sra, int already_running,
928 int before_data_disks, int data_disks, struct supertype *st)
929 {
930 int err;
931 unsigned long long sync_max_to_set;
932
933 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
934 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
935 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
936 sra->reshape_progress);
937 if (before_data_disks <= data_disks)
938 sync_max_to_set = sra->reshape_progress / data_disks;
939 else
940 sync_max_to_set = (sra->component_size * data_disks
941 - sra->reshape_progress) / data_disks;
942
943 if (!already_running)
944 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
945
946 if (st->ss->external)
947 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
948 else
949 err = err ?: sysfs_set_str(sra, NULL, "sync_max", "max");
950
951 if (!already_running && err == 0) {
952 int cnt = 5;
953 do {
954 err = sysfs_set_str(sra, NULL, "sync_action",
955 "reshape");
956 if (err)
957 sleep(1);
958 } while (err && errno == EBUSY && cnt-- > 0);
959 }
960 return err;
961 }
962
963 void abort_reshape(struct mdinfo *sra)
964 {
965 sysfs_set_str(sra, NULL, "sync_action", "idle");
966 /*
967 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
968 * suspend_hi to decrease as well as increase.")
969 * you could only increase suspend_{lo,hi} unless the region they
970 * covered was empty. So to reset to 0, you need to push suspend_lo
971 * up past suspend_hi first. So to maximize the chance of mdadm
972 * working on all kernels, we want to keep doing that.
973 */
974 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
975 sysfs_set_num(sra, NULL, "suspend_hi", 0);
976 sysfs_set_num(sra, NULL, "suspend_lo", 0);
977 sysfs_set_num(sra, NULL, "sync_min", 0);
978 // It isn't safe to reset sync_max as we aren't monitoring.
979 // Array really should be stopped at this point.
980 }
981
982 int remove_disks_for_takeover(struct supertype *st,
983 struct mdinfo *sra,
984 int layout)
985 {
986 int nr_of_copies;
987 struct mdinfo *remaining;
988 int slot;
989
990 if (st->ss->external) {
991 int rv = 0;
992 struct mdinfo *arrays = st->ss->container_content(st, NULL);
993 /*
994 * containter_content returns list of arrays in container
995 * If arrays->next is not NULL it means that there are
996 * 2 arrays in container and operation should be blocked
997 */
998 if (arrays) {
999 if (arrays->next)
1000 rv = 1;
1001 sysfs_free(arrays);
1002 if (rv) {
1003 pr_err("Error. Cannot perform operation on %s- for this operation "
1004 "it MUST be single array in container\n", st->devnm);
1005 return rv;
1006 }
1007 }
1008 }
1009
1010 if (sra->array.level == 10)
1011 nr_of_copies = layout & 0xff;
1012 else if (sra->array.level == 1)
1013 nr_of_copies = sra->array.raid_disks;
1014 else
1015 return 1;
1016
1017 remaining = sra->devs;
1018 sra->devs = NULL;
1019 /* for each 'copy', select one device and remove from the list. */
1020 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
1021 struct mdinfo **diskp;
1022 int found = 0;
1023
1024 /* Find a working device to keep */
1025 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
1026 struct mdinfo *disk = *diskp;
1027
1028 if (disk->disk.raid_disk < slot)
1029 continue;
1030 if (disk->disk.raid_disk >= slot + nr_of_copies)
1031 continue;
1032 if (disk->disk.state & (1<<MD_DISK_REMOVED))
1033 continue;
1034 if (disk->disk.state & (1<<MD_DISK_FAULTY))
1035 continue;
1036 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
1037 continue;
1038
1039 /* We have found a good disk to use! */
1040 *diskp = disk->next;
1041 disk->next = sra->devs;
1042 sra->devs = disk;
1043 found = 1;
1044 break;
1045 }
1046 if (!found)
1047 break;
1048 }
1049
1050 if (slot < sra->array.raid_disks) {
1051 /* didn't find all slots */
1052 struct mdinfo **e;
1053 e = &remaining;
1054 while (*e)
1055 e = &(*e)->next;
1056 *e = sra->devs;
1057 sra->devs = remaining;
1058 return 1;
1059 }
1060
1061 /* Remove all 'remaining' devices from the array */
1062 while (remaining) {
1063 struct mdinfo *sd = remaining;
1064 remaining = sd->next;
1065
1066 sysfs_set_str(sra, sd, "state", "faulty");
1067 sysfs_set_str(sra, sd, "slot", "none");
1068 /* for external metadata disks should be removed in mdmon */
1069 if (!st->ss->external)
1070 sysfs_set_str(sra, sd, "state", "remove");
1071 sd->disk.state |= (1<<MD_DISK_REMOVED);
1072 sd->disk.state &= ~(1<<MD_DISK_SYNC);
1073 sd->next = sra->devs;
1074 sra->devs = sd;
1075 }
1076 return 0;
1077 }
1078
1079 void reshape_free_fdlist(int *fdlist,
1080 unsigned long long *offsets,
1081 int size)
1082 {
1083 int i;
1084
1085 for (i = 0; i < size; i++)
1086 if (fdlist[i] >= 0)
1087 close(fdlist[i]);
1088
1089 free(fdlist);
1090 free(offsets);
1091 }
1092
1093 int reshape_prepare_fdlist(char *devname,
1094 struct mdinfo *sra,
1095 int raid_disks,
1096 int nrdisks,
1097 unsigned long blocks,
1098 char *backup_file,
1099 int *fdlist,
1100 unsigned long long *offsets)
1101 {
1102 int d = 0;
1103 struct mdinfo *sd;
1104
1105 enable_fds(nrdisks);
1106 for (d = 0; d <= nrdisks; d++)
1107 fdlist[d] = -1;
1108 d = raid_disks;
1109 for (sd = sra->devs; sd; sd = sd->next) {
1110 if (sd->disk.state & (1<<MD_DISK_FAULTY))
1111 continue;
1112 if (sd->disk.state & (1<<MD_DISK_SYNC) &&
1113 sd->disk.raid_disk < raid_disks) {
1114 char *dn = map_dev(sd->disk.major, sd->disk.minor, 1);
1115 fdlist[sd->disk.raid_disk] = dev_open(dn, O_RDONLY);
1116 offsets[sd->disk.raid_disk] = sd->data_offset*512;
1117 if (fdlist[sd->disk.raid_disk] < 0) {
1118 pr_err("%s: cannot open component %s\n",
1119 devname, dn ? dn : "-unknown-");
1120 d = -1;
1121 goto release;
1122 }
1123 } else if (backup_file == NULL) {
1124 /* spare */
1125 char *dn = map_dev(sd->disk.major, sd->disk.minor, 1);
1126 fdlist[d] = dev_open(dn, O_RDWR);
1127 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
1128 if (fdlist[d] < 0) {
1129 pr_err("%s: cannot open component %s\n",
1130 devname, dn ? dn : "-unknown-");
1131 d = -1;
1132 goto release;
1133 }
1134 d++;
1135 }
1136 }
1137 release:
1138 return d;
1139 }
1140
1141 int reshape_open_backup_file(char *backup_file,
1142 int fd,
1143 char *devname,
1144 long blocks,
1145 int *fdlist,
1146 unsigned long long *offsets,
1147 char *sys_name,
1148 int restart)
1149 {
1150 /* Return 1 on success, 0 on any form of failure */
1151 /* need to check backup file is large enough */
1152 char buf[512];
1153 struct stat stb;
1154 unsigned int dev;
1155 int i;
1156
1157 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
1158 S_IRUSR | S_IWUSR);
1159 *offsets = 8 * 512;
1160 if (*fdlist < 0) {
1161 pr_err("%s: cannot create backup file %s: %s\n",
1162 devname, backup_file, strerror(errno));
1163 return 0;
1164 }
1165 /* Guard against backup file being on array device.
1166 * If array is partitioned or if LVM etc is in the
1167 * way this will not notice, but it is better than
1168 * nothing.
1169 */
1170 fstat(*fdlist, &stb);
1171 dev = stb.st_dev;
1172 fstat(fd, &stb);
1173 if (stb.st_rdev == dev) {
1174 pr_err("backup file must NOT be on the array being reshaped.\n");
1175 close(*fdlist);
1176 return 0;
1177 }
1178
1179 memset(buf, 0, 512);
1180 for (i=0; i < blocks + 8 ; i++) {
1181 if (write(*fdlist, buf, 512) != 512) {
1182 pr_err("%s: cannot create backup file %s: %s\n",
1183 devname, backup_file, strerror(errno));
1184 return 0;
1185 }
1186 }
1187 if (fsync(*fdlist) != 0) {
1188 pr_err("%s: cannot create backup file %s: %s\n",
1189 devname, backup_file, strerror(errno));
1190 return 0;
1191 }
1192
1193 if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) {
1194 char *bu = make_backup(sys_name);
1195 if (symlink(backup_file, bu))
1196 pr_err("Recording backup file in " MAP_DIR " failed: %s\n",
1197 strerror(errno));
1198 free(bu);
1199 }
1200
1201 return 1;
1202 }
1203
1204 unsigned long compute_backup_blocks(int nchunk, int ochunk,
1205 unsigned int ndata, unsigned int odata)
1206 {
1207 unsigned long a, b, blocks;
1208 /* So how much do we need to backup.
1209 * We need an amount of data which is both a whole number of
1210 * old stripes and a whole number of new stripes.
1211 * So LCM for (chunksize*datadisks).
1212 */
1213 a = (ochunk/512) * odata;
1214 b = (nchunk/512) * ndata;
1215 /* Find GCD */
1216 a = GCD(a, b);
1217 /* LCM == product / GCD */
1218 blocks = (unsigned long)(ochunk/512) * (unsigned long)(nchunk/512) *
1219 odata * ndata / a;
1220
1221 return blocks;
1222 }
1223
1224 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
1225 {
1226 /* Based on the current array state in info->array and
1227 * the changes in info->new_* etc, determine:
1228 * - whether the change is possible
1229 * - Intermediate level/raid_disks/layout
1230 * - whether a restriping reshape is needed
1231 * - number of sectors in minimum change unit. This
1232 * will cover a whole number of stripes in 'before' and
1233 * 'after'.
1234 *
1235 * Return message if the change should be rejected
1236 * NULL if the change can be achieved
1237 *
1238 * This can be called as part of starting a reshape, or
1239 * when assembling an array that is undergoing reshape.
1240 */
1241 int near, far, offset, copies;
1242 int new_disks;
1243 int old_chunk, new_chunk;
1244 /* delta_parity records change in number of devices
1245 * caused by level change
1246 */
1247 int delta_parity = 0;
1248
1249 memset(re, 0, sizeof(*re));
1250
1251 /* If a new level not explicitly given, we assume no-change */
1252 if (info->new_level == UnSet)
1253 info->new_level = info->array.level;
1254
1255 if (info->new_chunk)
1256 switch (info->new_level) {
1257 case 0:
1258 case 4:
1259 case 5:
1260 case 6:
1261 case 10:
1262 /* chunk size is meaningful, must divide component_size
1263 * evenly
1264 */
1265 if (info->component_size % (info->new_chunk/512)) {
1266 unsigned long long shrink = info->component_size;
1267 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1268 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1269 info->new_chunk/1024, info->component_size/2);
1270 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1271 devname, shrink/2);
1272 pr_err("will shrink the array so the given chunk size would work.\n");
1273 return "";
1274 }
1275 break;
1276 default:
1277 return "chunk size not meaningful for this level";
1278 }
1279 else
1280 info->new_chunk = info->array.chunk_size;
1281
1282 switch (info->array.level) {
1283 default:
1284 return "No reshape is possibly for this RAID level";
1285 case LEVEL_LINEAR:
1286 if (info->delta_disks != UnSet)
1287 return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
1288 else
1289 return "Only --add is supported for LINEAR, other --grow options are not meaningful";
1290 case 1:
1291 /* RAID1 can convert to RAID1 with different disks, or
1292 * raid5 with 2 disks, or
1293 * raid0 with 1 disk
1294 */
1295 if (info->new_level > 1 && (info->component_size & 7))
1296 return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
1297 if (info->new_level == 0) {
1298 if (info->delta_disks != UnSet &&
1299 info->delta_disks != 0)
1300 return "Cannot change number of disks with RAID1->RAID0 conversion";
1301 re->level = 0;
1302 re->before.data_disks = 1;
1303 re->after.data_disks = 1;
1304 return NULL;
1305 }
1306 if (info->new_level == 1) {
1307 if (info->delta_disks == UnSet)
1308 /* Don't know what to do */
1309 return "no change requested for Growing RAID1";
1310 re->level = 1;
1311 return NULL;
1312 }
1313 if (info->array.raid_disks != 2 && info->new_level == 5)
1314 return "Can only convert a 2-device array to RAID5";
1315 if (info->array.raid_disks == 2 && info->new_level == 5) {
1316 re->level = 5;
1317 re->before.data_disks = 1;
1318 if (info->delta_disks != UnSet &&
1319 info->delta_disks != 0)
1320 re->after.data_disks = 1 + info->delta_disks;
1321 else
1322 re->after.data_disks = 1;
1323 if (re->after.data_disks < 1)
1324 return "Number of disks too small for RAID5";
1325
1326 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1327 info->array.chunk_size = 65536;
1328 break;
1329 }
1330 /* Could do some multi-stage conversions, but leave that to
1331 * later.
1332 */
1333 return "Impossibly level change request for RAID1";
1334
1335 case 10:
1336 /* RAID10 can be converted from near mode to
1337 * RAID0 by removing some devices.
1338 * It can also be reshaped if the kernel supports
1339 * new_data_offset.
1340 */
1341 switch (info->new_level) {
1342 case 0:
1343 if ((info->array.layout & ~0xff) != 0x100)
1344 return "Cannot Grow RAID10 with far/offset layout";
1345 /*
1346 * number of devices must be multiple of
1347 * number of copies
1348 */
1349 if (info->array.raid_disks %
1350 (info->array.layout & 0xff))
1351 return "RAID10 layout too complex for Grow operation";
1352
1353 new_disks = (info->array.raid_disks /
1354 (info->array.layout & 0xff));
1355 if (info->delta_disks == UnSet)
1356 info->delta_disks = (new_disks
1357 - info->array.raid_disks);
1358
1359 if (info->delta_disks !=
1360 new_disks - info->array.raid_disks)
1361 return "New number of raid-devices impossible for RAID10";
1362 if (info->new_chunk &&
1363 info->new_chunk != info->array.chunk_size)
1364 return "Cannot change chunk-size with RAID10 Grow";
1365
1366 /* looks good */
1367 re->level = 0;
1368 re->before.data_disks = new_disks;
1369 re->after.data_disks = re->before.data_disks;
1370 return NULL;
1371
1372 case 10:
1373 near = info->array.layout & 0xff;
1374 far = (info->array.layout >> 8) & 0xff;
1375 offset = info->array.layout & 0x10000;
1376 if (far > 1 && !offset)
1377 return "Cannot reshape RAID10 in far-mode";
1378 copies = near * far;
1379
1380 old_chunk = info->array.chunk_size * far;
1381
1382 if (info->new_layout == UnSet)
1383 info->new_layout = info->array.layout;
1384 else {
1385 near = info->new_layout & 0xff;
1386 far = (info->new_layout >> 8) & 0xff;
1387 offset = info->new_layout & 0x10000;
1388 if (far > 1 && !offset)
1389 return "Cannot reshape RAID10 to far-mode";
1390 if (near * far != copies)
1391 return "Cannot change number of copies when reshaping RAID10";
1392 }
1393 if (info->delta_disks == UnSet)
1394 info->delta_disks = 0;
1395 new_disks = (info->array.raid_disks +
1396 info->delta_disks);
1397
1398 new_chunk = info->new_chunk * far;
1399
1400 re->level = 10;
1401 re->before.layout = info->array.layout;
1402 re->before.data_disks = info->array.raid_disks;
1403 re->after.layout = info->new_layout;
1404 re->after.data_disks = new_disks;
1405 /* For RAID10 we don't do backup but do allow reshape,
1406 * so set backup_blocks to INVALID_SECTORS rather than
1407 * zero.
1408 * And there is no need to synchronise stripes on both
1409 * 'old' and 'new'. So the important
1410 * number is the minimum data_offset difference
1411 * which is the larger of (offset copies * chunk).
1412 */
1413 re->backup_blocks = INVALID_SECTORS;
1414 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1415 if (new_disks < re->before.data_disks &&
1416 info->space_after < re->min_offset_change)
1417 /* Reduce component size by one chunk */
1418 re->new_size = (info->component_size -
1419 re->min_offset_change);
1420 else
1421 re->new_size = info->component_size;
1422 re->new_size = re->new_size * new_disks / copies;
1423 return NULL;
1424
1425 default:
1426 return "RAID10 can only be changed to RAID0";
1427 }
1428 case 0:
1429 /* RAID0 can be converted to RAID10, or to RAID456 */
1430 if (info->new_level == 10) {
1431 if (info->new_layout == UnSet &&
1432 info->delta_disks == UnSet) {
1433 /* Assume near=2 layout */
1434 info->new_layout = 0x102;
1435 info->delta_disks = info->array.raid_disks;
1436 }
1437 if (info->new_layout == UnSet) {
1438 int copies = 1 + (info->delta_disks
1439 / info->array.raid_disks);
1440 if (info->array.raid_disks * (copies-1) !=
1441 info->delta_disks)
1442 return "Impossible number of devices for RAID0->RAID10";
1443 info->new_layout = 0x100 + copies;
1444 }
1445 if (info->delta_disks == UnSet) {
1446 int copies = info->new_layout & 0xff;
1447 if (info->new_layout != 0x100 + copies)
1448 return "New layout impossible for RAID0->RAID10";;
1449 info->delta_disks = (copies - 1) *
1450 info->array.raid_disks;
1451 }
1452 if (info->new_chunk &&
1453 info->new_chunk != info->array.chunk_size)
1454 return "Cannot change chunk-size with RAID0->RAID10";
1455 /* looks good */
1456 re->level = 10;
1457 re->before.data_disks = (info->array.raid_disks +
1458 info->delta_disks);
1459 re->after.data_disks = re->before.data_disks;
1460 re->before.layout = info->new_layout;
1461 return NULL;
1462 }
1463
1464 /* RAID0 can also covert to RAID0/4/5/6 by first converting to
1465 * a raid4 style layout of the final level.
1466 */
1467 switch (info->new_level) {
1468 case 4:
1469 delta_parity = 1;
1470 case 0:
1471 re->level = 4;
1472 re->before.layout = 0;
1473 break;
1474 case 5:
1475 delta_parity = 1;
1476 re->level = 5;
1477 re->before.layout = ALGORITHM_PARITY_N;
1478 if (info->new_layout == UnSet)
1479 info->new_layout = map_name(r5layout, "default");
1480 break;
1481 case 6:
1482 delta_parity = 2;
1483 re->level = 6;
1484 re->before.layout = ALGORITHM_PARITY_N;
1485 if (info->new_layout == UnSet)
1486 info->new_layout = map_name(r6layout, "default");
1487 break;
1488 default:
1489 return "Impossible level change requested";
1490 }
1491 re->before.data_disks = info->array.raid_disks;
1492 /* determining 'after' layout happens outside this 'switch' */
1493 break;
1494
1495 case 4:
1496 info->array.layout = ALGORITHM_PARITY_N;
1497 case 5:
1498 switch (info->new_level) {
1499 case 0:
1500 delta_parity = -1;
1501 case 4:
1502 re->level = info->array.level;
1503 re->before.data_disks = info->array.raid_disks - 1;
1504 re->before.layout = info->array.layout;
1505 break;
1506 case 5:
1507 re->level = 5;
1508 re->before.data_disks = info->array.raid_disks - 1;
1509 re->before.layout = info->array.layout;
1510 break;
1511 case 6:
1512 delta_parity = 1;
1513 re->level = 6;
1514 re->before.data_disks = info->array.raid_disks - 1;
1515 switch (info->array.layout) {
1516 case ALGORITHM_LEFT_ASYMMETRIC:
1517 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1518 break;
1519 case ALGORITHM_RIGHT_ASYMMETRIC:
1520 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1521 break;
1522 case ALGORITHM_LEFT_SYMMETRIC:
1523 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1524 break;
1525 case ALGORITHM_RIGHT_SYMMETRIC:
1526 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1527 break;
1528 case ALGORITHM_PARITY_0:
1529 re->before.layout = ALGORITHM_PARITY_0_6;
1530 break;
1531 case ALGORITHM_PARITY_N:
1532 re->before.layout = ALGORITHM_PARITY_N_6;
1533 break;
1534 default:
1535 return "Cannot convert an array with this layout";
1536 }
1537 break;
1538 case 1:
1539 if (info->array.raid_disks != 2)
1540 return "Can only convert a 2-device array to RAID1";
1541 if (info->delta_disks != UnSet &&
1542 info->delta_disks != 0)
1543 return "Cannot set raid_disk when converting RAID5->RAID1";
1544 re->level = 1;
1545 info->new_chunk = 0;
1546 return NULL;
1547 default:
1548 return "Impossible level change requested";
1549 }
1550 break;
1551 case 6:
1552 switch (info->new_level) {
1553 case 4:
1554 case 5:
1555 delta_parity = -1;
1556 case 6:
1557 re->level = 6;
1558 re->before.data_disks = info->array.raid_disks - 2;
1559 re->before.layout = info->array.layout;
1560 break;
1561 default:
1562 return "Impossible level change requested";
1563 }
1564 break;
1565 }
1566
1567 /* If we reached here then it looks like a re-stripe is
1568 * happening. We have determined the intermediate level
1569 * and initial raid_disks/layout and stored these in 're'.
1570 *
1571 * We need to deduce the final layout that can be atomically
1572 * converted to the end state.
1573 */
1574 switch (info->new_level) {
1575 case 0:
1576 /* We can only get to RAID0 from RAID4 or RAID5
1577 * with appropriate layout and one extra device
1578 */
1579 if (re->level != 4 && re->level != 5)
1580 return "Cannot covert to RAID0 from this level";
1581
1582 switch (re->level) {
1583 case 4:
1584 re->before.layout = 0;
1585 re->after.layout = 0;
1586 break;
1587 case 5:
1588 re->after.layout = ALGORITHM_PARITY_N;
1589 break;
1590 }
1591 break;
1592
1593 case 4:
1594 /* We can only get to RAID4 from RAID5 */
1595 if (re->level != 4 && re->level != 5)
1596 return "Cannot convert to RAID4 from this level";
1597
1598 switch (re->level) {
1599 case 4:
1600 re->after.layout = 0;
1601 break;
1602 case 5:
1603 re->after.layout = ALGORITHM_PARITY_N;
1604 break;
1605 }
1606 break;
1607
1608 case 5:
1609 /* We get to RAID5 from RAID5 or RAID6 */
1610 if (re->level != 5 && re->level != 6)
1611 return "Cannot convert to RAID5 from this level";
1612
1613 switch (re->level) {
1614 case 5:
1615 if (info->new_layout == UnSet)
1616 re->after.layout = re->before.layout;
1617 else
1618 re->after.layout = info->new_layout;
1619 break;
1620 case 6:
1621 if (info->new_layout == UnSet)
1622 info->new_layout = re->before.layout;
1623
1624 /* after.layout needs to be raid6 version of new_layout */
1625 if (info->new_layout == ALGORITHM_PARITY_N)
1626 re->after.layout = ALGORITHM_PARITY_N;
1627 else {
1628 char layout[40];
1629 char *ls = map_num(r5layout, info->new_layout);
1630 int l;
1631 if (ls) {
1632 /* Current RAID6 layout has a RAID5
1633 * equivalent - good
1634 */
1635 strcat(strcpy(layout, ls), "-6");
1636 l = map_name(r6layout, layout);
1637 if (l == UnSet)
1638 return "Cannot find RAID6 layout to convert to";
1639 } else {
1640 /* Current RAID6 has no equivalent.
1641 * If it is already a '-6' layout we
1642 * can leave it unchanged, else we must
1643 * fail
1644 */
1645 ls = map_num(r6layout,
1646 info->new_layout);
1647 if (!ls ||
1648 strcmp(ls+strlen(ls)-2, "-6") != 0)
1649 return "Please specify new layout";
1650 l = info->new_layout;
1651 }
1652 re->after.layout = l;
1653 }
1654 }
1655 break;
1656
1657 case 6:
1658 /* We must already be at level 6 */
1659 if (re->level != 6)
1660 return "Impossible level change";
1661 if (info->new_layout == UnSet)
1662 re->after.layout = info->array.layout;
1663 else
1664 re->after.layout = info->new_layout;
1665 break;
1666 default:
1667 return "Impossible level change requested";
1668 }
1669 if (info->delta_disks == UnSet)
1670 info->delta_disks = delta_parity;
1671
1672 re->after.data_disks =
1673 (re->before.data_disks + info->delta_disks - delta_parity);
1674
1675 switch (re->level) {
1676 case 6:
1677 re->parity = 2;
1678 break;
1679 case 4:
1680 case 5:
1681 re->parity = 1;
1682 break;
1683 default:
1684 re->parity = 0;
1685 break;
1686 }
1687 /* So we have a restripe operation, we need to calculate the number
1688 * of blocks per reshape operation.
1689 */
1690 re->new_size = info->component_size * re->before.data_disks;
1691 if (info->new_chunk == 0)
1692 info->new_chunk = info->array.chunk_size;
1693 if (re->after.data_disks == re->before.data_disks &&
1694 re->after.layout == re->before.layout &&
1695 info->new_chunk == info->array.chunk_size) {
1696 /* Nothing to change, can change level immediately. */
1697 re->level = info->new_level;
1698 re->backup_blocks = 0;
1699 return NULL;
1700 }
1701 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1702 /* chunk and layout changes make no difference */
1703 re->level = info->new_level;
1704 re->backup_blocks = 0;
1705 return NULL;
1706 }
1707
1708 if (re->after.data_disks == re->before.data_disks &&
1709 get_linux_version() < 2006032)
1710 return "in-place reshape is not safe before 2.6.32 - sorry.";
1711
1712 if (re->after.data_disks < re->before.data_disks &&
1713 get_linux_version() < 2006030)
1714 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1715
1716 re->backup_blocks = compute_backup_blocks(
1717 info->new_chunk, info->array.chunk_size,
1718 re->after.data_disks, re->before.data_disks);
1719 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1720
1721 re->new_size = info->component_size * re->after.data_disks;
1722 return NULL;
1723 }
1724
1725 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1726 char *text_version)
1727 {
1728 struct mdinfo *info;
1729 char *subarray;
1730 int ret_val = -1;
1731
1732 if ((st == NULL) || (sra == NULL))
1733 return ret_val;
1734
1735 if (text_version == NULL)
1736 text_version = sra->text_version;
1737 subarray = strchr(text_version + 1, '/')+1;
1738 info = st->ss->container_content(st, subarray);
1739 if (info) {
1740 unsigned long long current_size = 0;
1741 unsigned long long new_size = info->custom_array_size/2;
1742
1743 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1744 new_size > current_size) {
1745 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1746 < 0)
1747 dprintf("Error: Cannot set array size");
1748 else {
1749 ret_val = 0;
1750 dprintf("Array size changed");
1751 }
1752 dprintf_cont(" from %llu to %llu.\n",
1753 current_size, new_size);
1754 }
1755 sysfs_free(info);
1756 } else
1757 dprintf("Error: set_array_size(): info pointer in NULL\n");
1758
1759 return ret_val;
1760 }
1761
1762 static int reshape_array(char *container, int fd, char *devname,
1763 struct supertype *st, struct mdinfo *info,
1764 int force, struct mddev_dev *devlist,
1765 unsigned long long data_offset,
1766 char *backup_file, int verbose, int forked,
1767 int restart, int freeze_reshape);
1768 static int reshape_container(char *container, char *devname,
1769 int mdfd,
1770 struct supertype *st,
1771 struct mdinfo *info,
1772 int force,
1773 char *backup_file, int verbose,
1774 int forked, int restart, int freeze_reshape);
1775
1776 int Grow_reshape(char *devname, int fd,
1777 struct mddev_dev *devlist,
1778 unsigned long long data_offset,
1779 struct context *c, struct shape *s)
1780 {
1781 /* Make some changes in the shape of an array.
1782 * The kernel must support the change.
1783 *
1784 * There are three different changes. Each can trigger
1785 * a resync or recovery so we freeze that until we have
1786 * requested everything (if kernel supports freezing - 2.6.30).
1787 * The steps are:
1788 * - change size (i.e. component_size)
1789 * - change level
1790 * - change layout/chunksize/ndisks
1791 *
1792 * The last can require a reshape. It is different on different
1793 * levels so we need to check the level before actioning it.
1794 * Some times the level change needs to be requested after the
1795 * reshape (e.g. raid6->raid5, raid5->raid0)
1796 *
1797 */
1798 struct mdu_array_info_s array;
1799 int rv = 0;
1800 struct supertype *st;
1801 char *subarray = NULL;
1802
1803 int frozen;
1804 int changed = 0;
1805 char *container = NULL;
1806 int cfd = -1;
1807
1808 struct mddev_dev *dv;
1809 int added_disks;
1810
1811 struct mdinfo info;
1812 struct mdinfo *sra;
1813
1814 if (md_get_array_info(fd, &array) < 0) {
1815 pr_err("%s is not an active md array - aborting\n",
1816 devname);
1817 return 1;
1818 }
1819 if (s->level != UnSet && s->chunk) {
1820 pr_err("Cannot change array level in the same operation as changing chunk size.\n");
1821 return 1;
1822 }
1823
1824 if (data_offset != INVALID_SECTORS && array.level != 10 &&
1825 (array.level < 4 || array.level > 6)) {
1826 pr_err("--grow --data-offset not yet supported\n");
1827 return 1;
1828 }
1829
1830 if (s->size > 0 &&
1831 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1832 pr_err("cannot change component size at the same time as other changes.\n"
1833 " Change size first, then check data is intact before making other changes.\n");
1834 return 1;
1835 }
1836
1837 if (s->raiddisks && s->raiddisks < array.raid_disks &&
1838 array.level > 1 && get_linux_version() < 2006032 &&
1839 !check_env("MDADM_FORCE_FEWER")) {
1840 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1841 " Please use a newer kernel\n");
1842 return 1;
1843 }
1844
1845 if (array.level > 1 && s->size > 1 &&
1846 (unsigned long long) (array.chunk_size / 1024) > s->size) {
1847 pr_err("component size must be larger than chunk size.\n");
1848 return 1;
1849 }
1850
1851 st = super_by_fd(fd, &subarray);
1852 if (!st) {
1853 pr_err("Unable to determine metadata format for %s\n", devname);
1854 return 1;
1855 }
1856 if (s->raiddisks > st->max_devs) {
1857 pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs);
1858 return 1;
1859 }
1860 if (s->level == 0 && (array.state & (1 << MD_SB_BITMAP_PRESENT)) &&
1861 !(array.state & (1 << MD_SB_CLUSTERED)) && !st->ss->external) {
1862 array.state &= ~(1 << MD_SB_BITMAP_PRESENT);
1863 if (md_set_array_info(fd, &array) != 0) {
1864 pr_err("failed to remove internal bitmap.\n");
1865 return 1;
1866 }
1867 }
1868
1869 /* in the external case we need to check that the requested reshape is
1870 * supported, and perform an initial check that the container holds the
1871 * pre-requisite spare devices (mdmon owns final validation)
1872 */
1873 if (st->ss->external) {
1874 int retval;
1875
1876 if (subarray) {
1877 container = st->container_devnm;
1878 cfd = open_dev_excl(st->container_devnm);
1879 } else {
1880 container = st->devnm;
1881 close(fd);
1882 cfd = open_dev_excl(st->devnm);
1883 fd = cfd;
1884 }
1885 if (cfd < 0) {
1886 pr_err("Unable to open container for %s\n", devname);
1887 free(subarray);
1888 return 1;
1889 }
1890
1891 retval = st->ss->load_container(st, cfd, NULL);
1892
1893 if (retval) {
1894 pr_err("Cannot read superblock for %s\n", devname);
1895 close(cfd);
1896 free(subarray);
1897 return 1;
1898 }
1899
1900 if (s->raiddisks && subarray) {
1901 pr_err("--raid-devices operation can be performed on a container only\n");
1902 close(cfd);
1903 free(subarray);
1904 return 1;
1905 }
1906
1907 /* check if operation is supported for metadata handler */
1908 if (st->ss->container_content) {
1909 struct mdinfo *cc = NULL;
1910 struct mdinfo *content = NULL;
1911
1912 cc = st->ss->container_content(st, subarray);
1913 for (content = cc; content ; content = content->next) {
1914 int allow_reshape = 1;
1915
1916 /* check if reshape is allowed based on metadata
1917 * indications stored in content.array.status
1918 */
1919 if (content->array.state &
1920 (1 << MD_SB_BLOCK_VOLUME))
1921 allow_reshape = 0;
1922 if (content->array.state &
1923 (1 << MD_SB_BLOCK_CONTAINER_RESHAPE))
1924 allow_reshape = 0;
1925 if (!allow_reshape) {
1926 pr_err("cannot reshape arrays in container with unsupported metadata: %s(%s)\n",
1927 devname, container);
1928 sysfs_free(cc);
1929 free(subarray);
1930 return 1;
1931 }
1932 if (content->consistency_policy ==
1933 CONSISTENCY_POLICY_PPL) {
1934 pr_err("Operation not supported when ppl consistency policy is enabled\n");
1935 sysfs_free(cc);
1936 free(subarray);
1937 return 1;
1938 }
1939 if (content->consistency_policy ==
1940 CONSISTENCY_POLICY_BITMAP) {
1941 pr_err("Operation not supported when write-intent bitmap is enabled\n");
1942 sysfs_free(cc);
1943 free(subarray);
1944 return 1;
1945 }
1946 }
1947 sysfs_free(cc);
1948 }
1949 if (mdmon_running(container))
1950 st->update_tail = &st->updates;
1951 }
1952
1953 added_disks = 0;
1954 for (dv = devlist; dv; dv = dv->next)
1955 added_disks++;
1956 if (s->raiddisks > array.raid_disks &&
1957 array.spare_disks + added_disks <
1958 (s->raiddisks - array.raid_disks) &&
1959 !c->force) {
1960 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
1961 " Use --force to over-ride this check.\n",
1962 s->raiddisks - array.raid_disks,
1963 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1964 array.spare_disks + added_disks);
1965 return 1;
1966 }
1967
1968 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS |
1969 GET_STATE | GET_VERSION);
1970 if (sra) {
1971 if (st->ss->external && subarray == NULL) {
1972 array.level = LEVEL_CONTAINER;
1973 sra->array.level = LEVEL_CONTAINER;
1974 }
1975 } else {
1976 pr_err("failed to read sysfs parameters for %s\n",
1977 devname);
1978 return 1;
1979 }
1980 frozen = freeze(st);
1981 if (frozen < -1) {
1982 /* freeze() already spewed the reason */
1983 sysfs_free(sra);
1984 return 1;
1985 } else if (frozen < 0) {
1986 pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname);
1987 sysfs_free(sra);
1988 return 1;
1989 }
1990
1991 /* ========= set size =============== */
1992 if (s->size > 0 &&
1993 (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1994 unsigned long long orig_size = get_component_size(fd)/2;
1995 unsigned long long min_csize;
1996 struct mdinfo *mdi;
1997 int raid0_takeover = 0;
1998
1999 if (orig_size == 0)
2000 orig_size = (unsigned) array.size;
2001
2002 if (orig_size == 0) {
2003 pr_err("Cannot set device size in this type of array.\n");
2004 rv = 1;
2005 goto release;
2006 }
2007
2008 if (array.level == 0) {
2009 pr_err("Component size change is not supported for RAID0\n");
2010 rv = 1;
2011 goto release;
2012 }
2013
2014 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
2015 devname, APPLY_METADATA_CHANGES,
2016 c->verbose > 0)) {
2017 rv = 1;
2018 goto release;
2019 }
2020 sync_metadata(st);
2021 if (st->ss->external) {
2022 /* metadata can have size limitation
2023 * update size value according to metadata information
2024 */
2025 struct mdinfo *sizeinfo =
2026 st->ss->container_content(st, subarray);
2027 if (sizeinfo) {
2028 unsigned long long new_size =
2029 sizeinfo->custom_array_size/2;
2030 int data_disks = get_data_disks(
2031 sizeinfo->array.level,
2032 sizeinfo->array.layout,
2033 sizeinfo->array.raid_disks);
2034 new_size /= data_disks;
2035 dprintf("Metadata size correction from %llu to %llu (%llu)\n",
2036 orig_size, new_size,
2037 new_size * data_disks);
2038 s->size = new_size;
2039 sysfs_free(sizeinfo);
2040 }
2041 }
2042
2043 /* Update the size of each member device in case
2044 * they have been resized. This will never reduce
2045 * below the current used-size. The "size" attribute
2046 * understands '0' to mean 'max'.
2047 */
2048 min_csize = 0;
2049 for (mdi = sra->devs; mdi; mdi = mdi->next) {
2050 sysfs_set_num(sra, mdi, "size",
2051 s->size == MAX_SIZE ? 0 : s->size);
2052 if (array.not_persistent == 0 &&
2053 array.major_version == 0 &&
2054 get_linux_version() < 3001000) {
2055 /* Dangerous to allow size to exceed 2TB */
2056 unsigned long long csize;
2057 if (sysfs_get_ll(sra, mdi, "size",
2058 &csize) == 0) {
2059 if (csize >= 2ULL*1024*1024*1024)
2060 csize = 2ULL*1024*1024*1024;
2061 if ((min_csize == 0 ||
2062 (min_csize > csize)))
2063 min_csize = csize;
2064 }
2065 }
2066 }
2067 if (min_csize && s->size > min_csize) {
2068 pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n");
2069 rv = 1;
2070 goto size_change_error;
2071 }
2072 if (min_csize && s->size == MAX_SIZE) {
2073 /* Don't let the kernel choose a size - it will get
2074 * it wrong
2075 */
2076 pr_err("Limited v0.90 array to 2TB per device\n");
2077 s->size = min_csize;
2078 }
2079 if (st->ss->external) {
2080 if (sra->array.level == 0) {
2081 rv = sysfs_set_str(sra, NULL, "level", "raid5");
2082 if (!rv) {
2083 raid0_takeover = 1;
2084 /* get array parameters after takeover
2085 * to change one parameter at time only
2086 */
2087 rv = md_get_array_info(fd, &array);
2088 }
2089 }
2090 /* make sure mdmon is
2091 * aware of the new level */
2092 if (!mdmon_running(st->container_devnm))
2093 start_mdmon(st->container_devnm);
2094 ping_monitor(container);
2095 if (mdmon_running(st->container_devnm) &&
2096 st->update_tail == NULL)
2097 st->update_tail = &st->updates;
2098 }
2099
2100 if (s->size == MAX_SIZE)
2101 s->size = 0;
2102 array.size = s->size;
2103 if (s->size & ~INT32_MAX) {
2104 /* got truncated to 32bit, write to
2105 * component_size instead
2106 */
2107 if (sra)
2108 rv = sysfs_set_num(sra, NULL,
2109 "component_size", s->size);
2110 else
2111 rv = -1;
2112 } else {
2113 rv = md_set_array_info(fd, &array);
2114
2115 /* manage array size when it is managed externally
2116 */
2117 if ((rv == 0) && st->ss->external)
2118 rv = set_array_size(st, sra, sra->text_version);
2119 }
2120
2121 if (raid0_takeover) {
2122 /* do not recync non-existing parity,
2123 * we will drop it anyway
2124 */
2125 sysfs_set_str(sra, NULL, "sync_action", "frozen");
2126 /* go back to raid0, drop parity disk
2127 */
2128 sysfs_set_str(sra, NULL, "level", "raid0");
2129 md_get_array_info(fd, &array);
2130 }
2131
2132 size_change_error:
2133 if (rv != 0) {
2134 int err = errno;
2135
2136 /* restore metadata */
2137 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
2138 UnSet, NULL, devname,
2139 ROLLBACK_METADATA_CHANGES,
2140 c->verbose) == 0)
2141 sync_metadata(st);
2142 pr_err("Cannot set device size for %s: %s\n",
2143 devname, strerror(err));
2144 if (err == EBUSY &&
2145 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2146 cont_err("Bitmap must be removed before size can be changed\n");
2147 rv = 1;
2148 goto release;
2149 }
2150 if (s->assume_clean) {
2151 /* This will fail on kernels older than 3.0 unless
2152 * a backport has been arranged.
2153 */
2154 if (sra == NULL ||
2155 sysfs_set_str(sra, NULL, "resync_start",
2156 "none") < 0)
2157 pr_err("--assume-clean not supported with --grow on this kernel\n");
2158 }
2159 md_get_array_info(fd, &array);
2160 s->size = get_component_size(fd)/2;
2161 if (s->size == 0)
2162 s->size = array.size;
2163 if (c->verbose >= 0) {
2164 if (s->size == orig_size)
2165 pr_err("component size of %s unchanged at %lluK\n",
2166 devname, s->size);
2167 else
2168 pr_err("component size of %s has been set to %lluK\n",
2169 devname, s->size);
2170 }
2171 changed = 1;
2172 } else if (array.level != LEVEL_CONTAINER) {
2173 s->size = get_component_size(fd)/2;
2174 if (s->size == 0)
2175 s->size = array.size;
2176 }
2177
2178 /* See if there is anything else to do */
2179 if ((s->level == UnSet || s->level == array.level) &&
2180 (s->layout_str == NULL) &&
2181 (s->chunk == 0 || s->chunk == array.chunk_size) &&
2182 data_offset == INVALID_SECTORS &&
2183 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
2184 /* Nothing more to do */
2185 if (!changed && c->verbose >= 0)
2186 pr_err("%s: no change requested\n", devname);
2187 goto release;
2188 }
2189
2190 /* ========= check for Raid10/Raid1 -> Raid0 conversion ===============
2191 * current implementation assumes that following conditions must be met:
2192 * - RAID10:
2193 * - far_copies == 1
2194 * - near_copies == 2
2195 */
2196 if ((s->level == 0 && array.level == 10 && sra &&
2197 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
2198 (s->level == 0 && array.level == 1 && sra)) {
2199 int err;
2200
2201 err = remove_disks_for_takeover(st, sra, array.layout);
2202 if (err) {
2203 dprintf("Array cannot be reshaped\n");
2204 if (cfd > -1)
2205 close(cfd);
2206 rv = 1;
2207 goto release;
2208 }
2209 /* Make sure mdmon has seen the device removal
2210 * and updated metadata before we continue with
2211 * level change
2212 */
2213 if (container)
2214 ping_monitor(container);
2215 }
2216
2217 memset(&info, 0, sizeof(info));
2218 info.array = array;
2219 if (sysfs_init(&info, fd, NULL)) {
2220 pr_err("failed to initialize sysfs.\n");
2221 rv = 1;
2222 goto release;
2223 }
2224 strcpy(info.text_version, sra->text_version);
2225 info.component_size = s->size*2;
2226 info.new_level = s->level;
2227 info.new_chunk = s->chunk * 1024;
2228 if (info.array.level == LEVEL_CONTAINER) {
2229 info.delta_disks = UnSet;
2230 info.array.raid_disks = s->raiddisks;
2231 } else if (s->raiddisks)
2232 info.delta_disks = s->raiddisks - info.array.raid_disks;
2233 else
2234 info.delta_disks = UnSet;
2235 if (s->layout_str == NULL) {
2236 info.new_layout = UnSet;
2237 if (info.array.level == 6 &&
2238 (info.new_level == 6 || info.new_level == UnSet) &&
2239 info.array.layout >= 16) {
2240 pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname);
2241 cont_err("during the reshape, please specify --layout=preserve\n");
2242 cont_err("If you want to change it, specify a layout or use --layout=normalise\n");
2243 rv = 1;
2244 goto release;
2245 }
2246 } else if (strcmp(s->layout_str, "normalise") == 0 ||
2247 strcmp(s->layout_str, "normalize") == 0) {
2248 /* If we have a -6 RAID6 layout, remove the '-6'. */
2249 info.new_layout = UnSet;
2250 if (info.array.level == 6 && info.new_level == UnSet) {
2251 char l[40], *h;
2252 strcpy(l, map_num_s(r6layout, info.array.layout));
2253 h = strrchr(l, '-');
2254 if (h && strcmp(h, "-6") == 0) {
2255 *h = 0;
2256 info.new_layout = map_name(r6layout, l);
2257 }
2258 } else {
2259 pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str);
2260 rv = 1;
2261 goto release;
2262 }
2263 } else if (strcmp(s->layout_str, "preserve") == 0) {
2264 /* This means that a non-standard RAID6 layout
2265 * is OK.
2266 * In particular:
2267 * - When reshape a RAID6 (e.g. adding a device)
2268 * which is in a non-standard layout, it is OK
2269 * to preserve that layout.
2270 * - When converting a RAID5 to RAID6, leave it in
2271 * the XXX-6 layout, don't re-layout.
2272 */
2273 if (info.array.level == 6 && info.new_level == UnSet)
2274 info.new_layout = info.array.layout;
2275 else if (info.array.level == 5 && info.new_level == 6) {
2276 char l[40];
2277 strcpy(l, map_num_s(r5layout, info.array.layout));
2278 strcat(l, "-6");
2279 info.new_layout = map_name(r6layout, l);
2280 } else {
2281 pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str);
2282 rv = 1;
2283 goto release;
2284 }
2285 } else {
2286 int l = info.new_level;
2287 if (l == UnSet)
2288 l = info.array.level;
2289 switch (l) {
2290 case 5:
2291 info.new_layout = map_name(r5layout, s->layout_str);
2292 break;
2293 case 6:
2294 info.new_layout = map_name(r6layout, s->layout_str);
2295 break;
2296 case 10:
2297 info.new_layout = parse_layout_10(s->layout_str);
2298 break;
2299 case LEVEL_FAULTY:
2300 info.new_layout = parse_layout_faulty(s->layout_str);
2301 break;
2302 default:
2303 pr_err("layout not meaningful with this level\n");
2304 rv = 1;
2305 goto release;
2306 }
2307 if (info.new_layout == UnSet) {
2308 pr_err("layout %s not understood for this level\n",
2309 s->layout_str);
2310 rv = 1;
2311 goto release;
2312 }
2313 }
2314
2315 if (array.level == LEVEL_FAULTY) {
2316 if (s->level != UnSet && s->level != array.level) {
2317 pr_err("cannot change level of Faulty device\n");
2318 rv =1 ;
2319 }
2320 if (s->chunk) {
2321 pr_err("cannot set chunksize of Faulty device\n");
2322 rv =1 ;
2323 }
2324 if (s->raiddisks && s->raiddisks != 1) {
2325 pr_err("cannot set raid_disks of Faulty device\n");
2326 rv =1 ;
2327 }
2328 if (s->layout_str) {
2329 if (md_get_array_info(fd, &array) != 0) {
2330 dprintf("Cannot get array information.\n");
2331 goto release;
2332 }
2333 array.layout = info.new_layout;
2334 if (md_set_array_info(fd, &array) != 0) {
2335 pr_err("failed to set new layout\n");
2336 rv = 1;
2337 } else if (c->verbose >= 0)
2338 printf("layout for %s set to %d\n",
2339 devname, array.layout);
2340 }
2341 } else if (array.level == LEVEL_CONTAINER) {
2342 /* This change is to be applied to every array in the
2343 * container. This is only needed when the metadata imposes
2344 * restraints of the various arrays in the container.
2345 * Currently we only know that IMSM requires all arrays
2346 * to have the same number of devices so changing the
2347 * number of devices (On-Line Capacity Expansion) must be
2348 * performed at the level of the container
2349 */
2350 close_fd(&fd);
2351 rv = reshape_container(container, devname, -1, st, &info,
2352 c->force, c->backup_file, c->verbose,
2353 0, 0, 0);
2354 frozen = 0;
2355 } else {
2356 /* get spare devices from external metadata
2357 */
2358 if (st->ss->external) {
2359 struct mdinfo *info2;
2360
2361 info2 = st->ss->container_content(st, subarray);
2362 if (info2) {
2363 info.array.spare_disks =
2364 info2->array.spare_disks;
2365 sysfs_free(info2);
2366 }
2367 }
2368
2369 /* Impose these changes on a single array. First
2370 * check that the metadata is OK with the change. */
2371
2372 if (reshape_super(st, 0, info.new_level,
2373 info.new_layout, info.new_chunk,
2374 info.array.raid_disks, info.delta_disks,
2375 c->backup_file, devname,
2376 APPLY_METADATA_CHANGES, c->verbose)) {
2377 rv = 1;
2378 goto release;
2379 }
2380 sync_metadata(st);
2381 rv = reshape_array(container, fd, devname, st, &info, c->force,
2382 devlist, data_offset, c->backup_file,
2383 c->verbose, 0, 0, 0);
2384 frozen = 0;
2385 }
2386 release:
2387 sysfs_free(sra);
2388 if (frozen > 0)
2389 unfreeze(st);
2390 return rv;
2391 }
2392
2393 /* verify_reshape_position()
2394 * Function checks if reshape position in metadata is not farther
2395 * than position in md.
2396 * Return value:
2397 * 0 : not valid sysfs entry
2398 * it can be caused by not started reshape, it should be started
2399 * by reshape array or raid0 array is before takeover
2400 * -1 : error, reshape position is obviously wrong
2401 * 1 : success, reshape progress correct or updated
2402 */
2403 static int verify_reshape_position(struct mdinfo *info, int level)
2404 {
2405 int ret_val = 0;
2406 char buf[40];
2407 int rv;
2408
2409 /* read sync_max, failure can mean raid0 array */
2410 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2411
2412 if (rv > 0) {
2413 char *ep;
2414 unsigned long long position = strtoull(buf, &ep, 0);
2415
2416 dprintf("Read sync_max sysfs entry is: %s\n", buf);
2417 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2418 position *= get_data_disks(level,
2419 info->new_layout,
2420 info->array.raid_disks);
2421 if (info->reshape_progress < position) {
2422 dprintf("Corrected reshape progress (%llu) to md position (%llu)\n",
2423 info->reshape_progress, position);
2424 info->reshape_progress = position;
2425 ret_val = 1;
2426 } else if (info->reshape_progress > position) {
2427 pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n",
2428 position, info->reshape_progress);
2429 ret_val = -1;
2430 } else {
2431 dprintf("Reshape position in md and metadata are the same;");
2432 ret_val = 1;
2433 }
2434 }
2435 } else if (rv == 0) {
2436 /* for valid sysfs entry, 0-length content
2437 * should be indicated as error
2438 */
2439 ret_val = -1;
2440 }
2441
2442 return ret_val;
2443 }
2444
2445 static unsigned long long choose_offset(unsigned long long lo,
2446 unsigned long long hi,
2447 unsigned long long min,
2448 unsigned long long max)
2449 {
2450 /* Choose a new offset between hi and lo.
2451 * It must be between min and max, but
2452 * we would prefer something near the middle of hi/lo, and also
2453 * prefer to be aligned to a big power of 2.
2454 *
2455 * So we start with the middle, then for each bit,
2456 * starting at '1' and increasing, if it is set, we either
2457 * add it or subtract it if possible, preferring the option
2458 * which is furthest from the boundary.
2459 *
2460 * We stop once we get a 1MB alignment. As units are in sectors,
2461 * 1MB = 2*1024 sectors.
2462 */
2463 unsigned long long choice = (lo + hi) / 2;
2464 unsigned long long bit = 1;
2465
2466 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2467 unsigned long long bigger, smaller;
2468 if (! (bit & choice))
2469 continue;
2470 bigger = choice + bit;
2471 smaller = choice - bit;
2472 if (bigger > max && smaller < min)
2473 break;
2474 if (bigger > max)
2475 choice = smaller;
2476 else if (smaller < min)
2477 choice = bigger;
2478 else if (hi - bigger > smaller - lo)
2479 choice = bigger;
2480 else
2481 choice = smaller;
2482 }
2483 return choice;
2484 }
2485
2486 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2487 char *devname, int delta_disks,
2488 unsigned long long data_offset,
2489 unsigned long long min,
2490 int can_fallback)
2491 {
2492 struct mdinfo *sd;
2493 int dir = 0;
2494 int err = 0;
2495 unsigned long long before, after;
2496
2497 /* Need to find min space before and after so same is used
2498 * on all devices
2499 */
2500 before = UINT64_MAX;
2501 after = UINT64_MAX;
2502 for (sd = sra->devs; sd; sd = sd->next) {
2503 char *dn;
2504 int dfd;
2505 int rv;
2506 struct supertype *st2;
2507 struct mdinfo info2;
2508
2509 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2510 continue;
2511 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2512 dfd = dev_open(dn, O_RDONLY);
2513 if (dfd < 0) {
2514 pr_err("%s: cannot open component %s\n",
2515 devname, dn ? dn : "-unknown-");
2516 goto release;
2517 }
2518 st2 = dup_super(st);
2519 rv = st2->ss->load_super(st2,dfd, NULL);
2520 close(dfd);
2521 if (rv) {
2522 free(st2);
2523 pr_err("%s: cannot get superblock from %s\n",
2524 devname, dn);
2525 goto release;
2526 }
2527 st2->ss->getinfo_super(st2, &info2, NULL);
2528 st2->ss->free_super(st2);
2529 free(st2);
2530 if (info2.space_before == 0 &&
2531 info2.space_after == 0) {
2532 /* Metadata doesn't support data_offset changes */
2533 if (!can_fallback)
2534 pr_err("%s: Metadata version doesn't support data_offset changes\n",
2535 devname);
2536 goto fallback;
2537 }
2538 if (before > info2.space_before)
2539 before = info2.space_before;
2540 if (after > info2.space_after)
2541 after = info2.space_after;
2542
2543 if (data_offset != INVALID_SECTORS) {
2544 if (dir == 0) {
2545 if (info2.data_offset == data_offset) {
2546 pr_err("%s: already has that data_offset\n",
2547 dn);
2548 goto release;
2549 }
2550 if (data_offset < info2.data_offset)
2551 dir = -1;
2552 else
2553 dir = 1;
2554 } else if ((data_offset <= info2.data_offset &&
2555 dir == 1) ||
2556 (data_offset >= info2.data_offset &&
2557 dir == -1)) {
2558 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2559 dn);
2560 goto release;
2561 }
2562 }
2563 }
2564 if (before == UINT64_MAX)
2565 /* impossible really, there must be no devices */
2566 return 1;
2567
2568 for (sd = sra->devs; sd; sd = sd->next) {
2569 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2570 unsigned long long new_data_offset;
2571
2572 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2573 continue;
2574 if (delta_disks < 0) {
2575 /* Don't need any space as array is shrinking
2576 * just move data_offset up by min
2577 */
2578 if (data_offset == INVALID_SECTORS)
2579 new_data_offset = sd->data_offset + min;
2580 else {
2581 if (data_offset < sd->data_offset + min) {
2582 pr_err("--data-offset too small for %s\n",
2583 dn);
2584 goto release;
2585 }
2586 new_data_offset = data_offset;
2587 }
2588 } else if (delta_disks > 0) {
2589 /* need space before */
2590 if (before < min) {
2591 if (can_fallback)
2592 goto fallback;
2593 pr_err("Insufficient head-space for reshape on %s\n",
2594 dn);
2595 goto release;
2596 }
2597 if (data_offset == INVALID_SECTORS)
2598 new_data_offset = sd->data_offset - min;
2599 else {
2600 if (data_offset > sd->data_offset - min) {
2601 pr_err("--data-offset too large for %s\n",
2602 dn);
2603 goto release;
2604 }
2605 new_data_offset = data_offset;
2606 }
2607 } else {
2608 if (dir == 0) {
2609 /* can move up or down. If 'data_offset'
2610 * was set we would have already decided,
2611 * so just choose direction with most space.
2612 */
2613 if (before > after)
2614 dir = -1;
2615 else
2616 dir = 1;
2617 }
2618 sysfs_set_str(sra, NULL, "reshape_direction",
2619 dir == 1 ? "backwards" : "forwards");
2620 if (dir > 0) {
2621 /* Increase data offset */
2622 if (after < min) {
2623 if (can_fallback)
2624 goto fallback;
2625 pr_err("Insufficient tail-space for reshape on %s\n",
2626 dn);
2627 goto release;
2628 }
2629 if (data_offset != INVALID_SECTORS &&
2630 data_offset < sd->data_offset + min) {
2631 pr_err("--data-offset too small on %s\n",
2632 dn);
2633 goto release;
2634 }
2635 if (data_offset != INVALID_SECTORS)
2636 new_data_offset = data_offset;
2637 else
2638 new_data_offset = choose_offset(sd->data_offset,
2639 sd->data_offset + after,
2640 sd->data_offset + min,
2641 sd->data_offset + after);
2642 } else {
2643 /* Decrease data offset */
2644 if (before < min) {
2645 if (can_fallback)
2646 goto fallback;
2647 pr_err("insufficient head-room on %s\n",
2648 dn);
2649 goto release;
2650 }
2651 if (data_offset != INVALID_SECTORS &&
2652 data_offset > sd->data_offset - min) {
2653 pr_err("--data-offset too large on %s\n",
2654 dn);
2655 goto release;
2656 }
2657 if (data_offset != INVALID_SECTORS)
2658 new_data_offset = data_offset;
2659 else
2660 new_data_offset = choose_offset(sd->data_offset - before,
2661 sd->data_offset,
2662 sd->data_offset - before,
2663 sd->data_offset - min);
2664 }
2665 }
2666 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2667 if (err < 0 && errno == E2BIG) {
2668 /* try again after increasing data size to max */
2669 err = sysfs_set_num(sra, sd, "size", 0);
2670 if (err < 0 && errno == EINVAL &&
2671 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2672 /* some kernels have a bug where you cannot
2673 * use '0' on spare devices. */
2674 sysfs_set_num(sra, sd, "size",
2675 (sra->component_size + after)/2);
2676 }
2677 err = sysfs_set_num(sra, sd, "new_offset",
2678 new_data_offset);
2679 }
2680 if (err < 0) {
2681 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2682 pr_err("data-offset is too big for %s\n", dn);
2683 goto release;
2684 }
2685 if (sd == sra->devs &&
2686 (errno == ENOENT || errno == E2BIG))
2687 /* Early kernel, no 'new_offset' file,
2688 * or kernel doesn't like us.
2689 * For RAID5/6 this is not fatal
2690 */
2691 return 1;
2692 pr_err("Cannot set new_offset for %s\n", dn);
2693 break;
2694 }
2695 }
2696 return err;
2697 release:
2698 return -1;
2699 fallback:
2700 /* Just use a backup file */
2701 return 1;
2702 }
2703
2704 static int raid10_reshape(char *container, int fd, char *devname,
2705 struct supertype *st, struct mdinfo *info,
2706 struct reshape *reshape,
2707 unsigned long long data_offset,
2708 int force, int verbose)
2709 {
2710 /* Changing raid_disks, layout, chunksize or possibly
2711 * just data_offset for a RAID10.
2712 * We must always change data_offset. We change by at least
2713 * ->min_offset_change which is the largest of the old and new
2714 * chunk sizes.
2715 * If raid_disks is increasing, then data_offset must decrease
2716 * by at least this copy size.
2717 * If raid_disks is unchanged, data_offset must increase or
2718 * decrease by at least min_offset_change but preferably by much more.
2719 * We choose half of the available space.
2720 * If raid_disks is decreasing, data_offset must increase by
2721 * at least min_offset_change. To allow of this, component_size
2722 * must be decreased by the same amount.
2723 *
2724 * So we calculate the required minimum and direction, possibly
2725 * reduce the component_size, then iterate through the devices
2726 * and set the new_data_offset.
2727 * If that all works, we set chunk_size, layout, raid_disks, and start
2728 * 'reshape'
2729 */
2730 struct mdinfo *sra;
2731 unsigned long long min;
2732 int err = 0;
2733
2734 sra = sysfs_read(fd, NULL,
2735 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2736 );
2737 if (!sra) {
2738 pr_err("%s: Cannot get array details from sysfs\n", devname);
2739 goto release;
2740 }
2741 min = reshape->min_offset_change;
2742
2743 if (info->delta_disks)
2744 sysfs_set_str(sra, NULL, "reshape_direction",
2745 info->delta_disks < 0 ? "backwards" : "forwards");
2746 if (info->delta_disks < 0 && info->space_after < min) {
2747 int rv = sysfs_set_num(sra, NULL, "component_size",
2748 (sra->component_size - min)/2);
2749 if (rv) {
2750 pr_err("cannot reduce component size\n");
2751 goto release;
2752 }
2753 }
2754 err = set_new_data_offset(sra, st, devname, info->delta_disks,
2755 data_offset, min, 0);
2756 if (err == 1) {
2757 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2758 cont_err("supported on this kernel\n");
2759 err = -1;
2760 }
2761 if (err < 0)
2762 goto release;
2763
2764 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2765 err = errno;
2766 if (!err && sysfs_set_num(sra, NULL, "layout",
2767 reshape->after.layout) < 0)
2768 err = errno;
2769 if (!err &&
2770 sysfs_set_num(sra, NULL, "raid_disks",
2771 info->array.raid_disks + info->delta_disks) < 0)
2772 err = errno;
2773 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2774 err = errno;
2775 if (err) {
2776 pr_err("Cannot set array shape for %s\n",
2777 devname);
2778 if (err == EBUSY &&
2779 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2780 cont_err(" Bitmap must be removed before shape can be changed\n");
2781 goto release;
2782 }
2783 sysfs_free(sra);
2784 return 0;
2785 release:
2786 sysfs_free(sra);
2787 return 1;
2788 }
2789
2790 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2791 {
2792 struct mdinfo *sra, *sd;
2793 /* Initialisation to silence compiler warning */
2794 unsigned long long min_space_before = 0, min_space_after = 0;
2795 int first = 1;
2796
2797 sra = sysfs_read(fd, NULL, GET_DEVS);
2798 if (!sra)
2799 return;
2800 for (sd = sra->devs; sd; sd = sd->next) {
2801 char *dn;
2802 int dfd;
2803 struct supertype *st2;
2804 struct mdinfo info2;
2805
2806 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2807 continue;
2808 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2809 dfd = dev_open(dn, O_RDONLY);
2810 if (dfd < 0)
2811 break;
2812 st2 = dup_super(st);
2813 if (st2->ss->load_super(st2,dfd, NULL)) {
2814 close(dfd);
2815 free(st2);
2816 break;
2817 }
2818 close(dfd);
2819 st2->ss->getinfo_super(st2, &info2, NULL);
2820 st2->ss->free_super(st2);
2821 free(st2);
2822 if (first ||
2823 min_space_before > info2.space_before)
2824 min_space_before = info2.space_before;
2825 if (first ||
2826 min_space_after > info2.space_after)
2827 min_space_after = info2.space_after;
2828 first = 0;
2829 }
2830 if (sd == NULL && !first) {
2831 info->space_after = min_space_after;
2832 info->space_before = min_space_before;
2833 }
2834 sysfs_free(sra);
2835 }
2836
2837 static void update_cache_size(char *container, struct mdinfo *sra,
2838 struct mdinfo *info,
2839 int disks, unsigned long long blocks)
2840 {
2841 /* Check that the internal stripe cache is
2842 * large enough, or it won't work.
2843 * It must hold at least 4 stripes of the larger
2844 * chunk size
2845 */
2846 unsigned long cache;
2847 cache = max(info->array.chunk_size, info->new_chunk);
2848 cache *= 4; /* 4 stripes minimum */
2849 cache /= 512; /* convert to sectors */
2850 /* make sure there is room for 'blocks' with a bit to spare */
2851 if (cache < 16 + blocks / disks)
2852 cache = 16 + blocks / disks;
2853 cache /= (4096/512); /* Convert from sectors to pages */
2854
2855 if (sra->cache_size < cache)
2856 subarray_set_num(container, sra, "stripe_cache_size",
2857 cache+1);
2858 }
2859
2860 static int impose_reshape(struct mdinfo *sra,
2861 struct mdinfo *info,
2862 struct supertype *st,
2863 int fd,
2864 int restart,
2865 char *devname, char *container,
2866 struct reshape *reshape)
2867 {
2868 struct mdu_array_info_s array;
2869
2870 sra->new_chunk = info->new_chunk;
2871
2872 if (restart) {
2873 /* for external metadata checkpoint saved by mdmon can be lost
2874 * or missed /due to e.g. crash/. Check if md is not during
2875 * restart farther than metadata points to.
2876 * If so, this means metadata information is obsolete.
2877 */
2878 if (st->ss->external)
2879 verify_reshape_position(info, reshape->level);
2880 sra->reshape_progress = info->reshape_progress;
2881 } else {
2882 sra->reshape_progress = 0;
2883 if (reshape->after.data_disks < reshape->before.data_disks)
2884 /* start from the end of the new array */
2885 sra->reshape_progress = (sra->component_size
2886 * reshape->after.data_disks);
2887 }
2888
2889 md_get_array_info(fd, &array);
2890 if (info->array.chunk_size == info->new_chunk &&
2891 reshape->before.layout == reshape->after.layout &&
2892 st->ss->external == 0) {
2893 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2894 array.raid_disks = reshape->after.data_disks + reshape->parity;
2895 if (!restart && md_set_array_info(fd, &array) != 0) {
2896 int err = errno;
2897
2898 pr_err("Cannot set device shape for %s: %s\n",
2899 devname, strerror(errno));
2900
2901 if (err == EBUSY &&
2902 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2903 cont_err("Bitmap must be removed before shape can be changed\n");
2904
2905 goto release;
2906 }
2907 } else if (!restart) {
2908 /* set them all just in case some old 'new_*' value
2909 * persists from some earlier problem.
2910 */
2911 int err = 0;
2912 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2913 err = errno;
2914 if (!err && sysfs_set_num(sra, NULL, "layout",
2915 reshape->after.layout) < 0)
2916 err = errno;
2917 if (!err && subarray_set_num(container, sra, "raid_disks",
2918 reshape->after.data_disks +
2919 reshape->parity) < 0)
2920 err = errno;
2921 if (err) {
2922 pr_err("Cannot set device shape for %s\n", devname);
2923
2924 if (err == EBUSY &&
2925 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2926 cont_err("Bitmap must be removed before shape can be changed\n");
2927 goto release;
2928 }
2929 }
2930 return 0;
2931 release:
2932 return -1;
2933 }
2934
2935 static int impose_level(int fd, int level, char *devname, int verbose)
2936 {
2937 char *c;
2938 struct mdu_array_info_s array;
2939 struct mdinfo info;
2940
2941 if (sysfs_init(&info, fd, NULL)) {
2942 pr_err("failed to initialize sysfs.\n");
2943 return 1;
2944 }
2945
2946 md_get_array_info(fd, &array);
2947 if (level == 0 && is_level456(array.level)) {
2948 /* To convert to RAID0 we need to fail and
2949 * remove any non-data devices. */
2950 int found = 0;
2951 int d;
2952 int data_disks = array.raid_disks - 1;
2953 if (array.level == 6)
2954 data_disks -= 1;
2955 if (array.level == 5 && array.layout != ALGORITHM_PARITY_N)
2956 return -1;
2957 if (array.level == 6 && array.layout != ALGORITHM_PARITY_N_6)
2958 return -1;
2959 sysfs_set_str(&info, NULL,"sync_action", "idle");
2960 /* First remove any spares so no recovery starts */
2961 for (d = 0, found = 0;
2962 d < MAX_DISKS && found < array.nr_disks; d++) {
2963 mdu_disk_info_t disk;
2964 disk.number = d;
2965 if (md_get_disk_info(fd, &disk) < 0)
2966 continue;
2967 if (disk.major == 0 && disk.minor == 0)
2968 continue;
2969 found++;
2970 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2971 disk.raid_disk < data_disks)
2972 /* keep this */
2973 continue;
2974 ioctl(fd, HOT_REMOVE_DISK,
2975 makedev(disk.major, disk.minor));
2976 }
2977 /* Now fail anything left */
2978 md_get_array_info(fd, &array);
2979 for (d = 0, found = 0;
2980 d < MAX_DISKS && found < array.nr_disks; d++) {
2981 mdu_disk_info_t disk;
2982 disk.number = d;
2983 if (md_get_disk_info(fd, &disk) < 0)
2984 continue;
2985 if (disk.major == 0 && disk.minor == 0)
2986 continue;
2987 found++;
2988 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2989 disk.raid_disk < data_disks)
2990 /* keep this */
2991 continue;
2992 ioctl(fd, SET_DISK_FAULTY,
2993 makedev(disk.major, disk.minor));
2994 hot_remove_disk(fd, makedev(disk.major, disk.minor), 1);
2995 }
2996 }
2997 c = map_num(pers, level);
2998 if (c) {
2999 int err = sysfs_set_str(&info, NULL, "level", c);
3000 if (err) {
3001 err = errno;
3002 pr_err("%s: could not set level to %s\n",
3003 devname, c);
3004 if (err == EBUSY &&
3005 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
3006 cont_err("Bitmap must be removed before level can be changed\n");
3007 return err;
3008 }
3009 if (verbose >= 0)
3010 pr_err("level of %s changed to %s\n", devname, c);
3011 }
3012 return 0;
3013 }
3014
3015 int sigterm = 0;
3016 static void catch_term(int sig)
3017 {
3018 sigterm = 1;
3019 }
3020
3021 static int reshape_array(char *container, int fd, char *devname,
3022 struct supertype *st, struct mdinfo *info,
3023 int force, struct mddev_dev *devlist,
3024 unsigned long long data_offset,
3025 char *backup_file, int verbose, int forked,
3026 int restart, int freeze_reshape)
3027 {
3028 struct reshape reshape;
3029 int spares_needed;
3030 char *msg;
3031 int orig_level = UnSet;
3032 int odisks;
3033 int delayed;
3034
3035 struct mdu_array_info_s array;
3036 char *c;
3037
3038 struct mddev_dev *dv;
3039 int added_disks;
3040
3041 int *fdlist = NULL;
3042 unsigned long long *offsets = NULL;
3043 int d;
3044 int nrdisks;
3045 int err;
3046 unsigned long blocks;
3047 unsigned long long array_size;
3048 int done;
3049 struct mdinfo *sra = NULL;
3050 char buf[20];
3051
3052 /* when reshaping a RAID0, the component_size might be zero.
3053 * So try to fix that up.
3054 */
3055 if (md_get_array_info(fd, &array) != 0) {
3056 dprintf("Cannot get array information.\n");
3057 goto release;
3058 }
3059 if (array.level == 0 && info->component_size == 0) {
3060 get_dev_size(fd, NULL, &array_size);
3061 info->component_size = array_size / array.raid_disks;
3062 }
3063
3064 if (array.level == 10)
3065 /* Need space_after info */
3066 get_space_after(fd, st, info);
3067
3068 if (info->reshape_active) {
3069 int new_level = info->new_level;
3070 info->new_level = UnSet;
3071 if (info->delta_disks > 0)
3072 info->array.raid_disks -= info->delta_disks;
3073 msg = analyse_change(devname, info, &reshape);
3074 info->new_level = new_level;
3075 if (info->delta_disks > 0)
3076 info->array.raid_disks += info->delta_disks;
3077 if (!restart)
3078 /* Make sure the array isn't read-only */
3079 ioctl(fd, RESTART_ARRAY_RW, 0);
3080 } else
3081 msg = analyse_change(devname, info, &reshape);
3082 if (msg) {
3083 /* if msg == "", error has already been printed */
3084 if (msg[0])
3085 pr_err("%s\n", msg);
3086 goto release;
3087 }
3088 if (restart && (reshape.level != info->array.level ||
3089 reshape.before.layout != info->array.layout ||
3090 reshape.before.data_disks + reshape.parity !=
3091 info->array.raid_disks - max(0, info->delta_disks))) {
3092 pr_err("reshape info is not in native format - cannot continue.\n");
3093 goto release;
3094 }
3095
3096 if (st->ss->external && restart && (info->reshape_progress == 0) &&
3097 !((sysfs_get_str(info, NULL, "sync_action",
3098 buf, sizeof(buf)) > 0) &&
3099 (strncmp(buf, "reshape", 7) == 0))) {
3100 /* When reshape is restarted from '0', very begin of array
3101 * it is possible that for external metadata reshape and array
3102 * configuration doesn't happen.
3103 * Check if md has the same opinion, and reshape is restarted
3104 * from 0. If so, this is regular reshape start after reshape
3105 * switch in metadata to next array only.
3106 */
3107 if ((verify_reshape_position(info, reshape.level) >= 0) &&
3108 (info->reshape_progress == 0))
3109 restart = 0;
3110 }
3111 if (restart) {
3112 /*
3113 * reshape already started. just skip to monitoring
3114 * the reshape
3115 */
3116 if (reshape.backup_blocks == 0)
3117 return 0;
3118 if (restart & RESHAPE_NO_BACKUP)
3119 return 0;
3120
3121 /* Need 'sra' down at 'started:' */
3122 sra = sysfs_read(fd, NULL,
3123 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|
3124 GET_CHUNK|GET_CACHE);
3125 if (!sra) {
3126 pr_err("%s: Cannot get array details from sysfs\n",
3127 devname);
3128 goto release;
3129 }
3130
3131 if (!backup_file)
3132 backup_file = locate_backup(sra->sys_name);
3133
3134 goto started;
3135 }
3136 /* The container is frozen but the array may not be.
3137 * So freeze the array so spares don't get put to the wrong use
3138 * FIXME there should probably be a cleaner separation between
3139 * freeze_array and freeze_container.
3140 */
3141 sysfs_freeze_array(info);
3142 /* Check we have enough spares to not be degraded */
3143 added_disks = 0;
3144 for (dv = devlist; dv ; dv=dv->next)
3145 added_disks++;
3146 spares_needed = max(reshape.before.data_disks,
3147 reshape.after.data_disks) +
3148 reshape.parity - array.raid_disks;
3149
3150 if (!force && info->new_level > 1 && info->array.level > 1 &&
3151 spares_needed > info->array.spare_disks + added_disks) {
3152 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
3153 " Use --force to over-ride this check.\n",
3154 spares_needed,
3155 spares_needed == 1 ? "" : "s",
3156 info->array.spare_disks + added_disks);
3157 goto release;
3158 }
3159 /* Check we have enough spares to not fail */
3160 spares_needed = max(reshape.before.data_disks,
3161 reshape.after.data_disks)
3162 - array.raid_disks;
3163 if ((info->new_level > 1 || info->new_level == 0) &&
3164 spares_needed > info->array.spare_disks +added_disks) {
3165 pr_err("Need %d spare%s to create working array, and only have %d.\n",
3166 spares_needed, spares_needed == 1 ? "" : "s",
3167 info->array.spare_disks + added_disks);
3168 goto release;
3169 }
3170
3171 if (reshape.level != array.level) {
3172 int err = impose_level(fd, reshape.level, devname, verbose);
3173 if (err)
3174 goto release;
3175 info->new_layout = UnSet; /* after level change,
3176 * layout is meaningless */
3177 orig_level = array.level;
3178 sysfs_freeze_array(info);
3179
3180 if (reshape.level > 0 && st->ss->external) {
3181 /* make sure mdmon is aware of the new level */
3182 if (mdmon_running(container))
3183 flush_mdmon(container);
3184
3185 if (!mdmon_running(container))
3186 start_mdmon(container);
3187 ping_monitor(container);
3188 if (mdmon_running(container) && st->update_tail == NULL)
3189 st->update_tail = &st->updates;
3190 }
3191 }
3192 /* ->reshape_super might have chosen some spares from the
3193 * container that it wants to be part of the new array.
3194 * We can collect them with ->container_content and give
3195 * them to the kernel.
3196 */
3197 if (st->ss->reshape_super && st->ss->container_content) {
3198 char *subarray = strchr(info->text_version+1, '/')+1;
3199 struct mdinfo *info2 =
3200 st->ss->container_content(st, subarray);
3201 struct mdinfo *d;
3202
3203 if (info2) {
3204 if (sysfs_init(info2, fd, st->devnm)) {
3205 pr_err("unable to initialize sysfs for %s\n",
3206 st->devnm);
3207 free(info2);
3208 goto release;
3209 }
3210 /* When increasing number of devices, we need to set
3211 * new raid_disks before adding these, or they might
3212 * be rejected.
3213 */
3214 if (reshape.backup_blocks &&
3215 reshape.after.data_disks >
3216 reshape.before.data_disks)
3217 subarray_set_num(container, info2, "raid_disks",
3218 reshape.after.data_disks +
3219 reshape.parity);
3220 for (d = info2->devs; d; d = d->next) {
3221 if (d->disk.state == 0 &&
3222 d->disk.raid_disk >= 0) {
3223 /* This is a spare that wants to
3224 * be part of the array.
3225 */
3226 add_disk(fd, st, info2, d);
3227 }
3228 }
3229 sysfs_free(info2);
3230 }
3231 }
3232 /* We might have been given some devices to add to the
3233 * array. Now that the array has been changed to the right
3234 * level and frozen, we can safely add them.
3235 */
3236 if (devlist) {
3237 if (Manage_subdevs(devname, fd, devlist, verbose, 0, NULL, 0))
3238 goto release;
3239 }
3240
3241 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
3242 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
3243 if (reshape.backup_blocks == 0) {
3244 /* No restriping needed, but we might need to impose
3245 * some more changes: layout, raid_disks, chunk_size
3246 */
3247 /* read current array info */
3248 if (md_get_array_info(fd, &array) != 0) {
3249 dprintf("Cannot get array information.\n");
3250 goto release;
3251 }
3252 /* compare current array info with new values and if
3253 * it is different update them to new */
3254 if (info->new_layout != UnSet &&
3255 info->new_layout != array.layout) {
3256 array.layout = info->new_layout;
3257 if (md_set_array_info(fd, &array) != 0) {
3258 pr_err("failed to set new layout\n");
3259 goto release;
3260 } else if (verbose >= 0)
3261 printf("layout for %s set to %d\n",
3262 devname, array.layout);
3263 }
3264 if (info->delta_disks != UnSet && info->delta_disks != 0 &&
3265 array.raid_disks !=
3266 (info->array.raid_disks + info->delta_disks)) {
3267 array.raid_disks += info->delta_disks;
3268 if (md_set_array_info(fd, &array) != 0) {
3269 pr_err("failed to set raid disks\n");
3270 goto release;
3271 } else if (verbose >= 0) {
3272 printf("raid_disks for %s set to %d\n",
3273 devname, array.raid_disks);
3274 }
3275 }
3276 if (info->new_chunk != 0 &&
3277 info->new_chunk != array.chunk_size) {
3278 if (sysfs_set_num(info, NULL,
3279 "chunk_size", info->new_chunk) != 0) {
3280 pr_err("failed to set chunk size\n");
3281 goto release;
3282 } else if (verbose >= 0)
3283 printf("chunk size for %s set to %d\n",
3284 devname, info->new_chunk);
3285 }
3286 unfreeze(st);
3287 return 0;
3288 }
3289
3290 /*
3291 * There are three possibilities.
3292 * 1/ The array will shrink.
3293 * We need to ensure the reshape will pause before reaching
3294 * the 'critical section'. We also need to fork and wait for
3295 * that to happen. When it does we
3296 * suspend/backup/complete/unfreeze
3297 *
3298 * 2/ The array will not change size.
3299 * This requires that we keep a backup of a sliding window
3300 * so that we can restore data after a crash. So we need
3301 * to fork and monitor progress.
3302 * In future we will allow the data_offset to change, so
3303 * a sliding backup becomes unnecessary.
3304 *
3305 * 3/ The array will grow. This is relatively easy.
3306 * However the kernel's restripe routines will cheerfully
3307 * overwrite some early data before it is safe. So we
3308 * need to make a backup of the early parts of the array
3309 * and be ready to restore it if rebuild aborts very early.
3310 * For externally managed metadata, we still need a forked
3311 * child to monitor the reshape and suspend IO over the region
3312 * that is being reshaped.
3313 *
3314 * We backup data by writing it to one spare, or to a
3315 * file which was given on command line.
3316 *
3317 * In each case, we first make sure that storage is available
3318 * for the required backup.
3319 * Then we:
3320 * - request the shape change.
3321 * - fork to handle backup etc.
3322 */
3323 /* Check that we can hold all the data */
3324 get_dev_size(fd, NULL, &array_size);
3325 if (reshape.new_size < (array_size/512)) {
3326 pr_err("this change will reduce the size of the array.\n"
3327 " use --grow --array-size first to truncate array.\n"
3328 " e.g. mdadm --grow %s --array-size %llu\n",
3329 devname, reshape.new_size/2);
3330 goto release;
3331 }
3332
3333 if (array.level == 10) {
3334 /* Reshaping RAID10 does not require any data backup by
3335 * user-space. Instead it requires that the data_offset
3336 * is changed to avoid the need for backup.
3337 * So this is handled very separately
3338 */
3339 if (restart)
3340 /* Nothing to do. */
3341 return 0;
3342 return raid10_reshape(container, fd, devname, st, info,
3343 &reshape, data_offset, force, verbose);
3344 }
3345 sra = sysfs_read(fd, NULL,
3346 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3347 GET_CACHE);
3348 if (!sra) {
3349 pr_err("%s: Cannot get array details from sysfs\n",
3350 devname);
3351 goto release;
3352 }
3353
3354 if (!backup_file)
3355 switch(set_new_data_offset(sra, st, devname,
3356 reshape.after.data_disks - reshape.before.data_disks,
3357 data_offset,
3358 reshape.min_offset_change, 1)) {
3359 case -1:
3360 goto release;
3361 case 0:
3362 /* Updated data_offset, so it's easy now */
3363 update_cache_size(container, sra, info,
3364 min(reshape.before.data_disks,
3365 reshape.after.data_disks),
3366 reshape.backup_blocks);
3367
3368 /* Right, everything seems fine. Let's kick things off.
3369 */
3370 sync_metadata(st);
3371
3372 if (impose_reshape(sra, info, st, fd, restart,
3373 devname, container, &reshape) < 0)
3374 goto release;
3375 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3376 struct mdinfo *sd;
3377 if (errno != EINVAL) {
3378 pr_err("Failed to initiate reshape!\n");
3379 goto release;
3380 }
3381 /* revert data_offset and try the old way */
3382 for (sd = sra->devs; sd; sd = sd->next) {
3383 sysfs_set_num(sra, sd, "new_offset",
3384 sd->data_offset);
3385 sysfs_set_str(sra, NULL, "reshape_direction",
3386 "forwards");
3387 }
3388 break;
3389 }
3390 if (info->new_level == reshape.level)
3391 return 0;
3392 /* need to adjust level when reshape completes */
3393 switch(fork()) {
3394 case -1: /* ignore error, but don't wait */
3395 return 0;
3396 default: /* parent */
3397 return 0;
3398 case 0:
3399 manage_fork_fds(0);
3400 map_fork();
3401 break;
3402 }
3403 close(fd);
3404 wait_reshape(sra);
3405 fd = open_dev(sra->sys_name);
3406 if (fd >= 0)
3407 impose_level(fd, info->new_level, devname, verbose);
3408 return 0;
3409 case 1: /* Couldn't set data_offset, try the old way */
3410 if (data_offset != INVALID_SECTORS) {
3411 pr_err("Cannot update data_offset on this array\n");
3412 goto release;
3413 }
3414 break;
3415 }
3416
3417 started:
3418 /* Decide how many blocks (sectors) for a reshape
3419 * unit. The number we have so far is just a minimum
3420 */
3421 blocks = reshape.backup_blocks;
3422 if (reshape.before.data_disks ==
3423 reshape.after.data_disks) {
3424 /* Make 'blocks' bigger for better throughput, but
3425 * not so big that we reject it below.
3426 * Try for 16 megabytes
3427 */
3428 while (blocks * 32 < sra->component_size && blocks < 16*1024*2)
3429 blocks *= 2;
3430 } else
3431 pr_err("Need to backup %luK of critical section..\n", blocks/2);
3432
3433 if (blocks >= sra->component_size/2) {
3434 pr_err("%s: Something wrong - reshape aborted\n", devname);
3435 goto release;
3436 }
3437
3438 /* Now we need to open all these devices so we can read/write.
3439 */
3440 nrdisks = max(reshape.before.data_disks,
3441 reshape.after.data_disks) + reshape.parity
3442 + sra->array.spare_disks;
3443 fdlist = xcalloc((1+nrdisks), sizeof(int));
3444 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3445
3446 odisks = reshape.before.data_disks + reshape.parity;
3447 d = reshape_prepare_fdlist(devname, sra, odisks, nrdisks, blocks,
3448 backup_file, fdlist, offsets);
3449 if (d < odisks) {
3450 goto release;
3451 }
3452 if ((st->ss->manage_reshape == NULL) ||
3453 (st->ss->recover_backup == NULL)) {
3454 if (backup_file == NULL) {
3455 if (reshape.after.data_disks <=
3456 reshape.before.data_disks) {
3457 pr_err("%s: Cannot grow - need backup-file\n",
3458 devname);
3459 pr_err(" Please provide one with \"--backup=...\"\n");
3460 goto release;
3461 } else if (d == odisks) {
3462 pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname);
3463 goto release;
3464 }
3465 } else {
3466 if (!reshape_open_backup_file(backup_file, fd, devname,
3467 (signed)blocks,
3468 fdlist+d, offsets+d,
3469 sra->sys_name, restart)) {
3470 goto release;
3471 }
3472 d++;
3473 }
3474 }
3475
3476 update_cache_size(container, sra, info,
3477 min(reshape.before.data_disks,
3478 reshape.after.data_disks), blocks);
3479
3480 /* Right, everything seems fine. Let's kick things off.
3481 * If only changing raid_disks, use ioctl, else use
3482 * sysfs.
3483 */
3484 sync_metadata(st);
3485
3486 if (impose_reshape(sra, info, st, fd, restart,
3487 devname, container, &reshape) < 0)
3488 goto release;
3489
3490 err = start_reshape(sra, restart, reshape.before.data_disks,
3491 reshape.after.data_disks, st);
3492 if (err) {
3493 pr_err("Cannot %s reshape for %s\n",
3494 restart ? "continue" : "start", devname);
3495 goto release;
3496 }
3497 if (restart)
3498 sysfs_set_str(sra, NULL, "array_state", "active");
3499 if (freeze_reshape) {
3500 free(fdlist);
3501 free(offsets);
3502 sysfs_free(sra);
3503 pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n",
3504 sra->reshape_progress);
3505 return 1;
3506 }
3507
3508 if (!forked)
3509 if (continue_via_systemd(container ?: sra->sys_name,
3510 GROW_SERVICE)) {
3511 free(fdlist);
3512 free(offsets);
3513 sysfs_free(sra);
3514 return 0;
3515 }
3516
3517 /* Now we just need to kick off the reshape and watch, while
3518 * handling backups of the data...
3519 * This is all done by a forked background process.
3520 */
3521 switch(forked ? 0 : fork()) {
3522 case -1:
3523 pr_err("Cannot run child to monitor reshape: %s\n",
3524 strerror(errno));
3525 abort_reshape(sra);
3526 goto release;
3527 default:
3528 free(fdlist);
3529 free(offsets);
3530 sysfs_free(sra);
3531 return 0;
3532 case 0:
3533 map_fork();
3534 break;
3535 }
3536
3537 /* Close unused file descriptor in the forked process */
3538 close_fd(&fd);
3539
3540 /* If another array on the same devices is busy, the
3541 * reshape will wait for them. This would mean that
3542 * the first section that we suspend will stay suspended
3543 * for a long time. So check on that possibility
3544 * by looking for "DELAYED" in /proc/mdstat, and if found,
3545 * wait a while
3546 */
3547 do {
3548 struct mdstat_ent *mds, *m;
3549 delayed = 0;
3550 mds = mdstat_read(1, 0);
3551 for (m = mds; m; m = m->next)
3552 if (strcmp(m->devnm, sra->sys_name) == 0) {
3553 if (m->resync && m->percent == RESYNC_DELAYED)
3554 delayed = 1;
3555 if (m->resync == 0)
3556 /* Haven't started the reshape thread
3557 * yet, wait a bit
3558 */
3559 delayed = 2;
3560 break;
3561 }
3562 free_mdstat(mds);
3563 if (delayed == 1 && get_linux_version() < 3007000) {
3564 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3565 " You might experience problems until other reshapes complete.\n");
3566 delayed = 0;
3567 }
3568 if (delayed)
3569 mdstat_wait(30 - (delayed-1) * 25);
3570 } while (delayed);
3571 mdstat_close();
3572 if (check_env("MDADM_GROW_VERIFY"))
3573 fd = open(devname, O_RDONLY | O_DIRECT);
3574 else
3575 fd = -1;
3576 mlockall(MCL_FUTURE);
3577
3578 if (signal_s(SIGTERM, catch_term) == SIG_ERR)
3579 goto release;
3580
3581 if (st->ss->external) {
3582 /* metadata handler takes it from here */
3583 done = st->ss->manage_reshape(
3584 fd, sra, &reshape, st, blocks,
3585 fdlist, offsets, d - odisks, fdlist + odisks,
3586 offsets + odisks);
3587 } else
3588 done = child_monitor(
3589 fd, sra, &reshape, st, blocks, fdlist, offsets,
3590 d - odisks, fdlist + odisks, offsets + odisks);
3591
3592 free(fdlist);
3593 free(offsets);
3594
3595 if (backup_file && done) {
3596 char *bul;
3597 bul = make_backup(sra->sys_name);
3598 if (bul) {
3599 char buf[1024];
3600 int l = readlink(bul, buf, sizeof(buf) - 1);
3601 if (l > 0) {
3602 buf[l]=0;
3603 unlink(buf);
3604 }
3605 unlink(bul);
3606 free(bul);
3607 }
3608 unlink(backup_file);
3609 }
3610 if (!done) {
3611 abort_reshape(sra);
3612 goto out;
3613 }
3614
3615 if (!st->ss->external &&
3616 !(reshape.before.data_disks != reshape.after.data_disks &&
3617 info->custom_array_size) && info->new_level == reshape.level &&
3618 !forked) {
3619 /* no need to wait for the reshape to finish as
3620 * there is nothing more to do.
3621 */
3622 sysfs_free(sra);
3623 exit(0);
3624 }
3625 wait_reshape(sra);
3626
3627 if (st->ss->external) {
3628 /* Re-load the metadata as much could have changed */
3629 int cfd = open_dev(st->container_devnm);
3630 if (cfd >= 0) {
3631 flush_mdmon(container);
3632 st->ss->free_super(st);
3633 st->ss->load_container(st, cfd, container);
3634 close(cfd);
3635 }
3636 }
3637
3638 /* set new array size if required customer_array_size is used
3639 * by this metadata.
3640 */
3641 if (reshape.before.data_disks != reshape.after.data_disks &&
3642 info->custom_array_size)
3643 set_array_size(st, info, info->text_version);
3644
3645 if (info->new_level != reshape.level) {
3646 if (fd < 0)
3647 fd = open(devname, O_RDONLY);
3648 impose_level(fd, info->new_level, devname, verbose);
3649 close(fd);
3650 if (info->new_level == 0)
3651 st->update_tail = NULL;
3652 }
3653 out:
3654 sysfs_free(sra);
3655 if (forked)
3656 return 0;
3657 unfreeze(st);
3658 exit(0);
3659
3660 release:
3661 free(fdlist);
3662 free(offsets);
3663 if (orig_level != UnSet && sra) {
3664 c = map_num(pers, orig_level);
3665 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3666 pr_err("aborting level change\n");
3667 }
3668 sysfs_free(sra);
3669 if (!forked)
3670 unfreeze(st);
3671 return 1;
3672 }
3673
3674 /* mdfd handle is passed to be closed in child process (after fork).
3675 */
3676 int reshape_container(char *container, char *devname,
3677 int mdfd,
3678 struct supertype *st,
3679 struct mdinfo *info,
3680 int force,
3681 char *backup_file, int verbose,
3682 int forked, int restart, int freeze_reshape)
3683 {
3684 struct mdinfo *cc = NULL;
3685 int rv = restart;
3686 char last_devnm[32] = "";
3687
3688 /* component_size is not meaningful for a container,
3689 * so pass '0' meaning 'no change'
3690 */
3691 if (!restart &&
3692 reshape_super(st, 0, info->new_level,
3693 info->new_layout, info->new_chunk,
3694 info->array.raid_disks, info->delta_disks,
3695 backup_file, devname, APPLY_METADATA_CHANGES,
3696 verbose)) {
3697 unfreeze(st);
3698 return 1;
3699 }
3700
3701 sync_metadata(st);
3702
3703 /* ping monitor to be sure that update is on disk
3704 */
3705 ping_monitor(container);
3706
3707 if (!forked && !freeze_reshape)
3708 if (continue_via_systemd(container, GROW_SERVICE))
3709 return 0;
3710
3711 switch (forked ? 0 : fork()) {
3712 case -1: /* error */
3713 perror("Cannot fork to complete reshape\n");
3714 unfreeze(st);
3715 return 1;
3716 default: /* parent */
3717 if (!freeze_reshape)
3718 printf("%s: multi-array reshape continues in background\n", Name);
3719 return 0;
3720 case 0: /* child */
3721 manage_fork_fds(0);
3722 map_fork();
3723 break;
3724 }
3725
3726 /* close unused handle in child process
3727 */
3728 if (mdfd > -1)
3729 close(mdfd);
3730
3731 while(1) {
3732 /* For each member array with reshape_active,
3733 * we need to perform the reshape.
3734 * We pick the first array that needs reshaping and
3735 * reshape it. reshape_array() will re-read the metadata
3736 * so the next time through a different array should be
3737 * ready for reshape.
3738 * It is possible that the 'different' array will not
3739 * be assembled yet. In that case we simple exit.
3740 * When it is assembled, the mdadm which assembles it
3741 * will take over the reshape.
3742 */
3743 struct mdinfo *content;
3744 int fd;
3745 struct mdstat_ent *mdstat;
3746 char *adev;
3747 dev_t devid;
3748
3749 sysfs_free(cc);
3750
3751 cc = st->ss->container_content(st, NULL);
3752
3753 for (content = cc; content ; content = content->next) {
3754 char *subarray;
3755 if (!content->reshape_active)
3756 continue;
3757
3758 subarray = strchr(content->text_version+1, '/')+1;
3759 mdstat = mdstat_by_subdev(subarray, container);
3760 if (!mdstat)
3761 continue;
3762 if (mdstat->active == 0) {
3763 pr_err("Skipping inactive array %s.\n",
3764 mdstat->devnm);
3765 free_mdstat(mdstat);
3766 mdstat = NULL;
3767 continue;
3768 }
3769 break;
3770 }
3771 if (!content)
3772 break;
3773
3774 devid = devnm2devid(mdstat->devnm);
3775 adev = map_dev(major(devid), minor(devid), 0);
3776 if (!adev)
3777 adev = content->text_version;
3778
3779 fd = open_dev(mdstat->devnm);
3780 if (fd < 0) {
3781 pr_err("Device %s cannot be opened for reshape.\n",
3782 adev);
3783 break;
3784 }
3785
3786 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3787 /* Do not allow for multiple reshape_array() calls for
3788 * the same array.
3789 * It can happen when reshape_array() returns without
3790 * error, when reshape is not finished (wrong reshape
3791 * starting/continuation conditions). Mdmon doesn't
3792 * switch to next array in container and reentry
3793 * conditions for the same array occur.
3794 * This is possibly interim until the behaviour of
3795 * reshape_array is resolved().
3796 */
3797 printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev);
3798 close(fd);
3799 break;
3800 }
3801 strcpy(last_devnm, mdstat->devnm);
3802
3803 if (sysfs_init(content, fd, mdstat->devnm)) {
3804 pr_err("Unable to initialize sysfs for %s\n",
3805 mdstat->devnm);
3806 rv = 1;
3807 break;
3808 }
3809
3810 if (mdmon_running(container))
3811 flush_mdmon(container);
3812
3813 rv = reshape_array(container, fd, adev, st,
3814 content, force, NULL, INVALID_SECTORS,
3815 backup_file, verbose, 1, restart,
3816 freeze_reshape);
3817 close(fd);
3818
3819 if (freeze_reshape) {
3820 sysfs_free(cc);
3821 exit(0);
3822 }
3823
3824 restart = 0;
3825 if (rv)
3826 break;
3827
3828 if (mdmon_running(container))
3829 flush_mdmon(container);
3830 }
3831 if (!rv)
3832 unfreeze(st);
3833 sysfs_free(cc);
3834 exit(0);
3835 }
3836
3837 /*
3838 * We run a child process in the background which performs the following
3839 * steps:
3840 * - wait for resync to reach a certain point
3841 * - suspend io to the following section
3842 * - backup that section
3843 * - allow resync to proceed further
3844 * - resume io
3845 * - discard the backup.
3846 *
3847 * When are combined in slightly different ways in the three cases.
3848 * Grow:
3849 * - suspend/backup/allow/wait/resume/discard
3850 * Shrink:
3851 * - allow/wait/suspend/backup/allow/wait/resume/discard
3852 * same-size:
3853 * - wait/resume/discard/suspend/backup/allow
3854 *
3855 * suspend/backup/allow always come together
3856 * wait/resume/discard do too.
3857 * For the same-size case we have two backups to improve flow.
3858 *
3859 */
3860
3861 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3862 unsigned long long backup_point,
3863 unsigned long long wait_point,
3864 unsigned long long *suspend_point,
3865 unsigned long long *reshape_completed, int *frozen)
3866 {
3867 /* This function is called repeatedly by the reshape manager.
3868 * It determines how much progress can safely be made and allows
3869 * that progress.
3870 * - 'info' identifies the array and particularly records in
3871 * ->reshape_progress the metadata's knowledge of progress
3872 * This is a sector offset from the start of the array
3873 * of the next array block to be relocated. This number
3874 * may increase from 0 or decrease from array_size, depending
3875 * on the type of reshape that is happening.
3876 * Note that in contrast, 'sync_completed' is a block count of the
3877 * reshape so far. It gives the distance between the start point
3878 * (head or tail of device) and the next place that data will be
3879 * written. It always increases.
3880 * - 'reshape' is the structure created by analyse_change
3881 * - 'backup_point' shows how much the metadata manager has backed-up
3882 * data. For reshapes with increasing progress, it is the next address
3883 * to be backed up, previous addresses have been backed-up. For
3884 * decreasing progress, it is the earliest address that has been
3885 * backed up - later address are also backed up.
3886 * So addresses between reshape_progress and backup_point are
3887 * backed up providing those are in the 'correct' order.
3888 * - 'wait_point' is an array address. When reshape_completed
3889 * passes this point, progress_reshape should return. It might
3890 * return earlier if it determines that ->reshape_progress needs
3891 * to be updated or further backup is needed.
3892 * - suspend_point is maintained by progress_reshape and the caller
3893 * should not touch it except to initialise to zero.
3894 * It is an array address and it only increases in 2.6.37 and earlier.
3895 * This makes it difficult to handle reducing reshapes with
3896 * external metadata.
3897 * However: it is similar to backup_point in that it records the
3898 * other end of a suspended region from reshape_progress.
3899 * it is moved to extend the region that is safe to backup and/or
3900 * reshape
3901 * - reshape_completed is read from sysfs and returned. The caller
3902 * should copy this into ->reshape_progress when it has reason to
3903 * believe that the metadata knows this, and any backup outside this
3904 * has been erased.
3905 *
3906 * Return value is:
3907 * 1 if more data from backup_point - but only as far as suspend_point,
3908 * should be backed up
3909 * 0 if things are progressing smoothly
3910 * -1 if the reshape is finished because it is all done,
3911 * -2 if the reshape is finished due to an error.
3912 */
3913
3914 int advancing = (reshape->after.data_disks
3915 >= reshape->before.data_disks);
3916 unsigned long long need_backup; /* All data between start of array and
3917 * here will at some point need to
3918 * be backed up.
3919 */
3920 unsigned long long read_offset, write_offset;
3921 unsigned long long write_range;
3922 unsigned long long max_progress, target, completed;
3923 unsigned long long array_size = (info->component_size
3924 * reshape->before.data_disks);
3925 int fd;
3926 char buf[20];
3927
3928 /* First, we unsuspend any region that is now known to be safe.
3929 * If suspend_point is on the 'wrong' side of reshape_progress, then
3930 * we don't have or need suspension at the moment. This is true for
3931 * native metadata when we don't need to back-up.
3932 */
3933 if (advancing) {
3934 if (info->reshape_progress <= *suspend_point)
3935 sysfs_set_num(info, NULL, "suspend_lo",
3936 info->reshape_progress);
3937 } else {
3938 /* Note: this won't work in 2.6.37 and before.
3939 * Something somewhere should make sure we don't need it!
3940 */
3941 if (info->reshape_progress >= *suspend_point)
3942 sysfs_set_num(info, NULL, "suspend_hi",
3943 info->reshape_progress);
3944 }
3945
3946 /* Now work out how far it is safe to progress.
3947 * If the read_offset for ->reshape_progress is less than
3948 * 'blocks' beyond the write_offset, we can only progress as far
3949 * as a backup.
3950 * Otherwise we can progress until the write_offset for the new location
3951 * reaches (within 'blocks' of) the read_offset at the current location.
3952 * However that region must be suspended unless we are using native
3953 * metadata.
3954 * If we need to suspend more, we limit it to 128M per device, which is
3955 * rather arbitrary and should be some time-based calculation.
3956 */
3957 read_offset = info->reshape_progress / reshape->before.data_disks;
3958 write_offset = info->reshape_progress / reshape->after.data_disks;
3959 write_range = info->new_chunk/512;
3960 if (reshape->before.data_disks == reshape->after.data_disks)
3961 need_backup = array_size;
3962 else
3963 need_backup = reshape->backup_blocks;
3964 if (advancing) {
3965 if (read_offset < write_offset + write_range)
3966 max_progress = backup_point;
3967 else
3968 max_progress =
3969 read_offset * reshape->after.data_disks;
3970 } else {
3971 if (read_offset > write_offset - write_range)
3972 /* Can only progress as far as has been backed up,
3973 * which must be suspended */
3974 max_progress = backup_point;
3975 else if (info->reshape_progress <= need_backup)
3976 max_progress = backup_point;
3977 else {
3978 if (info->array.major_version >= 0)
3979 /* Can progress until backup is needed */
3980 max_progress = need_backup;
3981 else {
3982 /* Can progress until metadata update is required */
3983 max_progress =
3984 read_offset * reshape->after.data_disks;
3985 /* but data must be suspended */
3986 if (max_progress < *suspend_point)
3987 max_progress = *suspend_point;
3988 }
3989 }
3990 }
3991
3992 /* We know it is safe to progress to 'max_progress' providing
3993 * it is suspended or we are using native metadata.
3994 * Consider extending suspend_point 128M per device if it
3995 * is less than 64M per device beyond reshape_progress.
3996 * But always do a multiple of 'blocks'
3997 * FIXME this is too big - it takes to long to complete
3998 * this much.
3999 */
4000 target = 64*1024*2 * min(reshape->before.data_disks,
4001 reshape->after.data_disks);
4002 target /= reshape->backup_blocks;
4003 if (target < 2)
4004 target = 2;
4005 target *= reshape->backup_blocks;
4006
4007 /* For externally managed metadata we always need to suspend IO to
4008 * the area being reshaped so we regularly push suspend_point forward.
4009 * For native metadata we only need the suspend if we are going to do
4010 * a backup.
4011 */
4012 if (advancing) {
4013 if ((need_backup > info->reshape_progress ||
4014 info->array.major_version < 0) &&
4015 *suspend_point < info->reshape_progress + target) {
4016 if (need_backup < *suspend_point + 2 * target)
4017 *suspend_point = need_backup;
4018 else if (*suspend_point + 2 * target < array_size)
4019 *suspend_point += 2 * target;
4020 else
4021 *suspend_point = array_size;
4022 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
4023 if (max_progress > *suspend_point)
4024 max_progress = *suspend_point;
4025 }
4026 } else {
4027 if (info->array.major_version >= 0) {
4028 /* Only need to suspend when about to backup */
4029 if (info->reshape_progress < need_backup * 2 &&
4030 *suspend_point > 0) {
4031 *suspend_point = 0;
4032 sysfs_set_num(info, NULL, "suspend_lo", 0);
4033 sysfs_set_num(info, NULL, "suspend_hi",
4034 need_backup);
4035 }
4036 } else {
4037 /* Need to suspend continually */
4038 if (info->reshape_progress < *suspend_point)
4039 *suspend_point = info->reshape_progress;
4040 if (*suspend_point + target < info->reshape_progress)
4041 /* No need to move suspend region yet */;
4042 else {
4043 if (*suspend_point >= 2 * target)
4044 *suspend_point -= 2 * target;
4045 else
4046 *suspend_point = 0;
4047 sysfs_set_num(info, NULL, "suspend_lo",
4048 *suspend_point);
4049 }
4050 if (max_progress < *suspend_point)
4051 max_progress = *suspend_point;
4052 }
4053 }
4054
4055 /* now set sync_max to allow that progress. sync_max, like
4056 * sync_completed is a count of sectors written per device, so
4057 * we find the difference between max_progress and the start point,
4058 * and divide that by after.data_disks to get a sync_max
4059 * number.
4060 * At the same time we convert wait_point to a similar number
4061 * for comparing against sync_completed.
4062 */
4063 /* scale down max_progress to per_disk */
4064 max_progress /= reshape->after.data_disks;
4065 /*
4066 * Round to chunk size as some kernels give an erroneously
4067 * high number
4068 */
4069 max_progress /= info->new_chunk/512;
4070 max_progress *= info->new_chunk/512;
4071 /* And round to old chunk size as the kernel wants that */
4072 max_progress /= info->array.chunk_size/512;
4073 max_progress *= info->array.chunk_size/512;
4074 /* Limit progress to the whole device */
4075 if (max_progress > info->component_size)
4076 max_progress = info->component_size;
4077 wait_point /= reshape->after.data_disks;
4078 if (!advancing) {
4079 /* switch from 'device offset' to 'processed block count' */
4080 max_progress = info->component_size - max_progress;
4081 wait_point = info->component_size - wait_point;
4082 }
4083
4084 if (!*frozen)
4085 sysfs_set_num(info, NULL, "sync_max", max_progress);
4086
4087 /* Now wait. If we have already reached the point that we were
4088 * asked to wait to, don't wait at all, else wait for any change.
4089 * We need to select on 'sync_completed' as that is the place that
4090 * notifications happen, but we are really interested in
4091 * 'reshape_position'
4092 */
4093 fd = sysfs_get_fd(info, NULL, "sync_completed");
4094 if (fd < 0)
4095 goto check_progress;
4096
4097 if (sysfs_fd_get_ll(fd, &completed) < 0)
4098 goto check_progress;
4099
4100 while (completed < max_progress && completed < wait_point) {
4101 /* Check that sync_action is still 'reshape' to avoid
4102 * waiting forever on a dead array
4103 */
4104 char action[20];
4105 if (sysfs_get_str(info, NULL, "sync_action", action, 20) <= 0 ||
4106 strncmp(action, "reshape", 7) != 0)
4107 break;
4108 /* Some kernels reset 'sync_completed' to zero
4109 * before setting 'sync_action' to 'idle'.
4110 * So we need these extra tests.
4111 */
4112 if (completed == 0 && advancing &&
4113 strncmp(action, "idle", 4) == 0 &&
4114 info->reshape_progress > 0)
4115 break;
4116 if (completed == 0 && !advancing &&
4117 strncmp(action, "idle", 4) == 0 &&
4118 info->reshape_progress <
4119 (info->component_size * reshape->after.data_disks))
4120 break;
4121 sysfs_wait(fd, NULL);
4122 if (sysfs_fd_get_ll(fd, &completed) < 0)
4123 goto check_progress;
4124 }
4125 /* Some kernels reset 'sync_completed' to zero,
4126 * we need to have real point we are in md.
4127 * So in that case, read 'reshape_position' from sysfs.
4128 */
4129 if (completed == 0) {
4130 unsigned long long reshapep;
4131 char action[20];
4132 if (sysfs_get_str(info, NULL, "sync_action", action, 20) > 0 &&
4133 strncmp(action, "idle", 4) == 0 &&
4134 sysfs_get_ll(info, NULL,
4135 "reshape_position", &reshapep) == 0)
4136 *reshape_completed = reshapep;
4137 } else {
4138 /* some kernels can give an incorrectly high
4139 * 'completed' number, so round down */
4140 completed /= (info->new_chunk/512);
4141 completed *= (info->new_chunk/512);
4142 /* Convert 'completed' back in to a 'progress' number */
4143 completed *= reshape->after.data_disks;
4144 if (!advancing)
4145 completed = (info->component_size
4146 * reshape->after.data_disks
4147 - completed);
4148 *reshape_completed = completed;
4149 }
4150
4151 close(fd);
4152
4153 /* We return the need_backup flag. Caller will decide
4154 * how much - a multiple of ->backup_blocks up to *suspend_point
4155 */
4156 if (advancing)
4157 return need_backup > info->reshape_progress;
4158 else
4159 return need_backup >= info->reshape_progress;
4160
4161 check_progress:
4162 /* if we couldn't read a number from sync_completed, then
4163 * either the reshape did complete, or it aborted.
4164 * We can tell which by checking for 'none' in reshape_position.
4165 * If it did abort, then it might immediately restart if it
4166 * it was just a device failure that leaves us degraded but
4167 * functioning.
4168 */
4169 if (sysfs_get_str(info, NULL, "reshape_position", buf,
4170 sizeof(buf)) < 0 || strncmp(buf, "none", 4) != 0) {
4171 /* The abort might only be temporary. Wait up to 10
4172 * seconds for fd to contain a valid number again.
4173 */
4174 int wait = 10000;
4175 int rv = -2;
4176 unsigned long long new_sync_max;
4177 while (fd >= 0 && rv < 0 && wait > 0) {
4178 if (sysfs_wait(fd, &wait) != 1)
4179 break;
4180 switch (sysfs_fd_get_ll(fd, &completed)) {
4181 case 0:
4182 /* all good again */
4183 rv = 1;
4184 /* If "sync_max" is no longer max_progress
4185 * we need to freeze things
4186 */
4187 sysfs_get_ll(info, NULL, "sync_max",
4188 &new_sync_max);
4189 *frozen = (new_sync_max != max_progress);
4190 break;
4191 case -2: /* read error - abort */
4192 wait = 0;
4193 break;
4194 }
4195 }
4196 if (fd >= 0)
4197 close(fd);
4198 return rv; /* abort */
4199 } else {
4200 /* Maybe racing with array shutdown - check state */
4201 if (fd >= 0)
4202 close(fd);
4203 if (sysfs_get_str(info, NULL, "array_state", buf,
4204 sizeof(buf)) < 0 ||
4205 strncmp(buf, "inactive", 8) == 0 ||
4206 strncmp(buf, "clear",5) == 0)
4207 return -2; /* abort */
4208 return -1; /* complete */
4209 }
4210 }
4211
4212 /* FIXME return status is never checked */
4213 static int grow_backup(struct mdinfo *sra,
4214 unsigned long long offset, /* per device */
4215 unsigned long stripes, /* per device, in old chunks */
4216 int *sources, unsigned long long *offsets,
4217 int disks, int chunk, int level, int layout,
4218 int dests, int *destfd, unsigned long long *destoffsets,
4219 int part, int *degraded,
4220 char *buf)
4221 {
4222 /* Backup 'blocks' sectors at 'offset' on each device of the array,
4223 * to storage 'destfd' (offset 'destoffsets'), after first
4224 * suspending IO. Then allow resync to continue
4225 * over the suspended section.
4226 * Use part 'part' of the backup-super-block.
4227 */
4228 int odata = disks;
4229 int rv = 0;
4230 int i;
4231 unsigned long long ll;
4232 int new_degraded;
4233 //printf("offset %llu\n", offset);
4234 if (level >= 4)
4235 odata--;
4236 if (level == 6)
4237 odata--;
4238
4239 /* Check that array hasn't become degraded, else we might backup the wrong data */
4240 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
4241 return -1; /* FIXME this error is ignored */
4242 new_degraded = (int)ll;
4243 if (new_degraded != *degraded) {
4244 /* check each device to ensure it is still working */
4245 struct mdinfo *sd;
4246 for (sd = sra->devs ; sd ; sd = sd->next) {
4247 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4248 continue;
4249 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
4250 char sbuf[100];
4251
4252 if (sysfs_get_str(sra, sd, "state",
4253 sbuf, sizeof(sbuf)) < 0 ||
4254 strstr(sbuf, "faulty") ||
4255 strstr(sbuf, "in_sync") == NULL) {
4256 /* this device is dead */
4257 sd->disk.state = (1<<MD_DISK_FAULTY);
4258 if (sd->disk.raid_disk >= 0 &&
4259 sources[sd->disk.raid_disk] >= 0) {
4260 close(sources[sd->disk.raid_disk]);
4261 sources[sd->disk.raid_disk] = -1;
4262 }
4263 }
4264 }
4265 }
4266 *degraded = new_degraded;
4267 }
4268 if (part) {
4269 bsb.arraystart2 = __cpu_to_le64(offset * odata);
4270 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
4271 } else {
4272 bsb.arraystart = __cpu_to_le64(offset * odata);
4273 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
4274 }
4275 if (part)
4276 bsb.magic[15] = '2';
4277 for (i = 0; i < dests; i++)
4278 if (part)
4279 lseek64(destfd[i], destoffsets[i] +
4280 __le64_to_cpu(bsb.devstart2)*512, 0);
4281 else
4282 lseek64(destfd[i], destoffsets[i], 0);
4283
4284 rv = save_stripes(sources, offsets, disks, chunk, level, layout,
4285 dests, destfd, offset * 512 * odata,
4286 stripes * chunk * odata, buf);
4287
4288 if (rv)
4289 return rv;
4290 bsb.mtime = __cpu_to_le64(time(0));
4291 for (i = 0; i < dests; i++) {
4292 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4293
4294 bsb.sb_csum = bsb_csum((char*)&bsb,
4295 ((char*)&bsb.sb_csum)-((char*)&bsb));
4296 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4297 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4298 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4299
4300 rv = -1;
4301 if ((unsigned long long)lseek64(destfd[i],
4302 destoffsets[i] - 4096, 0) !=
4303 destoffsets[i] - 4096)
4304 break;
4305 if (write(destfd[i], &bsb, 512) != 512)
4306 break;
4307 if (destoffsets[i] > 4096) {
4308 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
4309 destoffsets[i]+stripes*chunk*odata)
4310 break;
4311 if (write(destfd[i], &bsb, 512) != 512)
4312 break;
4313 }
4314 fsync(destfd[i]);
4315 rv = 0;
4316 }
4317
4318 return rv;
4319 }
4320
4321 /* in 2.6.30, the value reported by sync_completed can be
4322 * less that it should be by one stripe.
4323 * This only happens when reshape hits sync_max and pauses.
4324 * So allow wait_backup to either extent sync_max further
4325 * than strictly necessary, or return before the
4326 * sync has got quite as far as we would really like.
4327 * This is what 'blocks2' is for.
4328 * The various caller give appropriate values so that
4329 * every works.
4330 */
4331 /* FIXME return value is often ignored */
4332 static int forget_backup(int dests, int *destfd,
4333 unsigned long long *destoffsets,
4334 int part)
4335 {
4336 /*
4337 * Erase backup 'part' (which is 0 or 1)
4338 */
4339 int i;
4340 int rv;
4341
4342 if (part) {
4343 bsb.arraystart2 = __cpu_to_le64(0);
4344 bsb.length2 = __cpu_to_le64(0);
4345 } else {
4346 bsb.arraystart = __cpu_to_le64(0);
4347 bsb.length = __cpu_to_le64(0);
4348 }
4349 bsb.mtime = __cpu_to_le64(time(0));
4350 rv = 0;
4351 for (i = 0; i < dests; i++) {
4352 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4353 bsb.sb_csum = bsb_csum((char*)&bsb,
4354 ((char*)&bsb.sb_csum)-((char*)&bsb));
4355 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4356 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4357 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4358 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4359 destoffsets[i]-4096)
4360 rv = -1;
4361 if (rv == 0 && write(destfd[i], &bsb, 512) != 512)
4362 rv = -1;
4363 fsync(destfd[i]);
4364 }
4365 return rv;
4366 }
4367
4368 static void fail(char *msg)
4369 {
4370 int rv;
4371 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4372 rv |= (write(2, "\n", 1) != 1);
4373 exit(rv ? 1 : 2);
4374 }
4375
4376 static char *abuf, *bbuf;
4377 static unsigned long long abuflen;
4378 static void validate(int afd, int bfd, unsigned long long offset)
4379 {
4380 /* check that the data in the backup against the array.
4381 * This is only used for regression testing and should not
4382 * be used while the array is active
4383 */
4384 if (afd < 0)
4385 return;
4386 lseek64(bfd, offset - 4096, 0);
4387 if (read(bfd, &bsb2, 512) != 512)
4388 fail("cannot read bsb");
4389 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4390 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4391 fail("first csum bad");
4392 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4393 fail("magic is bad");
4394 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4395 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4396 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4397 fail("second csum bad");
4398
4399 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4400 fail("devstart is wrong");
4401
4402 if (bsb2.length) {
4403 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4404
4405 if (abuflen < len) {
4406 free(abuf);
4407 free(bbuf);
4408 abuflen = len;
4409 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4410 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4411 abuflen = 0;
4412 /* just stop validating on mem-alloc failure */
4413 return;
4414 }
4415 }
4416
4417 lseek64(bfd, offset, 0);
4418 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4419 //printf("len %llu\n", len);
4420 fail("read first backup failed");
4421 }
4422 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4423 if ((unsigned long long)read(afd, abuf, len) != len)
4424 fail("read first from array failed");
4425 if (memcmp(bbuf, abuf, len) != 0) {
4426 #if 0
4427 int i;
4428 printf("offset=%llu len=%llu\n",
4429 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4430 for (i=0; i<len; i++)
4431 if (bbuf[i] != abuf[i]) {
4432 printf("first diff byte %d\n", i);
4433 break;
4434 }
4435 #endif
4436 fail("data1 compare failed");
4437 }
4438 }
4439 if (bsb2.length2) {
4440 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4441
4442 if (abuflen < len) {
4443 free(abuf);
4444 free(bbuf);
4445 abuflen = len;
4446 abuf = xmalloc(abuflen);
4447 bbuf = xmalloc(abuflen);
4448 }
4449
4450 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4451 if ((unsigned long long)read(bfd, bbuf, len) != len)
4452 fail("read second backup failed");
4453 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4454 if ((unsigned long long)read(afd, abuf, len) != len)
4455 fail("read second from array failed");
4456 if (memcmp(bbuf, abuf, len) != 0)
4457 fail("data2 compare failed");
4458 }
4459 }
4460
4461 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4462 struct supertype *st, unsigned long blocks,
4463 int *fds, unsigned long long *offsets,
4464 int dests, int *destfd, unsigned long long *destoffsets)
4465 {
4466 /* Monitor a reshape where backup is being performed using
4467 * 'native' mechanism - either to a backup file, or
4468 * to some space in a spare.
4469 */
4470 char *buf;
4471 int degraded = -1;
4472 unsigned long long speed;
4473 unsigned long long suspend_point, array_size;
4474 unsigned long long backup_point, wait_point;
4475 unsigned long long reshape_completed;
4476 int done = 0;
4477 int increasing = reshape->after.data_disks >=
4478 reshape->before.data_disks;
4479 int part = 0; /* The next part of the backup area to fill. It
4480 * may already be full, so we need to check */
4481 int level = reshape->level;
4482 int layout = reshape->before.layout;
4483 int data = reshape->before.data_disks;
4484 int disks = reshape->before.data_disks + reshape->parity;
4485 int chunk = sra->array.chunk_size;
4486 struct mdinfo *sd;
4487 unsigned long stripes;
4488 int uuid[4];
4489 int frozen = 0;
4490
4491 /* set up the backup-super-block. This requires the
4492 * uuid from the array.
4493 */
4494 /* Find a superblock */
4495 for (sd = sra->devs; sd; sd = sd->next) {
4496 char *dn;
4497 int devfd;
4498 int ok;
4499 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4500 continue;
4501 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4502 devfd = dev_open(dn, O_RDONLY);
4503 if (devfd < 0)
4504 continue;
4505 ok = st->ss->load_super(st, devfd, NULL);
4506 close(devfd);
4507 if (ok == 0)
4508 break;
4509 }
4510 if (!sd) {
4511 pr_err("Cannot find a superblock\n");
4512 return 0;
4513 }
4514
4515 memset(&bsb, 0, 512);
4516 memcpy(bsb.magic, "md_backup_data-1", 16);
4517 st->ss->uuid_from_super(st, uuid);
4518 memcpy(bsb.set_uuid, uuid, 16);
4519 bsb.mtime = __cpu_to_le64(time(0));
4520 bsb.devstart2 = blocks;
4521
4522 stripes = blocks / (sra->array.chunk_size/512) /
4523 reshape->before.data_disks;
4524
4525 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4526 /* Don't start the 'reshape' */
4527 return 0;
4528 if (reshape->before.data_disks == reshape->after.data_disks) {
4529 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4530 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4531 }
4532
4533 if (increasing) {
4534 array_size = sra->component_size * reshape->after.data_disks;
4535 backup_point = sra->reshape_progress;
4536 suspend_point = 0;
4537 } else {
4538 array_size = sra->component_size * reshape->before.data_disks;
4539 backup_point = reshape->backup_blocks;
4540 suspend_point = array_size;
4541 }
4542
4543 while (!done) {
4544 int rv;
4545
4546 /* Want to return as soon the oldest backup slot can
4547 * be released as that allows us to start backing up
4548 * some more, providing suspend_point has been
4549 * advanced, which it should have.
4550 */
4551 if (increasing) {
4552 wait_point = array_size;
4553 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4554 wait_point = (__le64_to_cpu(bsb.arraystart) +
4555 __le64_to_cpu(bsb.length));
4556 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4557 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4558 __le64_to_cpu(bsb.length2));
4559 } else {
4560 wait_point = 0;
4561 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4562 wait_point = __le64_to_cpu(bsb.arraystart);
4563 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4564 wait_point = __le64_to_cpu(bsb.arraystart2);
4565 }
4566
4567 reshape_completed = sra->reshape_progress;
4568 rv = progress_reshape(sra, reshape,
4569 backup_point, wait_point,
4570 &suspend_point, &reshape_completed,
4571 &frozen);
4572 /* external metadata would need to ping_monitor here */
4573 sra->reshape_progress = reshape_completed;
4574
4575 /* Clear any backup region that is before 'here' */
4576 if (increasing) {
4577 if (__le64_to_cpu(bsb.length) > 0 &&
4578 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4579 __le64_to_cpu(bsb.length)))
4580 forget_backup(dests, destfd,
4581 destoffsets, 0);
4582 if (__le64_to_cpu(bsb.length2) > 0 &&
4583 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4584 __le64_to_cpu(bsb.length2)))
4585 forget_backup(dests, destfd,
4586 destoffsets, 1);
4587 } else {
4588 if (__le64_to_cpu(bsb.length) > 0 &&
4589 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4590 forget_backup(dests, destfd,
4591 destoffsets, 0);
4592 if (__le64_to_cpu(bsb.length2) > 0 &&
4593 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4594 forget_backup(dests, destfd,
4595 destoffsets, 1);
4596 }
4597 if (sigterm)
4598 rv = -2;
4599 if (rv < 0) {
4600 if (rv == -1)
4601 done = 1;
4602 break;
4603 }
4604 if (rv == 0 && increasing && !st->ss->external) {
4605 /* No longer need to monitor this reshape */
4606 sysfs_set_str(sra, NULL, "sync_max", "max");
4607 done = 1;
4608 break;
4609 }
4610
4611 while (rv) {
4612 unsigned long long offset;
4613 unsigned long actual_stripes;
4614 /* Need to backup some data.
4615 * If 'part' is not used and the desired
4616 * backup size is suspended, do a backup,
4617 * then consider the next part.
4618 */
4619 /* Check that 'part' is unused */
4620 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4621 break;
4622 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4623 break;
4624
4625 offset = backup_point / data;
4626 actual_stripes = stripes;
4627 if (increasing) {
4628 if (offset + actual_stripes * (chunk/512) >
4629 sra->component_size)
4630 actual_stripes = ((sra->component_size - offset)
4631 / (chunk/512));
4632 if (offset + actual_stripes * (chunk/512) >
4633 suspend_point/data)
4634 break;
4635 } else {
4636 if (offset < actual_stripes * (chunk/512))
4637 actual_stripes = offset / (chunk/512);
4638 offset -= actual_stripes * (chunk/512);
4639 if (offset < suspend_point/data)
4640 break;
4641 }
4642 if (actual_stripes == 0)
4643 break;
4644 grow_backup(sra, offset, actual_stripes, fds, offsets,
4645 disks, chunk, level, layout, dests, destfd,
4646 destoffsets, part, &degraded, buf);
4647 validate(afd, destfd[0], destoffsets[0]);
4648 /* record where 'part' is up to */
4649 part = !part;
4650 if (increasing)
4651 backup_point += actual_stripes * (chunk/512) * data;
4652 else
4653 backup_point -= actual_stripes * (chunk/512) * data;
4654 }
4655 }
4656
4657 /* FIXME maybe call progress_reshape one more time instead */
4658 /* remove any remaining suspension */
4659 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4660 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4661 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4662 sysfs_set_num(sra, NULL, "sync_min", 0);
4663
4664 if (reshape->before.data_disks == reshape->after.data_disks)
4665 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4666 free(buf);
4667 return done;
4668 }
4669
4670 /*
4671 * If any spare contains md_back_data-1 which is recent wrt mtime,
4672 * write that data into the array and update the super blocks with
4673 * the new reshape_progress
4674 */
4675 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist,
4676 int cnt, char *backup_file, int verbose)
4677 {
4678 int i, j;
4679 int old_disks;
4680 unsigned long long *offsets;
4681 unsigned long long nstripe, ostripe;
4682 int ndata, odata;
4683
4684 odata = info->array.raid_disks - info->delta_disks - 1;
4685 if (info->array.level == 6)
4686 odata--; /* number of data disks */
4687 ndata = info->array.raid_disks - 1;
4688 if (info->new_level == 6)
4689 ndata--;
4690
4691 old_disks = info->array.raid_disks - info->delta_disks;
4692
4693 if (info->delta_disks <= 0)
4694 /* Didn't grow, so the backup file must have
4695 * been used
4696 */
4697 old_disks = cnt;
4698 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4699 struct mdinfo dinfo;
4700 int fd;
4701 int bsbsize;
4702 char *devname, namebuf[20];
4703 unsigned long long lo, hi;
4704
4705 /* This was a spare and may have some saved data on it.
4706 * Load the superblock, find and load the
4707 * backup_super_block.
4708 * If either fail, go on to next device.
4709 * If the backup contains no new info, just return
4710 * else restore data and update all superblocks
4711 */
4712 if (i == old_disks-1) {
4713 fd = open(backup_file, O_RDONLY);
4714 if (fd<0) {
4715 pr_err("backup file %s inaccessible: %s\n",
4716 backup_file, strerror(errno));
4717 continue;
4718 }
4719 devname = backup_file;
4720 } else {
4721 fd = fdlist[i];
4722 if (fd < 0)
4723 continue;
4724 if (st->ss->load_super(st, fd, NULL))
4725 continue;
4726
4727 st->ss->getinfo_super(st, &dinfo, NULL);
4728 st->ss->free_super(st);
4729
4730 if (lseek64(fd,
4731 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4732 0) < 0) {
4733 pr_err("Cannot seek on device %d\n", i);
4734 continue; /* Cannot seek */
4735 }
4736 sprintf(namebuf, "device-%d", i);
4737 devname = namebuf;
4738 }
4739 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4740 if (verbose)
4741 pr_err("Cannot read from %s\n", devname);
4742 continue; /* Cannot read */
4743 }
4744 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4745 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4746 if (verbose)
4747 pr_err("No backup metadata on %s\n", devname);
4748 continue;
4749 }
4750 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4751 if (verbose)
4752 pr_err("Bad backup-metadata checksum on %s\n",
4753 devname);
4754 continue; /* bad checksum */
4755 }
4756 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4757 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4758 if (verbose)
4759 pr_err("Bad backup-metadata checksum2 on %s\n",
4760 devname);
4761 continue; /* Bad second checksum */
4762 }
4763 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4764 if (verbose)
4765 pr_err("Wrong uuid on backup-metadata on %s\n",
4766 devname);
4767 continue; /* Wrong uuid */
4768 }
4769
4770 /*
4771 * array utime and backup-mtime should be updated at
4772 * much the same time, but it seems that sometimes
4773 * they aren't... So allow considerable flexability in
4774 * matching, and allow this test to be overridden by
4775 * an environment variable.
4776 */
4777 if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
4778 time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
4779 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4780 pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
4781 (unsigned long)__le64_to_cpu(bsb.mtime),
4782 (unsigned long)info->array.utime);
4783 } else {
4784 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4785 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4786 continue; /* time stamp is too bad */
4787 }
4788 }
4789
4790 if (bsb.magic[15] == '1') {
4791 if (bsb.length == 0)
4792 continue;
4793 if (info->delta_disks >= 0) {
4794 /* reshape_progress is increasing */
4795 if (__le64_to_cpu(bsb.arraystart)
4796 + __le64_to_cpu(bsb.length)
4797 < info->reshape_progress) {
4798 nonew:
4799 if (verbose)
4800 pr_err("backup-metadata found on %s but is not needed\n", devname);
4801 continue; /* No new data here */
4802 }
4803 } else {
4804 /* reshape_progress is decreasing */
4805 if (__le64_to_cpu(bsb.arraystart) >=
4806 info->reshape_progress)
4807 goto nonew; /* No new data here */
4808 }
4809 } else {
4810 if (bsb.length == 0 && bsb.length2 == 0)
4811 continue;
4812 if (info->delta_disks >= 0) {
4813 /* reshape_progress is increasing */
4814 if ((__le64_to_cpu(bsb.arraystart)
4815 + __le64_to_cpu(bsb.length)
4816 < info->reshape_progress) &&
4817 (__le64_to_cpu(bsb.arraystart2)
4818 + __le64_to_cpu(bsb.length2)
4819 < info->reshape_progress))
4820 goto nonew; /* No new data here */
4821 } else {
4822 /* reshape_progress is decreasing */
4823 if (__le64_to_cpu(bsb.arraystart) >=
4824 info->reshape_progress &&
4825 __le64_to_cpu(bsb.arraystart2) >=
4826 info->reshape_progress)
4827 goto nonew; /* No new data here */
4828 }
4829 }
4830 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4831 second_fail:
4832 if (verbose)
4833 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4834 devname);
4835 continue; /* Cannot seek */
4836 }
4837 /* There should be a duplicate backup superblock 4k before here */
4838 if (lseek64(fd, -4096, 1) < 0 ||
4839 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4840 goto second_fail; /* Cannot find leading superblock */
4841 if (bsb.magic[15] == '1')
4842 bsbsize = offsetof(struct mdp_backup_super, pad1);
4843 else
4844 bsbsize = offsetof(struct mdp_backup_super, pad);
4845 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4846 goto second_fail; /* Cannot find leading superblock */
4847
4848 /* Now need the data offsets for all devices. */
4849 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4850 for(j=0; j<info->array.raid_disks; j++) {
4851 if (fdlist[j] < 0)
4852 continue;
4853 if (st->ss->load_super(st, fdlist[j], NULL))
4854 /* FIXME should be this be an error */
4855 continue;
4856 st->ss->getinfo_super(st, &dinfo, NULL);
4857 st->ss->free_super(st);
4858 offsets[j] = dinfo.data_offset * 512;
4859 }
4860 printf("%s: restoring critical section\n", Name);
4861
4862 if (restore_stripes(fdlist, offsets, info->array.raid_disks,
4863 info->new_chunk, info->new_level,
4864 info->new_layout, fd,
4865 __le64_to_cpu(bsb.devstart)*512,
4866 __le64_to_cpu(bsb.arraystart)*512,
4867 __le64_to_cpu(bsb.length)*512, NULL)) {
4868 /* didn't succeed, so giveup */
4869 if (verbose)
4870 pr_err("Error restoring backup from %s\n",
4871 devname);
4872 free(offsets);
4873 return 1;
4874 }
4875
4876 if (bsb.magic[15] == '2' &&
4877 restore_stripes(fdlist, offsets, info->array.raid_disks,
4878 info->new_chunk, info->new_level,
4879 info->new_layout, fd,
4880 __le64_to_cpu(bsb.devstart)*512 +
4881 __le64_to_cpu(bsb.devstart2)*512,
4882 __le64_to_cpu(bsb.arraystart2)*512,
4883 __le64_to_cpu(bsb.length2)*512, NULL)) {
4884 /* didn't succeed, so giveup */
4885 if (verbose)
4886 pr_err("Error restoring second backup from %s\n",
4887 devname);
4888 free(offsets);
4889 return 1;
4890 }
4891
4892 free(offsets);
4893
4894 /* Ok, so the data is restored. Let's update those superblocks. */
4895
4896 lo = hi = 0;
4897 if (bsb.length) {
4898 lo = __le64_to_cpu(bsb.arraystart);
4899 hi = lo + __le64_to_cpu(bsb.length);
4900 }
4901 if (bsb.magic[15] == '2' && bsb.length2) {
4902 unsigned long long lo1, hi1;
4903 lo1 = __le64_to_cpu(bsb.arraystart2);
4904 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4905 if (lo == hi) {
4906 lo = lo1;
4907 hi = hi1;
4908 } else if (lo < lo1)
4909 hi = hi1;
4910 else
4911 lo = lo1;
4912 }
4913 if (lo < hi && (info->reshape_progress < lo ||
4914 info->reshape_progress > hi))
4915 /* backup does not affect reshape_progress*/ ;
4916 else if (info->delta_disks >= 0) {
4917 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4918 __le64_to_cpu(bsb.length);
4919 if (bsb.magic[15] == '2') {
4920 unsigned long long p2;
4921
4922 p2 = __le64_to_cpu(bsb.arraystart2) +
4923 __le64_to_cpu(bsb.length2);
4924 if (p2 > info->reshape_progress)
4925 info->reshape_progress = p2;
4926 }
4927 } else {
4928 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4929 if (bsb.magic[15] == '2') {
4930 unsigned long long p2;
4931
4932 p2 = __le64_to_cpu(bsb.arraystart2);
4933 if (p2 < info->reshape_progress)
4934 info->reshape_progress = p2;
4935 }
4936 }
4937 for (j=0; j<info->array.raid_disks; j++) {
4938 if (fdlist[j] < 0)
4939 continue;
4940 if (st->ss->load_super(st, fdlist[j], NULL))
4941 continue;
4942 st->ss->getinfo_super(st, &dinfo, NULL);
4943 dinfo.reshape_progress = info->reshape_progress;
4944 st->ss->update_super(st, &dinfo, "_reshape_progress",
4945 NULL,0, 0, NULL);
4946 st->ss->store_super(st, fdlist[j]);
4947 st->ss->free_super(st);
4948 }
4949 return 0;
4950 }
4951 /* Didn't find any backup data, try to see if any
4952 * was needed.
4953 */
4954 if (info->delta_disks < 0) {
4955 /* When shrinking, the critical section is at the end.
4956 * So see if we are before the critical section.
4957 */
4958 unsigned long long first_block;
4959 nstripe = ostripe = 0;
4960 first_block = 0;
4961 while (ostripe >= nstripe) {
4962 ostripe += info->array.chunk_size / 512;
4963 first_block = ostripe * odata;
4964 nstripe = first_block / ndata / (info->new_chunk/512) *
4965 (info->new_chunk/512);
4966 }
4967
4968 if (info->reshape_progress >= first_block)
4969 return 0;
4970 }
4971 if (info->delta_disks > 0) {
4972 /* See if we are beyond the critical section. */
4973 unsigned long long last_block;
4974 nstripe = ostripe = 0;
4975 last_block = 0;
4976 while (nstripe >= ostripe) {
4977 nstripe += info->new_chunk / 512;
4978 last_block = nstripe * ndata;
4979 ostripe = last_block / odata / (info->array.chunk_size/512) *
4980 (info->array.chunk_size/512);
4981 }
4982
4983 if (info->reshape_progress >= last_block)
4984 return 0;
4985 }
4986 /* needed to recover critical section! */
4987 if (verbose)
4988 pr_err("Failed to find backup of critical section\n");
4989 return 1;
4990 }
4991
4992 int Grow_continue_command(char *devname, int fd,
4993 char *backup_file, int verbose)
4994 {
4995 int ret_val = 0;
4996 struct supertype *st = NULL;
4997 struct mdinfo *content = NULL;
4998 struct mdinfo array;
4999 char *subarray = NULL;
5000 struct mdinfo *cc = NULL;
5001 struct mdstat_ent *mdstat = NULL;
5002 int cfd = -1;
5003 int fd2;
5004
5005 dprintf("Grow continue from command line called for %s\n", devname);
5006
5007 st = super_by_fd(fd, &subarray);
5008 if (!st || !st->ss) {
5009 pr_err("Unable to determine metadata format for %s\n", devname);
5010 return 1;
5011 }
5012 dprintf("Grow continue is run for ");
5013 if (st->ss->external == 0) {
5014 int d;
5015 int cnt = 5;
5016 dprintf_cont("native array (%s)\n", devname);
5017 if (md_get_array_info(fd, &array.array) < 0) {
5018 pr_err("%s is not an active md array - aborting\n",
5019 devname);
5020 ret_val = 1;
5021 goto Grow_continue_command_exit;
5022 }
5023 content = &array;
5024 sysfs_init(content, fd, NULL);
5025 /* Need to load a superblock.
5026 * FIXME we should really get what we need from
5027 * sysfs
5028 */
5029 do {
5030 for (d = 0; d < MAX_DISKS; d++) {
5031 mdu_disk_info_t disk;
5032 char *dv;
5033 int err;
5034 disk.number = d;
5035 if (md_get_disk_info(fd, &disk) < 0)
5036 continue;
5037 if (disk.major == 0 && disk.minor == 0)
5038 continue;
5039 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
5040 continue;
5041 dv = map_dev(disk.major, disk.minor, 1);
5042 if (!dv)
5043 continue;
5044 fd2 = dev_open(dv, O_RDONLY);
5045 if (fd2 < 0)
5046 continue;
5047 err = st->ss->load_super(st, fd2, NULL);
5048 close(fd2);
5049 if (err)
5050 continue;
5051 break;
5052 }
5053 if (d == MAX_DISKS) {
5054 pr_err("Unable to load metadata for %s\n",
5055 devname);
5056 ret_val = 1;
5057 goto Grow_continue_command_exit;
5058 }
5059 st->ss->getinfo_super(st, content, NULL);
5060 if (!content->reshape_active)
5061 sleep(3);
5062 else
5063 break;
5064 } while (cnt-- > 0);
5065 } else {
5066 char *container;
5067
5068 if (subarray) {
5069 dprintf_cont("subarray (%s)\n", subarray);
5070 container = st->container_devnm;
5071 cfd = open_dev_excl(st->container_devnm);
5072 } else {
5073 container = st->devnm;
5074 close(fd);
5075 cfd = open_dev_excl(st->devnm);
5076 dprintf_cont("container (%s)\n", container);
5077 fd = cfd;
5078 }
5079 if (cfd < 0) {
5080 pr_err("Unable to open container for %s\n", devname);
5081 ret_val = 1;
5082 goto Grow_continue_command_exit;
5083 }
5084
5085 /* find in container array under reshape
5086 */
5087 ret_val = st->ss->load_container(st, cfd, NULL);
5088 if (ret_val) {
5089 pr_err("Cannot read superblock for %s\n", devname);
5090 ret_val = 1;
5091 goto Grow_continue_command_exit;
5092 }
5093
5094 cc = st->ss->container_content(st, subarray);
5095 for (content = cc; content ; content = content->next) {
5096 char *array_name;
5097 int allow_reshape = 1;
5098
5099 if (content->reshape_active == 0)
5100 continue;
5101 /* The decision about array or container wide
5102 * reshape is taken in Grow_continue based
5103 * content->reshape_active state, therefore we
5104 * need to check_reshape based on
5105 * reshape_active and subarray name
5106 */
5107 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
5108 allow_reshape = 0;
5109 if (content->reshape_active == CONTAINER_RESHAPE &&
5110 (content->array.state
5111 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
5112 allow_reshape = 0;
5113
5114 if (!allow_reshape) {
5115 pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
5116 devname, container);
5117 ret_val = 1;
5118 goto Grow_continue_command_exit;
5119 }
5120
5121 array_name = strchr(content->text_version+1, '/')+1;
5122 mdstat = mdstat_by_subdev(array_name, container);
5123 if (!mdstat)
5124 continue;
5125 if (mdstat->active == 0) {
5126 pr_err("Skipping inactive array %s.\n",
5127 mdstat->devnm);
5128 free_mdstat(mdstat);
5129 mdstat = NULL;
5130 continue;
5131 }
5132 break;
5133 }
5134 if (!content) {
5135 pr_err("Unable to determine reshaped array for %s\n", devname);
5136 ret_val = 1;
5137 goto Grow_continue_command_exit;
5138 }
5139 fd2 = open_dev(mdstat->devnm);
5140 if (fd2 < 0) {
5141 pr_err("cannot open (%s)\n", mdstat->devnm);
5142 ret_val = 1;
5143 goto Grow_continue_command_exit;
5144 }
5145
5146 if (sysfs_init(content, fd2, mdstat->devnm)) {
5147 pr_err("Unable to initialize sysfs for %s, Grow cannot continue.\n",
5148 mdstat->devnm);
5149 ret_val = 1;
5150 close(fd2);
5151 goto Grow_continue_command_exit;
5152 }
5153
5154 close(fd2);
5155
5156 /* start mdmon in case it is not running
5157 */
5158 if (!mdmon_running(container))
5159 start_mdmon(container);
5160 ping_monitor(container);
5161
5162 if (mdmon_running(container))
5163 st->update_tail = &st->updates;
5164 else {
5165 pr_err("No mdmon found. Grow cannot continue.\n");
5166 ret_val = 1;
5167 goto Grow_continue_command_exit;
5168 }
5169 }
5170
5171 /* verify that array under reshape is started from
5172 * correct position
5173 */
5174 if (verify_reshape_position(content, content->array.level) < 0) {
5175 ret_val = 1;
5176 goto Grow_continue_command_exit;
5177 }
5178
5179 /* continue reshape
5180 */
5181 ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);
5182
5183 Grow_continue_command_exit:
5184 if (cfd > -1)
5185 close(cfd);
5186 st->ss->free_super(st);
5187 free_mdstat(mdstat);
5188 sysfs_free(cc);
5189 free(subarray);
5190
5191 return ret_val;
5192 }
5193
5194 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
5195 char *backup_file, int forked, int freeze_reshape)
5196 {
5197 int ret_val = 2;
5198
5199 if (!info->reshape_active)
5200 return ret_val;
5201
5202 if (st->ss->external) {
5203 int cfd = open_dev(st->container_devnm);
5204
5205 if (cfd < 0)
5206 return 1;
5207
5208 st->ss->load_container(st, cfd, st->container_devnm);
5209 close(cfd);
5210 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
5211 st, info, 0, backup_file, 0,
5212 forked, 1 | info->reshape_active,
5213 freeze_reshape);
5214 } else
5215 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
5216 NULL, INVALID_SECTORS, backup_file,
5217 0, forked, 1 | info->reshape_active,
5218 freeze_reshape);
5219
5220 return ret_val;
5221 }
5222
5223 char *make_backup(char *name)
5224 {
5225 char *base = "backup_file-";
5226 int len;
5227 char *fname;
5228
5229 len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1;
5230 fname = xmalloc(len);
5231 sprintf(fname, "%s/%s%s", MAP_DIR, base, name);
5232 return fname;
5233 }
5234
5235 char *locate_backup(char *name)
5236 {
5237 char *fl = make_backup(name);
5238 struct stat stb;
5239
5240 if (stat(fl, &stb) == 0 && S_ISREG(stb.st_mode))
5241 return fl;
5242
5243 free(fl);
5244 return NULL;
5245 }