1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stddef.h>
28 #include <stdint.h>
29 #include <sys/wait.h>
30
31 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
32 #error no endian defined
33 #endif
34 #include "md_u.h"
35 #include "md_p.h"
36
37 int restore_backup(struct supertype *st,
38 struct mdinfo *content,
39 int working_disks,
40 int next_spare,
41 char **backup_filep,
42 int verbose)
43 {
44 int i;
45 int *fdlist;
46 struct mdinfo *dev;
47 int err;
48 int disk_count = next_spare + working_disks;
49 char *backup_file = *backup_filep;
50
51 dprintf("Called restore_backup()\n");
52 fdlist = xmalloc(sizeof(int) * disk_count);
53
54 enable_fds(next_spare);
55 for (i = 0; i < next_spare; i++)
56 fdlist[i] = -1;
57 for (dev = content->devs; dev; dev = dev->next) {
58 char buf[22];
59 int fd;
60
61 sprintf(buf, "%d:%d", dev->disk.major, dev->disk.minor);
62 fd = dev_open(buf, O_RDWR);
63
64 if (dev->disk.raid_disk >= 0)
65 fdlist[dev->disk.raid_disk] = fd;
66 else
67 fdlist[next_spare++] = fd;
68 }
69
70 if (!backup_file) {
71 backup_file = locate_backup(content->sys_name);
72 *backup_filep = backup_file;
73 }
74
75 if (st->ss->external && st->ss->recover_backup)
76 err = st->ss->recover_backup(st, content);
77 else
78 err = Grow_restart(st, content, fdlist, next_spare,
79 backup_file, verbose > 0);
80
81 while (next_spare > 0) {
82 next_spare--;
83 if (fdlist[next_spare] >= 0)
84 close(fdlist[next_spare]);
85 }
86 free(fdlist);
87 if (err) {
88 pr_err("Failed to restore critical section for reshape - sorry.\n");
89 if (!backup_file)
90 pr_err("Possibly you need to specify a --backup-file\n");
91 return 1;
92 }
93
94 dprintf("restore_backup() returns status OK.\n");
95 return 0;
96 }
97
98 int Grow_Add_device(char *devname, int fd, char *newdev)
99 {
100 /* Add a device to an active array.
101 * Currently, just extend a linear array.
102 * This requires writing a new superblock on the
103 * new device, calling the kernel to add the device,
104 * and if that succeeds, update the superblock on
105 * all other devices.
106 * This means that we need to *find* all other devices.
107 */
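/*
 * Usage sketch (illustrative, not a quote of the man page): this path
 * is reached for something like
 *     mdadm --grow /dev/md0 --add /dev/sdXn
 * on a LINEAR array; 'fd' is the open array device and 'newdev' is the
 * device being appended.
 */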
108 struct mdinfo info;
109
110 dev_t rdev;
111 int nfd, fd2;
112 int d, nd;
113 struct supertype *st = NULL;
114 char *subarray = NULL;
115
116 if (md_get_array_info(fd, &info.array) < 0) {
117 pr_err("cannot get array info for %s\n", devname);
118 return 1;
119 }
120
121 if (info.array.level != -1) {
122 pr_err("can only add devices to linear arrays\n");
123 return 1;
124 }
125
126 st = super_by_fd(fd, &subarray);
127 if (!st) {
128 pr_err("cannot handle arrays with superblock version %d\n",
129 info.array.major_version);
130 return 1;
131 }
132
133 if (subarray) {
134 pr_err("Cannot grow linear sub-arrays yet\n");
135 free(subarray);
136 free(st);
137 return 1;
138 }
139
140 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
141 if (nfd < 0) {
142 pr_err("cannot open %s\n", newdev);
143 free(st);
144 return 1;
145 }
146 if (!fstat_is_blkdev(nfd, newdev, &rdev)) {
147 close(nfd);
148 free(st);
149 return 1;
150 }
151 /* now check out all the devices and make sure we can read the
152 * superblock */
153 for (d=0 ; d < info.array.raid_disks ; d++) {
154 mdu_disk_info_t disk;
155 char *dv;
156
157 st->ss->free_super(st);
158
159 disk.number = d;
160 if (md_get_disk_info(fd, &disk) < 0) {
161 pr_err("cannot get device detail for device %d\n", d);
162 close(nfd);
163 free(st);
164 return 1;
165 }
166 dv = map_dev(disk.major, disk.minor, 1);
167 if (!dv) {
168 pr_err("cannot find device file for device %d\n", d);
169 close(nfd);
170 free(st);
171 return 1;
172 }
173 fd2 = dev_open(dv, O_RDWR);
174 if (fd2 < 0) {
175 pr_err("cannot open device file %s\n", dv);
176 close(nfd);
177 free(st);
178 return 1;
179 }
180
181 if (st->ss->load_super(st, fd2, NULL)) {
182 pr_err("cannot find super block on %s\n", dv);
183 close(nfd);
184 close(fd2);
185 free(st);
186 return 1;
187 }
188 close(fd2);
189 }
 190 	/* Ok, looks good. Let's update the superblock and write it out to
191 * newdev.
192 */
193
194 info.disk.number = d;
195 info.disk.major = major(rdev);
196 info.disk.minor = minor(rdev);
197 info.disk.raid_disk = d;
198 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
199 if (st->ss->update_super(st, &info, UOPT_SPEC_LINEAR_GROW_NEW, newdev,
200 0, 0, NULL) != 0) {
201 pr_err("Preparing new metadata failed on %s\n", newdev);
202 close(nfd);
203 return 1;
204 }
205
206 if (st->ss->store_super(st, nfd)) {
207 pr_err("Cannot store new superblock on %s\n", newdev);
208 close(nfd);
209 return 1;
210 }
211 close(nfd);
212
213 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
214 pr_err("Cannot add new disk to this array\n");
215 return 1;
216 }
217 /* Well, that seems to have worked.
218 * Now go through and update all superblocks
219 */
220
221 if (md_get_array_info(fd, &info.array) < 0) {
222 pr_err("cannot get array info for %s\n", devname);
223 return 1;
224 }
225
226 nd = d;
227 for (d=0 ; d < info.array.raid_disks ; d++) {
228 mdu_disk_info_t disk;
229 char *dv;
230
231 disk.number = d;
232 if (md_get_disk_info(fd, &disk) < 0) {
233 pr_err("cannot get device detail for device %d\n", d);
234 return 1;
235 }
236 dv = map_dev(disk.major, disk.minor, 1);
237 if (!dv) {
238 pr_err("cannot find device file for device %d\n", d);
239 return 1;
240 }
241 fd2 = dev_open(dv, O_RDWR);
242 if (fd2 < 0) {
243 pr_err("cannot open device file %s\n", dv);
244 return 1;
245 }
246 if (st->ss->load_super(st, fd2, NULL)) {
247 pr_err("cannot find super block on %s\n", dv);
248 close(fd);
249 close(fd2);
250 return 1;
251 }
252 info.array.raid_disks = nd+1;
253 info.array.nr_disks = nd+1;
254 info.array.active_disks = nd+1;
255 info.array.working_disks = nd+1;
256
257 if (st->ss->update_super(st, &info, UOPT_SPEC_LINEAR_GROW_UPDATE, dv,
258 0, 0, NULL) != 0) {
259 pr_err("Updating metadata failed on %s\n", dv);
260 close(fd2);
261 return 1;
262 }
263
264 if (st->ss->store_super(st, fd2)) {
265 pr_err("Cannot store new superblock on %s\n", dv);
266 close(fd2);
267 return 1;
268 }
269 close(fd2);
270 }
271
272 return 0;
273 }
274
275 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
276 {
277 /*
278 * First check that array doesn't have a bitmap
279 * Then create the bitmap
280 * Then add it
281 *
282 * For internal bitmaps, we need to check the version,
283 * find all the active devices, and write the bitmap block
284 * to all devices
285 */
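/*
 * Usage sketch (illustrative): this is the --grow --bitmap=... path,
 * e.g. "--bitmap=internal", "--bitmap=none" to remove an existing
 * bitmap, "--bitmap=/some/file" for an external bitmap file, or
 * "--bitmap=clustered"; the strings "internal", "none" and "clustered"
 * are matched literally below.
 */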
286 mdu_bitmap_file_t bmf;
287 mdu_array_info_t array;
288 struct supertype *st;
289 char *subarray = NULL;
290 int major = BITMAP_MAJOR_HI;
291 unsigned long long bitmapsize, array_size;
292 struct mdinfo *mdi;
293
294 /*
295 * We only ever get called if s->bitmap_file is != NULL, so this check
296 * is just here to quiet down static code checkers.
297 */
298 if (!s->bitmap_file)
299 return 1;
300
301 if (strcmp(s->bitmap_file, "clustered") == 0)
302 major = BITMAP_MAJOR_CLUSTERED;
303
304 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
305 if (errno == ENOMEM)
306 pr_err("Memory allocation failure.\n");
307 else
308 pr_err("bitmaps not supported by this kernel.\n");
309 return 1;
310 }
311 if (bmf.pathname[0]) {
312 if (strcmp(s->bitmap_file,"none") == 0) {
313 if (ioctl(fd, SET_BITMAP_FILE, -1) != 0) {
314 pr_err("failed to remove bitmap %s\n",
315 bmf.pathname);
316 return 1;
317 }
318 return 0;
319 }
320 pr_err("%s already has a bitmap (%s)\n", devname, bmf.pathname);
321 return 1;
322 }
323 if (md_get_array_info(fd, &array) != 0) {
324 pr_err("cannot get array status for %s\n", devname);
325 return 1;
326 }
327 if (array.state & (1 << MD_SB_BITMAP_PRESENT)) {
328 if (strcmp(s->bitmap_file, "none")==0) {
329 array.state &= ~(1 << MD_SB_BITMAP_PRESENT);
330 if (md_set_array_info(fd, &array) != 0) {
331 if (array.state & (1 << MD_SB_CLUSTERED))
332 pr_err("failed to remove clustered bitmap.\n");
333 else
334 pr_err("failed to remove internal bitmap.\n");
335 return 1;
336 }
337 return 0;
338 }
339 pr_err("bitmap already present on %s\n", devname);
340 return 1;
341 }
342
343 if (strcmp(s->bitmap_file, "none") == 0) {
344 pr_err("no bitmap found on %s\n", devname);
345 return 1;
346 }
347 if (array.level <= 0) {
348 pr_err("Bitmaps not meaningful with level %s\n",
349 map_num(pers, array.level)?:"of this array");
350 return 1;
351 }
352 bitmapsize = array.size;
353 bitmapsize <<= 1;
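/* array.size is in KiB; shifting left by one converts it to 512-byte sectors */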
354 if (get_dev_size(fd, NULL, &array_size) &&
355 array_size > (0x7fffffffULL << 9)) {
356 /* Array is big enough that we cannot trust array.size
357 * try other approaches
358 */
359 bitmapsize = get_component_size(fd);
360 }
361 if (bitmapsize == 0) {
362 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
363 return 1;
364 }
365
366 if (array.level == 10) {
367 int ncopies;
368
369 ncopies = (array.layout & 255) * ((array.layout >> 8) & 255);
370 bitmapsize = bitmapsize * array.raid_disks / ncopies;
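/*
 * Worked example (illustrative): a 4-disk near-2 array (layout 0x102)
 * has ncopies = 2 * 1 = 2, so the bitmap must cover
 * bitmapsize * 4 / 2, i.e. twice the per-device size.
 */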
371
372 if (strcmp(s->bitmap_file, "clustered") == 0 &&
373 !is_near_layout_10(array.layout)) {
374 pr_err("only near layout is supported with clustered raid10\n");
375 return 1;
376 }
377 }
378
379 st = super_by_fd(fd, &subarray);
380 if (!st) {
381 pr_err("Cannot understand version %d.%d\n",
382 array.major_version, array.minor_version);
383 return 1;
384 }
385 if (subarray) {
386 pr_err("Cannot add bitmaps to sub-arrays yet\n");
387 free(subarray);
388 free(st);
389 return 1;
390 }
391
392 mdi = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY);
393 if (mdi) {
394 if (mdi->consistency_policy == CONSISTENCY_POLICY_PPL) {
395 pr_err("Cannot add bitmap to array with PPL\n");
396 free(mdi);
397 free(st);
398 return 1;
399 }
400 free(mdi);
401 }
402
403 if (strcmp(s->bitmap_file, "internal") == 0 ||
404 strcmp(s->bitmap_file, "clustered") == 0) {
405 int rv;
406 int d;
407 int offset_setable = 0;
408 if (st->ss->add_internal_bitmap == NULL) {
409 pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name);
410 return 1;
411 }
412 st->nodes = c->nodes;
413 st->cluster_name = c->homecluster;
414 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
415 if (mdi)
416 offset_setable = 1;
417 for (d = 0; d < st->max_devs; d++) {
418 mdu_disk_info_t disk;
419 char *dv;
420 int fd2;
421
422 disk.number = d;
423 if (md_get_disk_info(fd, &disk) < 0)
424 continue;
425 if (disk.major == 0 && disk.minor == 0)
426 continue;
427 if ((disk.state & (1 << MD_DISK_SYNC)) == 0)
428 continue;
429 dv = map_dev(disk.major, disk.minor, 1);
430 if (!dv)
431 continue;
432 if ((disk.state & (1 << MD_DISK_WRITEMOSTLY)) &&
433 (strcmp(s->bitmap_file, "clustered") == 0)) {
434 pr_err("%s disks marked write-mostly are not supported with clustered bitmap\n",devname);
435 free(mdi);
436 return 1;
437 }
438 fd2 = dev_open(dv, O_RDWR);
439 if (fd2 < 0)
440 continue;
441 rv = st->ss->load_super(st, fd2, NULL);
442 if (!rv) {
443 rv = st->ss->add_internal_bitmap(
444 st, &s->bitmap_chunk, c->delay,
445 s->write_behind, bitmapsize,
446 offset_setable, major);
447 if (!rv) {
448 st->ss->write_bitmap(st, fd2,
449 NodeNumUpdate);
450 } else {
451 pr_err("failed to create internal bitmap - chunksize problem.\n");
452 }
453 } else {
454 pr_err("failed to load super-block.\n");
455 }
456 close(fd2);
457 if (rv) {
458 free(mdi);
459 return 1;
460 }
461 }
462 if (offset_setable) {
463 st->ss->getinfo_super(st, mdi, NULL);
464 if (sysfs_init(mdi, fd, NULL)) {
465 pr_err("failed to initialize sysfs.\n");
466 free(mdi);
467 }
468 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
469 mdi->bitmap_offset);
470 free(mdi);
471 } else {
472 if (strcmp(s->bitmap_file, "clustered") == 0)
473 array.state |= (1 << MD_SB_CLUSTERED);
474 array.state |= (1 << MD_SB_BITMAP_PRESENT);
475 rv = md_set_array_info(fd, &array);
476 }
477 if (rv < 0) {
478 if (errno == EBUSY)
479 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
480 pr_err("failed to set internal bitmap.\n");
481 return 1;
482 }
483 } else {
484 int uuid[4];
485 int bitmap_fd;
486 int d;
487 int max_devs = st->max_devs;
488
489 /* try to load a superblock */
490 for (d = 0; d < max_devs; d++) {
491 mdu_disk_info_t disk;
492 char *dv;
493 int fd2;
494 disk.number = d;
495 if (md_get_disk_info(fd, &disk) < 0)
496 continue;
497 if ((disk.major==0 && disk.minor == 0) ||
498 (disk.state & (1 << MD_DISK_REMOVED)))
499 continue;
500 dv = map_dev(disk.major, disk.minor, 1);
501 if (!dv)
502 continue;
503 fd2 = dev_open(dv, O_RDONLY);
504 if (fd2 >= 0) {
505 if (st->ss->load_super(st, fd2, NULL) == 0) {
506 close(fd2);
507 st->ss->uuid_from_super(st, uuid);
508 break;
509 }
510 close(fd2);
511 }
512 }
513 if (d == max_devs) {
514 pr_err("cannot find UUID for array!\n");
515 return 1;
516 }
517 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid,
518 s->bitmap_chunk, c->delay, s->write_behind,
519 bitmapsize, major)) {
520 return 1;
521 }
522 bitmap_fd = open(s->bitmap_file, O_RDWR);
523 if (bitmap_fd < 0) {
524 pr_err("weird: %s cannot be opened\n", s->bitmap_file);
525 return 1;
526 }
527 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
528 int err = errno;
529 if (errno == EBUSY)
530 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
531 pr_err("Cannot set bitmap file for %s: %s\n",
532 devname, strerror(err));
533 return 1;
534 }
535 }
536
537 return 0;
538 }
539
540 int Grow_consistency_policy(char *devname, int fd, struct context *c, struct shape *s)
541 {
542 struct supertype *st;
543 struct mdinfo *sra;
544 struct mdinfo *sd;
545 char *subarray = NULL;
546 int ret = 0;
547 char container_dev[PATH_MAX];
548 char buf[20];
549
550 if (s->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
551 s->consistency_policy != CONSISTENCY_POLICY_PPL) {
552 pr_err("Operation not supported for consistency policy %s\n",
553 map_num_s(consistency_policies, s->consistency_policy));
554 return 1;
555 }
556
557 st = super_by_fd(fd, &subarray);
558 if (!st)
559 return 1;
560
561 sra = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY|GET_LEVEL|
562 GET_DEVS|GET_STATE);
563 if (!sra) {
564 ret = 1;
565 goto free_st;
566 }
567
568 if (s->consistency_policy == CONSISTENCY_POLICY_PPL &&
569 !st->ss->write_init_ppl) {
570 pr_err("%s metadata does not support PPL\n", st->ss->name);
571 ret = 1;
572 goto free_info;
573 }
574
575 if (sra->array.level != 5) {
576 pr_err("Operation not supported for array level %d\n",
577 sra->array.level);
578 ret = 1;
579 goto free_info;
580 }
581
582 if (sra->consistency_policy == (unsigned)s->consistency_policy) {
583 pr_err("Consistency policy is already %s\n",
584 map_num_s(consistency_policies, s->consistency_policy));
585 ret = 1;
586 goto free_info;
587 } else if (sra->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
588 sra->consistency_policy != CONSISTENCY_POLICY_PPL) {
589 pr_err("Current consistency policy is %s, cannot change to %s\n",
590 map_num_s(consistency_policies, sra->consistency_policy),
591 map_num_s(consistency_policies, s->consistency_policy));
592 ret = 1;
593 goto free_info;
594 }
595
596 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
597 if (sysfs_get_str(sra, NULL, "sync_action", buf, 20) <= 0) {
598 ret = 1;
599 goto free_info;
600 } else if (strcmp(buf, "reshape\n") == 0) {
601 pr_err("PPL cannot be enabled when reshape is in progress\n");
602 ret = 1;
603 goto free_info;
604 }
605 }
606
607 if (subarray) {
608 enum update_opt update;
609
610 if (s->consistency_policy == CONSISTENCY_POLICY_PPL)
611 update = UOPT_PPL;
612 else
613 update = UOPT_NO_PPL;
614
615 sprintf(container_dev, "/dev/%s", st->container_devnm);
616
617 ret = Update_subarray(container_dev, subarray, update, NULL,
618 c->verbose);
619 if (ret)
620 goto free_info;
621 }
622
623 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
624 struct mdinfo info;
625
626 if (subarray) {
627 struct mdinfo *mdi;
628 int cfd;
629
630 cfd = open(container_dev, O_RDWR|O_EXCL);
631 if (cfd < 0) {
632 pr_err("Failed to open %s\n", container_dev);
633 ret = 1;
634 goto free_info;
635 }
636
637 ret = st->ss->load_container(st, cfd, st->container_devnm);
638 close(cfd);
639
640 if (ret) {
641 pr_err("Cannot read superblock for %s\n",
642 container_dev);
643 goto free_info;
644 }
645
646 mdi = st->ss->container_content(st, subarray);
647 info = *mdi;
648 free(mdi);
649 }
650
651 for (sd = sra->devs; sd; sd = sd->next) {
652 int dfd;
653 char *devpath;
654
655 devpath = map_dev(sd->disk.major, sd->disk.minor, 0);
656 dfd = dev_open(devpath, O_RDWR);
657 if (dfd < 0) {
658 pr_err("Failed to open %s\n", devpath);
659 ret = 1;
660 goto free_info;
661 }
662
663 if (!subarray) {
664 ret = st->ss->load_super(st, dfd, NULL);
665 if (ret) {
666 pr_err("Failed to load super-block.\n");
667 close(dfd);
668 goto free_info;
669 }
670
671 ret = st->ss->update_super(st, sra, UOPT_PPL,
672 devname,
673 c->verbose, 0, NULL);
674 if (ret) {
675 close(dfd);
676 st->ss->free_super(st);
677 goto free_info;
678 }
679 st->ss->getinfo_super(st, &info, NULL);
680 }
681
682 ret |= sysfs_set_num(sra, sd, "ppl_sector",
683 info.ppl_sector);
684 ret |= sysfs_set_num(sra, sd, "ppl_size",
685 info.ppl_size);
686
687 if (ret) {
688 pr_err("Failed to set PPL attributes for %s\n",
689 sd->sys_name);
690 close(dfd);
691 st->ss->free_super(st);
692 goto free_info;
693 }
694
695 ret = st->ss->write_init_ppl(st, &info, dfd);
696 if (ret)
697 pr_err("Failed to write PPL\n");
698
699 close(dfd);
700
701 if (!subarray)
702 st->ss->free_super(st);
703
704 if (ret)
705 goto free_info;
706 }
707 }
708
709 ret = sysfs_set_str(sra, NULL, "consistency_policy",
710 map_num_s(consistency_policies,
711 s->consistency_policy));
712 if (ret)
713 pr_err("Failed to change array consistency policy\n");
714
715 free_info:
716 sysfs_free(sra);
717 free_st:
718 free(st);
719 free(subarray);
720
721 return ret;
722 }
723
724 /*
 725  * When reshaping an array we might need to back up some data.
726 * This is written to all spares with a 'super_block' describing it.
727 * The superblock goes 4K from the end of the used space on the
728 * device.
 729  * It is written after the backup is complete.
730 * It has the following structure.
731 */
732
733 static struct mdp_backup_super {
734 char magic[16]; /* md_backup_data-1 or -2 */
735 __u8 set_uuid[16];
736 __u64 mtime;
737 /* start/sizes in 512byte sectors */
738 __u64 devstart; /* address on backup device/file of data */
739 __u64 arraystart;
740 __u64 length;
 741 	__u32 sb_csum;	/* csum of preceding bytes. */
742 __u32 pad1;
743 __u64 devstart2; /* offset in to data of second section */
744 __u64 arraystart2;
745 __u64 length2;
 746 	__u32 sb_csum2;	/* csum of preceding bytes. */
747 __u8 pad[512-68-32];
748 } __attribute__((aligned(512))) bsb, bsb2;
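/*
 * Size check (informational): the fields before 'pad' occupy
 * 16+16+8+8+8+8+4+4+8+8+8+4 = 100 bytes and 512-68-32 = 412, so the
 * structure is exactly one 512-byte sector, with no compiler padding
 * since every field is naturally aligned.
 */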
749
750 static __u32 bsb_csum(char *buf, int len)
751 {
752 int i;
753 int csum = 0;
754 for (i = 0; i < len; i++)
755 csum = (csum<<3) + buf[0];
756 return __cpu_to_le32(csum);
757 }
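/*
 * Usage sketch (assumption, based on the "csum of preceding bytes"
 * field comments above rather than on any one caller):
 *
 *     bsb.sb_csum = bsb_csum((char *)&bsb,
 *                            offsetof(struct mdp_backup_super, sb_csum));
 *
 * Note that the loop above reads buf[0] rather than buf[i]; existing
 * backup files were written with this calculation, so it is presumably
 * kept as-is for on-disk compatibility.
 */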
758
759 static int check_idle(struct supertype *st)
760 {
761 /* Check that all member arrays for this container, or the
762 * container of this array, are idle
763 */
764 char *container = (st->container_devnm[0]
765 ? st->container_devnm : st->devnm);
766 struct mdstat_ent *ent, *e;
767 int is_idle = 1;
768
769 ent = mdstat_read(0, 0);
770 for (e = ent ; e; e = e->next) {
771 if (!is_container_member(e, container))
772 continue;
 773 		/* a frozen array is not idle */
774 if (e->percent >= 0 || e->metadata_version[9] == '-') {
775 is_idle = 0;
776 break;
777 }
778 }
779 free_mdstat(ent);
780 return is_idle;
781 }
782
783 static int freeze_container(struct supertype *st)
784 {
785 char *container = (st->container_devnm[0]
786 ? st->container_devnm : st->devnm);
787
788 if (!check_idle(st))
789 return -1;
790
791 if (block_monitor(container, 1)) {
792 pr_err("failed to freeze container\n");
793 return -2;
794 }
795
796 return 1;
797 }
798
799 static void unfreeze_container(struct supertype *st)
800 {
801 char *container = (st->container_devnm[0]
802 ? st->container_devnm : st->devnm);
803
804 unblock_monitor(container, 1);
805 }
806
807 static int freeze(struct supertype *st)
808 {
809 /* Try to freeze resync/rebuild on this array/container.
810 * Return -1 if the array is busy,
 811 	 * return -2 if the container cannot be frozen,
812 * return 0 if this kernel doesn't support 'frozen'
813 * return 1 if it worked.
814 */
815 if (st->ss->external)
816 return freeze_container(st);
817 else {
818 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
819 int err;
820 char buf[20];
821
822 if (!sra)
823 return -1;
824 /* Need to clear any 'read-auto' status */
825 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
826 strncmp(buf, "read-auto", 9) == 0)
827 sysfs_set_str(sra, NULL, "array_state", "clean");
828
829 err = sysfs_freeze_array(sra);
830 sysfs_free(sra);
831 return err;
832 }
833 }
834
835 static void unfreeze(struct supertype *st)
836 {
837 if (st->ss->external)
838 return unfreeze_container(st);
839 else {
840 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
841 char buf[20];
842
843 if (sra &&
844 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0 &&
845 strcmp(buf, "frozen\n") == 0)
846 sysfs_set_str(sra, NULL, "sync_action", "idle");
847 sysfs_free(sra);
848 }
849 }
850
851 static void wait_reshape(struct mdinfo *sra)
852 {
853 int fd = sysfs_get_fd(sra, NULL, "sync_action");
854 char action[20];
855
856 if (fd < 0)
857 return;
858
859 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
860 strncmp(action, "reshape", 7) == 0)
861 sysfs_wait(fd, NULL);
862 close(fd);
863 }
864
865 static int reshape_super(struct supertype *st, unsigned long long size,
866 int level, int layout, int chunksize, int raid_disks,
867 int delta_disks, char *backup_file, char *dev,
868 int direction, int verbose)
869 {
870 /* nothing extra to check in the native case */
871 if (!st->ss->external)
872 return 0;
873 if (!st->ss->reshape_super || !st->ss->manage_reshape) {
874 pr_err("%s metadata does not support reshape\n",
875 st->ss->name);
876 return 1;
877 }
878
879 return st->ss->reshape_super(st, size, level, layout, chunksize,
880 raid_disks, delta_disks, backup_file, dev,
881 direction, verbose);
882 }
883
884 static void sync_metadata(struct supertype *st)
885 {
886 if (st->ss->external) {
887 if (st->update_tail) {
888 flush_metadata_updates(st);
889 st->update_tail = &st->updates;
890 } else
891 st->ss->sync_metadata(st);
892 }
893 }
894
895 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
896 {
897 /* when dealing with external metadata subarrays we need to be
898 * prepared to handle EAGAIN. The kernel may need to wait for
899 * mdmon to mark the array active so the kernel can handle
900 * allocations/writeback when preparing the reshape action
901 * (md_allow_write()). We temporarily disable safe_mode_delay
902 * to close a race with the array_state going clean before the
903 * next write to raid_disks / stripe_cache_size
904 */
905 char safe[50];
906 int rc;
907
908 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
909 if (!container ||
910 (strcmp(name, "raid_disks") != 0 &&
911 strcmp(name, "stripe_cache_size") != 0))
912 return sysfs_set_num(sra, NULL, name, n);
913
914 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
915 if (rc <= 0)
916 return -1;
917 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
918 rc = sysfs_set_num(sra, NULL, name, n);
919 if (rc < 0 && errno == EAGAIN) {
920 ping_monitor(container);
921 /* if we get EAGAIN here then the monitor is not active
922 * so stop trying
923 */
924 rc = sysfs_set_num(sra, NULL, name, n);
925 }
926 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
927 return rc;
928 }
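/*
 * Usage sketch (illustrative): a reshape that changes the number of
 * devices on an external-metadata array would go through something like
 *
 *     subarray_set_num(container, sra, "raid_disks", new_count);
 *
 * so that the safe_mode_delay/EAGAIN handling above is applied; other
 * sysfs attributes can use sysfs_set_num() directly.
 */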
929
930 int start_reshape(struct mdinfo *sra, int already_running,
931 int before_data_disks, int data_disks, struct supertype *st)
932 {
933 int err;
934 unsigned long long sync_max_to_set;
935
936 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
937 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
938 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
939 sra->reshape_progress);
940 if (before_data_disks <= data_disks)
941 sync_max_to_set = sra->reshape_progress / data_disks;
942 else
943 sync_max_to_set = (sra->component_size * data_disks
944 - sra->reshape_progress) / data_disks;
945
946 if (!already_running)
947 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
948
949 if (st->ss->external)
950 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
951 else
952 err = err ?: sysfs_set_str(sra, NULL, "sync_max", "max");
953
954 if (!already_running && err == 0) {
955 int cnt = 5;
956 do {
957 err = sysfs_set_str(sra, NULL, "sync_action",
958 "reshape");
959 if (err)
960 sleep_for(1, 0, true);
961 } while (err && errno == EBUSY && cnt-- > 0);
962 }
963 return err;
964 }
965
966 void abort_reshape(struct mdinfo *sra)
967 {
968 sysfs_set_str(sra, NULL, "sync_action", "idle");
969 /*
970 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
971 * suspend_hi to decrease as well as increase.")
972 * you could only increase suspend_{lo,hi} unless the region they
973 * covered was empty. So to reset to 0, you need to push suspend_lo
974 * up past suspend_hi first. So to maximize the chance of mdadm
975 * working on all kernels, we want to keep doing that.
976 */
977 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
978 sysfs_set_num(sra, NULL, "suspend_hi", 0);
979 sysfs_set_num(sra, NULL, "suspend_lo", 0);
980 sysfs_set_num(sra, NULL, "sync_min", 0);
981 // It isn't safe to reset sync_max as we aren't monitoring.
982 // Array really should be stopped at this point.
983 }
984
985 int remove_disks_for_takeover(struct supertype *st,
986 struct mdinfo *sra,
987 int layout)
988 {
989 int nr_of_copies;
990 struct mdinfo *remaining;
991 int slot;
992
993 if (st->ss->external) {
994 int rv = 0;
995 struct mdinfo *arrays = st->ss->container_content(st, NULL);
996 /*
 997 		 * container_content returns the list of arrays in the container.
 998 		 * If arrays->next is not NULL there is more than one array in
 999 		 * the container and the operation should be blocked.
1000 */
1001 if (arrays) {
1002 if (arrays->next)
1003 rv = 1;
1004 sysfs_free(arrays);
1005 if (rv) {
1006 pr_err("Error. Cannot perform operation on %s- for this operation "
1007 "it MUST be single array in container\n", st->devnm);
1008 return rv;
1009 }
1010 }
1011 }
1012
1013 if (sra->array.level == 10)
1014 nr_of_copies = layout & 0xff;
1015 else if (sra->array.level == 1)
1016 nr_of_copies = sra->array.raid_disks;
1017 else
1018 return 1;
1019
1020 remaining = sra->devs;
1021 sra->devs = NULL;
1022 /* for each 'copy', select one device and remove from the list. */
1023 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
1024 struct mdinfo **diskp;
1025 int found = 0;
1026
1027 /* Find a working device to keep */
1028 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
1029 struct mdinfo *disk = *diskp;
1030
1031 if (disk->disk.raid_disk < slot)
1032 continue;
1033 if (disk->disk.raid_disk >= slot + nr_of_copies)
1034 continue;
1035 if (disk->disk.state & (1<<MD_DISK_REMOVED))
1036 continue;
1037 if (disk->disk.state & (1<<MD_DISK_FAULTY))
1038 continue;
1039 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
1040 continue;
1041
1042 /* We have found a good disk to use! */
1043 *diskp = disk->next;
1044 disk->next = sra->devs;
1045 sra->devs = disk;
1046 found = 1;
1047 break;
1048 }
1049 if (!found)
1050 break;
1051 }
1052
1053 if (slot < sra->array.raid_disks) {
1054 /* didn't find all slots */
1055 struct mdinfo **e;
1056 e = &remaining;
1057 while (*e)
1058 e = &(*e)->next;
1059 *e = sra->devs;
1060 sra->devs = remaining;
1061 return 1;
1062 }
1063
1064 /* Remove all 'remaining' devices from the array */
1065 while (remaining) {
1066 struct mdinfo *sd = remaining;
1067 remaining = sd->next;
1068
1069 sysfs_set_str(sra, sd, "state", "faulty");
1070 sysfs_set_str(sra, sd, "slot", "none");
1071 		/* for external metadata, disks should be removed by mdmon */
1072 if (!st->ss->external)
1073 sysfs_set_str(sra, sd, "state", "remove");
1074 sd->disk.state |= (1<<MD_DISK_REMOVED);
1075 sd->disk.state &= ~(1<<MD_DISK_SYNC);
1076 sd->next = sra->devs;
1077 sra->devs = sd;
1078 }
1079 return 0;
1080 }
1081
1082 void reshape_free_fdlist(int *fdlist,
1083 unsigned long long *offsets,
1084 int size)
1085 {
1086 int i;
1087
1088 for (i = 0; i < size; i++)
1089 if (fdlist[i] >= 0)
1090 close(fdlist[i]);
1091
1092 free(fdlist);
1093 free(offsets);
1094 }
1095
1096 int reshape_prepare_fdlist(char *devname,
1097 struct mdinfo *sra,
1098 int raid_disks,
1099 int nrdisks,
1100 unsigned long blocks,
1101 char *backup_file,
1102 int *fdlist,
1103 unsigned long long *offsets)
1104 {
1105 int d = 0;
1106 struct mdinfo *sd;
1107
1108 enable_fds(nrdisks);
1109 for (d = 0; d <= nrdisks; d++)
1110 fdlist[d] = -1;
1111 d = raid_disks;
1112 for (sd = sra->devs; sd; sd = sd->next) {
1113 if (sd->disk.state & (1<<MD_DISK_FAULTY))
1114 continue;
1115 if (sd->disk.state & (1<<MD_DISK_SYNC) &&
1116 sd->disk.raid_disk < raid_disks) {
1117 char *dn = map_dev(sd->disk.major, sd->disk.minor, 1);
1118 fdlist[sd->disk.raid_disk] = dev_open(dn, O_RDONLY);
1119 offsets[sd->disk.raid_disk] = sd->data_offset*512;
1120 if (fdlist[sd->disk.raid_disk] < 0) {
1121 pr_err("%s: cannot open component %s\n",
1122 devname, dn ? dn : "-unknown-");
1123 d = -1;
1124 goto release;
1125 }
1126 } else if (backup_file == NULL) {
1127 /* spare */
1128 char *dn = map_dev(sd->disk.major, sd->disk.minor, 1);
1129 fdlist[d] = dev_open(dn, O_RDWR);
1130 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
1131 if (fdlist[d] < 0) {
1132 pr_err("%s: cannot open component %s\n",
1133 devname, dn ? dn : "-unknown-");
1134 d = -1;
1135 goto release;
1136 }
1137 d++;
1138 }
1139 }
1140 release:
1141 return d;
1142 }
1143
1144 int reshape_open_backup_file(char *backup_file,
1145 int fd,
1146 char *devname,
1147 long blocks,
1148 int *fdlist,
1149 unsigned long long *offsets,
1150 char *sys_name,
1151 int restart)
1152 {
1153 /* Return 1 on success, 0 on any form of failure */
1154 /* need to check backup file is large enough */
1155 char buf[512];
1156 struct stat stb;
1157 unsigned int dev;
1158 int i;
1159
1160 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
1161 S_IRUSR | S_IWUSR);
1162 *offsets = 8 * 512;
1163 if (*fdlist < 0) {
1164 pr_err("%s: cannot create backup file %s: %s\n",
1165 devname, backup_file, strerror(errno));
1166 return 0;
1167 }
1168 /* Guard against backup file being on array device.
1169 * If array is partitioned or if LVM etc is in the
1170 * way this will not notice, but it is better than
1171 * nothing.
1172 */
1173 fstat(*fdlist, &stb);
1174 dev = stb.st_dev;
1175 fstat(fd, &stb);
1176 if (stb.st_rdev == dev) {
1177 pr_err("backup file must NOT be on the array being reshaped.\n");
1178 close(*fdlist);
1179 return 0;
1180 }
1181
1182 memset(buf, 0, 512);
1183 for (i=0; i < blocks + 8 ; i++) {
1184 if (write(*fdlist, buf, 512) != 512) {
1185 pr_err("%s: cannot create backup file %s: %s\n",
1186 devname, backup_file, strerror(errno));
1187 return 0;
1188 }
1189 }
1190 if (fsync(*fdlist) != 0) {
1191 pr_err("%s: cannot create backup file %s: %s\n",
1192 devname, backup_file, strerror(errno));
1193 return 0;
1194 }
1195
1196 if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) {
1197 char *bu = make_backup(sys_name);
1198 if (symlink(backup_file, bu))
1199 pr_err("Recording backup file in " MAP_DIR " failed: %s\n",
1200 strerror(errno));
1201 free(bu);
1202 }
1203
1204 return 1;
1205 }
1206
1207 unsigned long compute_backup_blocks(int nchunk, int ochunk,
1208 unsigned int ndata, unsigned int odata)
1209 {
1210 unsigned long a, b, blocks;
1211 	/* So how much do we need to back up?
1212 	 * We need an amount of data which is both a whole number of
1213 	 * old stripes and a whole number of new stripes.
1214 	 * So we need the LCM of the old and new (chunksize*datadisks).
1215 */
1216 a = (ochunk/512) * odata;
1217 b = (nchunk/512) * ndata;
1218 /* Find GCD */
1219 a = GCD(a, b);
1220 /* LCM == product / GCD */
1221 blocks = (unsigned long)(ochunk/512) * (unsigned long)(nchunk/512) *
1222 odata * ndata / a;
1223
1224 return blocks;
1225 }
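/*
 * Worked example (illustrative): reshaping from 3 data disks with 512K
 * chunks to 4 data disks with 256K chunks gives
 *     a = (524288/512) * 3 = 3072 sectors (one old stripe)
 *     b = (262144/512) * 4 = 2048 sectors (one new stripe)
 *     GCD(3072, 2048) = 1024, so blocks = 3072 * 2048 / 1024 = 6144
 * i.e. 6144 sectors (3MiB) is the smallest unit that is a whole number
 * of old stripes (2) and of new stripes (3).
 */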
1226
1227 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
1228 {
1229 /* Based on the current array state in info->array and
1230 * the changes in info->new_* etc, determine:
1231 * - whether the change is possible
1232 * - Intermediate level/raid_disks/layout
1233 * - whether a restriping reshape is needed
1234 * - number of sectors in minimum change unit. This
1235 * will cover a whole number of stripes in 'before' and
1236 * 'after'.
1237 *
1238 * Return message if the change should be rejected
1239 * NULL if the change can be achieved
1240 *
1241 * This can be called as part of starting a reshape, or
1242 * when assembling an array that is undergoing reshape.
1243 */
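/*
 * Worked example (illustrative, following the cases below): growing a
 * 4-device RAID5 to RAID6 with delta_disks left unset gives
 * delta_parity = 1, re->level = 6, re->before.data_disks = 3,
 * info->delta_disks = 1 and re->after.data_disks = 3 + 1 - 1 = 3:
 * the data disks stay the same while the extra device absorbs the new
 * parity, and a restriping reshape is still needed to move to the
 * RAID6 parity layout.
 */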
1244 int near, far, offset, copies;
1245 int new_disks;
1246 int old_chunk, new_chunk;
1247 /* delta_parity records change in number of devices
1248 * caused by level change
1249 */
1250 int delta_parity = 0;
1251
1252 memset(re, 0, sizeof(*re));
1253
1254 /* If a new level not explicitly given, we assume no-change */
1255 if (info->new_level == UnSet)
1256 info->new_level = info->array.level;
1257
1258 if (info->new_chunk)
1259 switch (info->new_level) {
1260 case 0:
1261 case 4:
1262 case 5:
1263 case 6:
1264 case 10:
1265 /* chunk size is meaningful, must divide component_size
1266 * evenly
1267 */
1268 if (info->component_size % (info->new_chunk/512)) {
1269 unsigned long long shrink = info->component_size;
1270 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1271 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1272 info->new_chunk/1024, info->component_size/2);
1273 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1274 devname, shrink/2);
1275 pr_err("will shrink the array so the given chunk size would work.\n");
1276 return "";
1277 }
1278 break;
1279 default:
1280 return "chunk size not meaningful for this level";
1281 }
1282 else
1283 info->new_chunk = info->array.chunk_size;
1284
1285 switch (info->array.level) {
1286 default:
1287 return "No reshape is possibly for this RAID level";
1288 case LEVEL_LINEAR:
1289 if (info->delta_disks != UnSet)
1290 return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
1291 else
1292 return "Only --add is supported for LINEAR, other --grow options are not meaningful";
1293 case 1:
1294 /* RAID1 can convert to RAID1 with different disks, or
1295 * raid5 with 2 disks, or
1296 * raid0 with 1 disk
1297 */
1298 if (info->new_level > 1 && (info->component_size & 7))
1299 return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
1300 if (info->new_level == 0) {
1301 if (info->delta_disks != UnSet &&
1302 info->delta_disks != 0)
1303 return "Cannot change number of disks with RAID1->RAID0 conversion";
1304 re->level = 0;
1305 re->before.data_disks = 1;
1306 re->after.data_disks = 1;
1307 return NULL;
1308 }
1309 if (info->new_level == 1) {
1310 if (info->delta_disks == UnSet)
1311 /* Don't know what to do */
1312 return "no change requested for Growing RAID1";
1313 re->level = 1;
1314 return NULL;
1315 }
1316 if (info->array.raid_disks != 2 && info->new_level == 5)
1317 return "Can only convert a 2-device array to RAID5";
1318 if (info->array.raid_disks == 2 && info->new_level == 5) {
1319 re->level = 5;
1320 re->before.data_disks = 1;
1321 if (info->delta_disks != UnSet &&
1322 info->delta_disks != 0)
1323 re->after.data_disks = 1 + info->delta_disks;
1324 else
1325 re->after.data_disks = 1;
1326 if (re->after.data_disks < 1)
1327 return "Number of disks too small for RAID5";
1328
1329 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1330 info->array.chunk_size = 65536;
1331 break;
1332 }
1333 /* Could do some multi-stage conversions, but leave that to
1334 * later.
1335 */
1336 return "Impossibly level change request for RAID1";
1337
1338 case 10:
1339 /* RAID10 can be converted from near mode to
1340 * RAID0 by removing some devices.
1341 * It can also be reshaped if the kernel supports
1342 * new_data_offset.
1343 */
1344 switch (info->new_level) {
1345 case 0:
1346 if ((info->array.layout & ~0xff) != 0x100)
1347 return "Cannot Grow RAID10 with far/offset layout";
1348 /*
1349 * number of devices must be multiple of
1350 * number of copies
1351 */
1352 if (info->array.raid_disks %
1353 (info->array.layout & 0xff))
1354 return "RAID10 layout too complex for Grow operation";
1355
1356 new_disks = (info->array.raid_disks /
1357 (info->array.layout & 0xff));
1358 if (info->delta_disks == UnSet)
1359 info->delta_disks = (new_disks
1360 - info->array.raid_disks);
1361
1362 if (info->delta_disks !=
1363 new_disks - info->array.raid_disks)
1364 return "New number of raid-devices impossible for RAID10";
1365 if (info->new_chunk &&
1366 info->new_chunk != info->array.chunk_size)
1367 return "Cannot change chunk-size with RAID10 Grow";
1368
1369 /* looks good */
1370 re->level = 0;
1371 re->before.data_disks = new_disks;
1372 re->after.data_disks = re->before.data_disks;
1373 return NULL;
1374
1375 case 10:
1376 near = info->array.layout & 0xff;
1377 far = (info->array.layout >> 8) & 0xff;
1378 offset = info->array.layout & 0x10000;
1379 if (far > 1 && !offset)
1380 return "Cannot reshape RAID10 in far-mode";
1381 copies = near * far;
1382
1383 old_chunk = info->array.chunk_size * far;
1384
1385 if (info->new_layout == UnSet)
1386 info->new_layout = info->array.layout;
1387 else {
1388 near = info->new_layout & 0xff;
1389 far = (info->new_layout >> 8) & 0xff;
1390 offset = info->new_layout & 0x10000;
1391 if (far > 1 && !offset)
1392 return "Cannot reshape RAID10 to far-mode";
1393 if (near * far != copies)
1394 return "Cannot change number of copies when reshaping RAID10";
1395 }
1396 if (info->delta_disks == UnSet)
1397 info->delta_disks = 0;
1398 new_disks = (info->array.raid_disks +
1399 info->delta_disks);
1400
1401 new_chunk = info->new_chunk * far;
1402
1403 re->level = 10;
1404 re->before.layout = info->array.layout;
1405 re->before.data_disks = info->array.raid_disks;
1406 re->after.layout = info->new_layout;
1407 re->after.data_disks = new_disks;
1408 /* For RAID10 we don't do backup but do allow reshape,
1409 * so set backup_blocks to INVALID_SECTORS rather than
1410 * zero.
1411 * And there is no need to synchronise stripes on both
1412 * 'old' and 'new'. So the important
1413 * number is the minimum data_offset difference
1414 * which is the larger of (offset copies * chunk).
1415 */
1416 re->backup_blocks = INVALID_SECTORS;
1417 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1418 if (new_disks < re->before.data_disks &&
1419 info->space_after < re->min_offset_change)
1420 /* Reduce component size by one chunk */
1421 re->new_size = (info->component_size -
1422 re->min_offset_change);
1423 else
1424 re->new_size = info->component_size;
1425 re->new_size = re->new_size * new_disks / copies;
1426 return NULL;
1427
1428 default:
1429 return "RAID10 can only be changed to RAID0";
1430 }
1431 case 0:
1432 /* RAID0 can be converted to RAID10, or to RAID456 */
1433 if (info->new_level == 10) {
1434 if (info->new_layout == UnSet &&
1435 info->delta_disks == UnSet) {
1436 /* Assume near=2 layout */
1437 info->new_layout = 0x102;
1438 info->delta_disks = info->array.raid_disks;
1439 }
1440 if (info->new_layout == UnSet) {
1441 int copies = 1 + (info->delta_disks
1442 / info->array.raid_disks);
1443 if (info->array.raid_disks * (copies-1) !=
1444 info->delta_disks)
1445 return "Impossible number of devices for RAID0->RAID10";
1446 info->new_layout = 0x100 + copies;
1447 }
1448 if (info->delta_disks == UnSet) {
1449 int copies = info->new_layout & 0xff;
1450 if (info->new_layout != 0x100 + copies)
1451 return "New layout impossible for RAID0->RAID10";;
1452 info->delta_disks = (copies - 1) *
1453 info->array.raid_disks;
1454 }
1455 if (info->new_chunk &&
1456 info->new_chunk != info->array.chunk_size)
1457 return "Cannot change chunk-size with RAID0->RAID10";
1458 /* looks good */
1459 re->level = 10;
1460 re->before.data_disks = (info->array.raid_disks +
1461 info->delta_disks);
1462 re->after.data_disks = re->before.data_disks;
1463 re->before.layout = info->new_layout;
1464 return NULL;
1465 }
1466
1467 		/* RAID0 can also convert to RAID0/4/5/6 by first converting to
1468 * a raid4 style layout of the final level.
1469 */
1470 switch (info->new_level) {
1471 case 4:
1472 delta_parity = 1;
1473 case 0:
1474 re->level = 4;
1475 re->before.layout = 0;
1476 break;
1477 case 5:
1478 delta_parity = 1;
1479 re->level = 5;
1480 re->before.layout = ALGORITHM_PARITY_N;
1481 if (info->new_layout == UnSet)
1482 info->new_layout = map_name(r5layout, "default");
1483 break;
1484 case 6:
1485 delta_parity = 2;
1486 re->level = 6;
1487 re->before.layout = ALGORITHM_PARITY_N;
1488 if (info->new_layout == UnSet)
1489 info->new_layout = map_name(r6layout, "default");
1490 break;
1491 default:
1492 return "Impossible level change requested";
1493 }
1494 re->before.data_disks = info->array.raid_disks;
1495 /* determining 'after' layout happens outside this 'switch' */
1496 break;
1497
1498 case 4:
1499 info->array.layout = ALGORITHM_PARITY_N;
1500 case 5:
1501 switch (info->new_level) {
1502 case 0:
1503 delta_parity = -1;
1504 case 4:
1505 re->level = info->array.level;
1506 re->before.data_disks = info->array.raid_disks - 1;
1507 re->before.layout = info->array.layout;
1508 break;
1509 case 5:
1510 re->level = 5;
1511 re->before.data_disks = info->array.raid_disks - 1;
1512 re->before.layout = info->array.layout;
1513 break;
1514 case 6:
1515 delta_parity = 1;
1516 re->level = 6;
1517 re->before.data_disks = info->array.raid_disks - 1;
1518 switch (info->array.layout) {
1519 case ALGORITHM_LEFT_ASYMMETRIC:
1520 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1521 break;
1522 case ALGORITHM_RIGHT_ASYMMETRIC:
1523 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1524 break;
1525 case ALGORITHM_LEFT_SYMMETRIC:
1526 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1527 break;
1528 case ALGORITHM_RIGHT_SYMMETRIC:
1529 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1530 break;
1531 case ALGORITHM_PARITY_0:
1532 re->before.layout = ALGORITHM_PARITY_0_6;
1533 break;
1534 case ALGORITHM_PARITY_N:
1535 re->before.layout = ALGORITHM_PARITY_N_6;
1536 break;
1537 default:
1538 return "Cannot convert an array with this layout";
1539 }
1540 break;
1541 case 1:
1542 if (info->array.raid_disks != 2)
1543 return "Can only convert a 2-device array to RAID1";
1544 if (info->delta_disks != UnSet &&
1545 info->delta_disks != 0)
1546 return "Cannot set raid_disk when converting RAID5->RAID1";
1547 re->level = 1;
1548 info->new_chunk = 0;
1549 return NULL;
1550 default:
1551 return "Impossible level change requested";
1552 }
1553 break;
1554 case 6:
1555 switch (info->new_level) {
1556 case 4:
1557 case 5:
1558 delta_parity = -1;
1559 case 6:
1560 re->level = 6;
1561 re->before.data_disks = info->array.raid_disks - 2;
1562 re->before.layout = info->array.layout;
1563 break;
1564 default:
1565 return "Impossible level change requested";
1566 }
1567 break;
1568 }
1569
1570 /* If we reached here then it looks like a re-stripe is
1571 * happening. We have determined the intermediate level
1572 * and initial raid_disks/layout and stored these in 're'.
1573 *
1574 * We need to deduce the final layout that can be atomically
1575 * converted to the end state.
1576 */
1577 switch (info->new_level) {
1578 case 0:
1579 /* We can only get to RAID0 from RAID4 or RAID5
1580 * with appropriate layout and one extra device
1581 */
1582 if (re->level != 4 && re->level != 5)
1583 return "Cannot covert to RAID0 from this level";
1584
1585 switch (re->level) {
1586 case 4:
1587 re->before.layout = 0;
1588 re->after.layout = 0;
1589 break;
1590 case 5:
1591 re->after.layout = ALGORITHM_PARITY_N;
1592 break;
1593 }
1594 break;
1595
1596 case 4:
1597 /* We can only get to RAID4 from RAID5 */
1598 if (re->level != 4 && re->level != 5)
1599 return "Cannot convert to RAID4 from this level";
1600
1601 switch (re->level) {
1602 case 4:
1603 re->after.layout = 0;
1604 break;
1605 case 5:
1606 re->after.layout = ALGORITHM_PARITY_N;
1607 break;
1608 }
1609 break;
1610
1611 case 5:
1612 /* We get to RAID5 from RAID5 or RAID6 */
1613 if (re->level != 5 && re->level != 6)
1614 return "Cannot convert to RAID5 from this level";
1615
1616 switch (re->level) {
1617 case 5:
1618 if (info->new_layout == UnSet)
1619 re->after.layout = re->before.layout;
1620 else
1621 re->after.layout = info->new_layout;
1622 break;
1623 case 6:
1624 if (info->new_layout == UnSet)
1625 info->new_layout = re->before.layout;
1626
1627 /* after.layout needs to be raid6 version of new_layout */
1628 if (info->new_layout == ALGORITHM_PARITY_N)
1629 re->after.layout = ALGORITHM_PARITY_N;
1630 else {
1631 char layout[40];
1632 char *ls = map_num(r5layout, info->new_layout);
1633 int l;
1634 if (ls) {
1635 /* Current RAID6 layout has a RAID5
1636 * equivalent - good
1637 */
1638 strcat(strcpy(layout, ls), "-6");
1639 l = map_name(r6layout, layout);
1640 if (l == UnSet)
1641 return "Cannot find RAID6 layout to convert to";
1642 } else {
1643 /* Current RAID6 has no equivalent.
1644 * If it is already a '-6' layout we
1645 * can leave it unchanged, else we must
1646 * fail
1647 */
1648 ls = map_num(r6layout,
1649 info->new_layout);
1650 if (!ls ||
1651 strcmp(ls+strlen(ls)-2, "-6") != 0)
1652 return "Please specify new layout";
1653 l = info->new_layout;
1654 }
1655 re->after.layout = l;
1656 }
1657 }
1658 break;
1659
1660 case 6:
1661 /* We must already be at level 6 */
1662 if (re->level != 6)
1663 return "Impossible level change";
1664 if (info->new_layout == UnSet)
1665 re->after.layout = info->array.layout;
1666 else
1667 re->after.layout = info->new_layout;
1668 break;
1669 default:
1670 return "Impossible level change requested";
1671 }
1672 if (info->delta_disks == UnSet)
1673 info->delta_disks = delta_parity;
1674
1675 re->after.data_disks =
1676 (re->before.data_disks + info->delta_disks - delta_parity);
1677
1678 switch (re->level) {
1679 case 6:
1680 re->parity = 2;
1681 break;
1682 case 4:
1683 case 5:
1684 re->parity = 1;
1685 break;
1686 default:
1687 re->parity = 0;
1688 break;
1689 }
1690 /* So we have a restripe operation, we need to calculate the number
1691 * of blocks per reshape operation.
1692 */
1693 re->new_size = info->component_size * re->before.data_disks;
1694 if (info->new_chunk == 0)
1695 info->new_chunk = info->array.chunk_size;
1696 if (re->after.data_disks == re->before.data_disks &&
1697 re->after.layout == re->before.layout &&
1698 info->new_chunk == info->array.chunk_size) {
1699 /* Nothing to change, can change level immediately. */
1700 re->level = info->new_level;
1701 re->backup_blocks = 0;
1702 return NULL;
1703 }
1704 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1705 /* chunk and layout changes make no difference */
1706 re->level = info->new_level;
1707 re->backup_blocks = 0;
1708 return NULL;
1709 }
1710
1711 if (re->after.data_disks == re->before.data_disks &&
1712 get_linux_version() < 2006032)
1713 return "in-place reshape is not safe before 2.6.32 - sorry.";
1714
1715 if (re->after.data_disks < re->before.data_disks &&
1716 get_linux_version() < 2006030)
1717 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1718
1719 re->backup_blocks = compute_backup_blocks(
1720 info->new_chunk, info->array.chunk_size,
1721 re->after.data_disks, re->before.data_disks);
1722 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1723
1724 re->new_size = info->component_size * re->after.data_disks;
1725 return NULL;
1726 }
1727
1728 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1729 char *text_version)
1730 {
1731 struct mdinfo *info;
1732 char *subarray;
1733 int ret_val = -1;
1734
1735 if ((st == NULL) || (sra == NULL))
1736 return ret_val;
1737
1738 if (text_version == NULL)
1739 text_version = sra->text_version;
1740 subarray = strchr(text_version + 1, '/')+1;
1741 info = st->ss->container_content(st, subarray);
1742 if (info) {
1743 unsigned long long current_size = 0;
1744 unsigned long long new_size = info->custom_array_size/2;
1745
1746 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1747 new_size > current_size) {
1748 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1749 < 0)
1750 dprintf("Error: Cannot set array size");
1751 else {
1752 ret_val = 0;
1753 dprintf("Array size changed");
1754 }
1755 dprintf_cont(" from %llu to %llu.\n",
1756 current_size, new_size);
1757 }
1758 sysfs_free(info);
1759 } else
1760 dprintf("Error: set_array_size(): info pointer in NULL\n");
1761
1762 return ret_val;
1763 }
1764
1765 static int reshape_array(char *container, int fd, char *devname,
1766 struct supertype *st, struct mdinfo *info,
1767 int force, struct mddev_dev *devlist,
1768 unsigned long long data_offset,
1769 char *backup_file, int verbose, int forked,
1770 int restart, int freeze_reshape);
1771 static int reshape_container(char *container, char *devname,
1772 int mdfd,
1773 struct supertype *st,
1774 struct mdinfo *info,
1775 int force,
1776 char *backup_file, int verbose,
1777 int forked, int restart, int freeze_reshape);
1778
1779 /**
1780 * prepare_external_reshape() - prepares update on external metadata if supported.
1781 * @devname: Device name.
1782 * @subarray: Subarray.
1783 * @st: Supertype.
1784 * @container: Container.
1785 * @cfd: Container file descriptor.
1786 *
1787 * Function checks that the requested reshape is supported on external metadata,
1788 * and performs an initial check that the container holds the pre-requisite
1789 * spare devices (mdmon owns final validation).
1790 *
1791 * Return: 0 on success, else 1
1792 */
1793 static int prepare_external_reshape(char *devname, char *subarray,
1794 struct supertype *st, char *container,
1795 const int cfd)
1796 {
1797 struct mdinfo *cc = NULL;
1798 struct mdinfo *content = NULL;
1799
1800 if (st->ss->load_container(st, cfd, NULL)) {
1801 pr_err("Cannot read superblock for %s\n", devname);
1802 return 1;
1803 }
1804
1805 if (!st->ss->container_content)
1806 return 1;
1807
1808 cc = st->ss->container_content(st, subarray);
1809 for (content = cc; content ; content = content->next) {
1810 /*
1811 * check if reshape is allowed based on metadata
1812 * indications stored in content.array.status
1813 */
1814 if (is_bit_set(&content->array.state, MD_SB_BLOCK_VOLUME) ||
1815 is_bit_set(&content->array.state, MD_SB_BLOCK_CONTAINER_RESHAPE)) {
1816 pr_err("Cannot reshape arrays in container with unsupported metadata: %s(%s)\n",
1817 devname, container);
1818 goto error;
1819 }
1820 if (content->consistency_policy == CONSISTENCY_POLICY_PPL) {
1821 pr_err("Operation not supported when ppl consistency policy is enabled\n");
1822 goto error;
1823 }
1824 if (content->consistency_policy == CONSISTENCY_POLICY_BITMAP) {
1825 pr_err("Operation not supported when write-intent bitmap consistency policy is enabled\n");
1826 goto error;
1827 }
1828 }
1829 sysfs_free(cc);
1830 if (mdmon_running(container))
1831 st->update_tail = &st->updates;
1832 return 0;
1833 error:
1834 sysfs_free(cc);
1835 return 1;
1836 }
1837
1838 int Grow_reshape(char *devname, int fd,
1839 struct mddev_dev *devlist,
1840 struct context *c, struct shape *s)
1841 {
1842 /* Make some changes in the shape of an array.
1843 * The kernel must support the change.
1844 *
1845 * There are three different changes. Each can trigger
1846 * a resync or recovery so we freeze that until we have
1847 * requested everything (if kernel supports freezing - 2.6.30).
1848 * The steps are:
1849 * - change size (i.e. component_size)
1850 * - change level
1851 * - change layout/chunksize/ndisks
1852 *
1853 * The last can require a reshape. It is different on different
1854 * levels so we need to check the level before actioning it.
1855 	 * Sometimes the level change needs to be requested after the
1856 * reshape (e.g. raid6->raid5, raid5->raid0)
1857 *
1858 */
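/*
 * Typical invocations (sketch): "mdadm --grow /dev/mdX --size=max",
 * "mdadm --grow /dev/mdX --raid-devices=5 --backup-file=/root/backup",
 * or "mdadm --grow /dev/mdX --level=6"; each maps onto one of the three
 * kinds of change described above.
 */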
1859 struct mdu_array_info_s array;
1860 int rv = 0;
1861 struct supertype *st;
1862 char *subarray = NULL;
1863
1864 int frozen = 0;
1865 int changed = 0;
1866 char *container = NULL;
1867 int cfd = -1;
1868
1869 struct mddev_dev *dv;
1870 int added_disks;
1871
1872 struct mdinfo info;
1873 struct mdinfo *sra = NULL;
1874
1875 if (md_get_array_info(fd, &array) < 0) {
1876 pr_err("%s is not an active md array - aborting\n",
1877 devname);
1878 return 1;
1879 }
1880 if (s->level != UnSet && s->chunk) {
1881 pr_err("Cannot change array level in the same operation as changing chunk size.\n");
1882 return 1;
1883 }
1884
1885 if (s->data_offset != INVALID_SECTORS && array.level != 10 &&
1886 (array.level < 4 || array.level > 6)) {
1887 pr_err("--grow --data-offset not yet supported\n");
1888 return 1;
1889 }
1890
1891 if (s->size > 0 &&
1892 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1893 pr_err("cannot change component size at the same time as other changes.\n"
1894 " Change size first, then check data is intact before making other changes.\n");
1895 return 1;
1896 }
1897
1898 if (s->raiddisks && s->raiddisks < array.raid_disks &&
1899 array.level > 1 && get_linux_version() < 2006032 &&
1900 !check_env("MDADM_FORCE_FEWER")) {
1901 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1902 " Please use a newer kernel\n");
1903 return 1;
1904 }
1905
1906 if (array.level > 1 && s->size > 1 &&
1907 (unsigned long long) (array.chunk_size / 1024) > s->size) {
1908 pr_err("component size must be larger than chunk size.\n");
1909 return 1;
1910 }
1911
1912 st = super_by_fd(fd, &subarray);
1913 if (!st) {
1914 pr_err("Unable to determine metadata format for %s\n", devname);
1915 return 1;
1916 }
1917 if (s->raiddisks > st->max_devs) {
1918 pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs);
1919 return 1;
1920 }
1921 if (s->level == 0 && (array.state & (1 << MD_SB_BITMAP_PRESENT)) &&
1922 !(array.state & (1 << MD_SB_CLUSTERED)) && !st->ss->external) {
1923 array.state &= ~(1 << MD_SB_BITMAP_PRESENT);
1924 if (md_set_array_info(fd, &array) != 0) {
1925 pr_err("failed to remove internal bitmap.\n");
1926 return 1;
1927 }
1928 }
1929
1930 if (st->ss->external) {
1931 if (subarray) {
1932 container = st->container_devnm;
1933 cfd = open_dev_excl(st->container_devnm);
1934 } else {
1935 container = st->devnm;
1936 close(fd);
1937 cfd = open_dev_excl(st->devnm);
1938 fd = cfd;
1939 }
1940 if (cfd < 0) {
1941 pr_err("Unable to open container for %s\n", devname);
1942 free(subarray);
1943 return 1;
1944 }
1945
1946 rv = prepare_external_reshape(devname, subarray, st,
1947 container, cfd);
1948 if (rv > 0) {
1949 free(subarray);
1950 close(cfd);
1951 goto release;
1952 }
1953
1954 if (s->raiddisks && subarray) {
1955 pr_err("--raid-devices operation can be performed on a container only\n");
1956 close(cfd);
1957 free(subarray);
1958 return 1;
1959 }
1960 }
1961
1962 added_disks = 0;
1963 for (dv = devlist; dv; dv = dv->next)
1964 added_disks++;
1965 if (s->raiddisks > array.raid_disks &&
1966 array.spare_disks + added_disks <
1967 (s->raiddisks - array.raid_disks) &&
1968 !c->force) {
1969 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
1970 " Use --force to over-ride this check.\n",
1971 s->raiddisks - array.raid_disks,
1972 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1973 array.spare_disks + added_disks);
1974 return 1;
1975 }
1976
1977 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS |
1978 GET_STATE | GET_VERSION);
1979 if (sra) {
1980 if (st->ss->external && subarray == NULL) {
1981 array.level = LEVEL_CONTAINER;
1982 sra->array.level = LEVEL_CONTAINER;
1983 }
1984 } else {
1985 pr_err("failed to read sysfs parameters for %s\n",
1986 devname);
1987 return 1;
1988 }
1989 frozen = freeze(st);
1990 if (frozen < -1) {
1991 /* freeze() already spewed the reason */
1992 sysfs_free(sra);
1993 return 1;
1994 } else if (frozen < 0) {
1995 pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname);
1996 sysfs_free(sra);
1997 return 1;
1998 }
1999
2000 /* ========= set size =============== */
2001 if (s->size > 0 &&
2002 (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
2003 unsigned long long orig_size = get_component_size(fd)/2;
2004 unsigned long long min_csize;
2005 struct mdinfo *mdi;
2006 int raid0_takeover = 0;
2007
2008 if (orig_size == 0)
2009 orig_size = (unsigned) array.size;
2010
2011 if (orig_size == 0) {
2012 pr_err("Cannot set device size in this type of array.\n");
2013 rv = 1;
2014 goto release;
2015 }
2016
2017 if (array.level == 0) {
2018 pr_err("Component size change is not supported for RAID0\n");
2019 rv = 1;
2020 goto release;
2021 }
2022
2023 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
2024 devname, APPLY_METADATA_CHANGES,
2025 c->verbose > 0)) {
2026 rv = 1;
2027 goto release;
2028 }
2029 sync_metadata(st);
2030 if (st->ss->external) {
2031 /* the metadata can impose a size limitation;
2032 * update the size value according to the metadata information
2033 */
2034 struct mdinfo *sizeinfo =
2035 st->ss->container_content(st, subarray);
2036 if (sizeinfo) {
2037 unsigned long long new_size =
2038 sizeinfo->custom_array_size/2;
2039 int data_disks = get_data_disks(
2040 sizeinfo->array.level,
2041 sizeinfo->array.layout,
2042 sizeinfo->array.raid_disks);
2043 new_size /= data_disks;
2044 dprintf("Metadata size correction from %llu to %llu (%llu)\n",
2045 orig_size, new_size,
2046 new_size * data_disks);
2047 s->size = new_size;
2048 sysfs_free(sizeinfo);
2049 }
2050 }
2051
2052 /* Update the size of each member device in case
2053 * they have been resized. This will never reduce
2054 * below the current used-size. The "size" attribute
2055 * understands '0' to mean 'max'.
2056 */
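/* (For illustration: "mdadm --grow /dev/mdX --size=max" reaches this
 * point with s->size == MAX_SIZE, so 0 is written to each member's
 * "size" attribute below; /dev/mdX is just a placeholder name.)
 */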
2057 min_csize = 0;
2058 for (mdi = sra->devs; mdi; mdi = mdi->next) {
2059 sysfs_set_num(sra, mdi, "size",
2060 s->size == MAX_SIZE ? 0 : s->size);
2061 if (array.not_persistent == 0 &&
2062 array.major_version == 0 &&
2063 get_linux_version() < 3001000) {
2064 /* Dangerous to allow size to exceed 2TB */
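/* (The per-device sysfs "size" attribute read here is in KiB, so the
 * 2ULL*1024*1024*1024 cap below is 2^31 KiB == 2TiB.)
 */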
2065 unsigned long long csize;
2066 if (sysfs_get_ll(sra, mdi, "size",
2067 &csize) == 0) {
2068 if (csize >= 2ULL*1024*1024*1024)
2069 csize = 2ULL*1024*1024*1024;
2070 if ((min_csize == 0 ||
2071 (min_csize > csize)))
2072 min_csize = csize;
2073 }
2074 }
2075 }
2076 if (min_csize && s->size > min_csize) {
2077 pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n");
2078 rv = 1;
2079 goto size_change_error;
2080 }
2081 if (min_csize && s->size == MAX_SIZE) {
2082 /* Don't let the kernel choose a size - it will get
2083 * it wrong
2084 */
2085 pr_err("Limited v0.90 array to 2TB per device\n");
2086 s->size = min_csize;
2087 }
2088 if (st->ss->external) {
2089 if (sra->array.level == 0) {
2090 rv = sysfs_set_str(sra, NULL, "level", "raid5");
2091 if (!rv) {
2092 raid0_takeover = 1;
2093 /* get array parameters after takeover
2094 * to change one parameter at time only
2095 */
2096 rv = md_get_array_info(fd, &array);
2097 }
2098 }
2099 /* make sure mdmon is
2100 * aware of the new level */
2101 if (!mdmon_running(st->container_devnm))
2102 start_mdmon(st->container_devnm);
2103 ping_monitor(container);
2104 if (mdmon_running(st->container_devnm) &&
2105 st->update_tail == NULL)
2106 st->update_tail = &st->updates;
2107 }
2108
2109 if (s->size == MAX_SIZE)
2110 s->size = 0;
2111 array.size = s->size;
2112 if (s->size & ~INT32_MAX) {
2113 /* got truncated to 32bit, write to
2114 * component_size instead
2115 */
2116 if (sra)
2117 rv = sysfs_set_num(sra, NULL,
2118 "component_size", s->size);
2119 else
2120 rv = -1;
2121 } else {
2122 rv = md_set_array_info(fd, &array);
2123
2124 /* manage array size when it is managed externally
2125 */
2126 if ((rv == 0) && st->ss->external)
2127 rv = set_array_size(st, sra, sra->text_version);
2128 }
2129
2130 if (raid0_takeover) {
2131 /* do not resync the non-existent parity,
2132 * we will drop it anyway
2133 */
2134 sysfs_set_str(sra, NULL, "sync_action", "frozen");
2135 /* go back to raid0, drop parity disk
2136 */
2137 sysfs_set_str(sra, NULL, "level", "raid0");
2138 md_get_array_info(fd, &array);
2139 }
2140
2141 size_change_error:
2142 if (rv != 0) {
2143 int err = errno;
2144
2145 /* restore metadata */
2146 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
2147 UnSet, NULL, devname,
2148 ROLLBACK_METADATA_CHANGES,
2149 c->verbose) == 0)
2150 sync_metadata(st);
2151 pr_err("Cannot set device size for %s: %s\n",
2152 devname, strerror(err));
2153 if (err == EBUSY &&
2154 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2155 cont_err("Bitmap must be removed before size can be changed\n");
2156 rv = 1;
2157 goto release;
2158 }
2159 if (s->assume_clean) {
2160 /* This will fail on kernels older than 3.0 unless
2161 * a backport has been arranged.
2162 */
2163 if (sra == NULL ||
2164 sysfs_set_str(sra, NULL, "resync_start",
2165 "none") < 0)
2166 pr_err("--assume-clean not supported with --grow on this kernel\n");
2167 }
2168 md_get_array_info(fd, &array);
2169 s->size = get_component_size(fd)/2;
2170 if (s->size == 0)
2171 s->size = array.size;
2172 if (c->verbose >= 0) {
2173 if (s->size == orig_size)
2174 pr_err("component size of %s unchanged at %lluK\n",
2175 devname, s->size);
2176 else
2177 pr_err("component size of %s has been set to %lluK\n",
2178 devname, s->size);
2179 }
2180 changed = 1;
2181 } else if (!is_container(array.level)) {
2182 s->size = get_component_size(fd)/2;
2183 if (s->size == 0)
2184 s->size = array.size;
2185 }
2186
2187 /* See if there is anything else to do */
2188 if ((s->level == UnSet || s->level == array.level) &&
2189 (s->layout_str == NULL) &&
2190 (s->chunk == 0 || s->chunk == array.chunk_size) &&
2191 s->data_offset == INVALID_SECTORS &&
2192 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
2193 /* Nothing more to do */
2194 if (!changed && c->verbose >= 0)
2195 pr_err("%s: no change requested\n", devname);
2196 goto release;
2197 }
2198
2199 /* ========= check for RAID10/RAID1 -> RAID0 conversion ===============
2200 * the current implementation assumes that the following conditions are met:
2201 * - RAID10:
2202 * - far_copies == 1
2203 * - near_copies == 2
2204 */
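/* (In md's RAID10 layout word the low byte holds the number of near
 * copies and the next byte the number of far copies, so the literal
 * ((1 << 8) + 2) below means far_copies == 1, near_copies == 2.)
 */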
2205 if ((s->level == 0 && array.level == 10 && sra &&
2206 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
2207 (s->level == 0 && array.level == 1 && sra)) {
2208 int err;
2209
2210 err = remove_disks_for_takeover(st, sra, array.layout);
2211 if (err) {
2212 dprintf("Array cannot be reshaped\n");
2213 if (cfd > -1)
2214 close(cfd);
2215 rv = 1;
2216 goto release;
2217 }
2218 /* Make sure mdmon has seen the device removal
2219 * and updated metadata before we continue with
2220 * level change
2221 */
2222 if (container)
2223 ping_monitor(container);
2224 }
2225
2226 memset(&info, 0, sizeof(info));
2227 info.array = array;
2228 if (sysfs_init(&info, fd, NULL)) {
2229 pr_err("failed to initialize sysfs.\n");
2230 rv = 1;
2231 goto release;
2232 }
2233 strcpy(info.text_version, sra->text_version);
2234 info.component_size = s->size*2;
2235 info.new_level = s->level;
2236 info.new_chunk = s->chunk * 1024;
2237 if (is_container(info.array.level)) {
2238 info.delta_disks = UnSet;
2239 info.array.raid_disks = s->raiddisks;
2240 } else if (s->raiddisks)
2241 info.delta_disks = s->raiddisks - info.array.raid_disks;
2242 else
2243 info.delta_disks = UnSet;
2244 if (s->layout_str == NULL) {
2245 info.new_layout = UnSet;
2246 if (info.array.level == 6 &&
2247 (info.new_level == 6 || info.new_level == UnSet) &&
2248 info.array.layout >= 16) {
2249 pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname);
2250 cont_err("during the reshape, please specify --layout=preserve\n");
2251 cont_err("If you want to change it, specify a layout or use --layout=normalise\n");
2252 rv = 1;
2253 goto release;
2254 }
2255 } else if (strcmp(s->layout_str, "normalise") == 0 ||
2256 strcmp(s->layout_str, "normalize") == 0) {
2257 /* If we have a -6 RAID6 layout, remove the '-6'. */
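/* (e.g. a "left-symmetric-6" layout is normalised to "left-symmetric".) */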
2258 info.new_layout = UnSet;
2259 if (info.array.level == 6 && info.new_level == UnSet) {
2260 char l[40], *h;
2261 strcpy(l, map_num_s(r6layout, info.array.layout));
2262 h = strrchr(l, '-');
2263 if (h && strcmp(h, "-6") == 0) {
2264 *h = 0;
2265 info.new_layout = map_name(r6layout, l);
2266 }
2267 } else {
2268 pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str);
2269 rv = 1;
2270 goto release;
2271 }
2272 } else if (strcmp(s->layout_str, "preserve") == 0) {
2273 /* This means that a non-standard RAID6 layout
2274 * is OK.
2275 * In particular:
2276 * - When reshaping a RAID6 (e.g. adding a device)
2277 * which is in a non-standard layout, it is OK
2278 * to preserve that layout.
2279 * - When converting a RAID5 to RAID6, leave it in
2280 * the XXX-6 layout, don't re-layout.
2281 */
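/* (e.g. with --layout=preserve a left-symmetric RAID5 grown to RAID6
 * ends up in the "left-symmetric-6" layout instead of being restriped
 * to a standard RAID6 layout.)
 */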
2282 if (info.array.level == 6 && info.new_level == UnSet)
2283 info.new_layout = info.array.layout;
2284 else if (info.array.level == 5 && info.new_level == 6) {
2285 char l[40];
2286 strcpy(l, map_num_s(r5layout, info.array.layout));
2287 strcat(l, "-6");
2288 info.new_layout = map_name(r6layout, l);
2289 } else {
2290 pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str);
2291 rv = 1;
2292 goto release;
2293 }
2294 } else {
2295 int l = info.new_level;
2296 if (l == UnSet)
2297 l = info.array.level;
2298 switch (l) {
2299 case 5:
2300 info.new_layout = map_name(r5layout, s->layout_str);
2301 break;
2302 case 6:
2303 info.new_layout = map_name(r6layout, s->layout_str);
2304 break;
2305 case 10:
2306 info.new_layout = parse_layout_10(s->layout_str);
2307 break;
2308 case LEVEL_FAULTY:
2309 info.new_layout = parse_layout_faulty(s->layout_str);
2310 break;
2311 default:
2312 pr_err("layout not meaningful with this level\n");
2313 rv = 1;
2314 goto release;
2315 }
2316 if (info.new_layout == UnSet) {
2317 pr_err("layout %s not understood for this level\n",
2318 s->layout_str);
2319 rv = 1;
2320 goto release;
2321 }
2322 }
2323
2324 if (array.level == LEVEL_FAULTY) {
2325 if (s->level != UnSet && s->level != array.level) {
2326 pr_err("cannot change level of Faulty device\n");
2327 rv = 1;
2328 }
2329 if (s->chunk) {
2330 pr_err("cannot set chunksize of Faulty device\n");
2331 rv = 1;
2332 }
2333 if (s->raiddisks && s->raiddisks != 1) {
2334 pr_err("cannot set raid_disks of Faulty device\n");
2335 rv = 1;
2336 }
2337 if (s->layout_str) {
2338 if (md_get_array_info(fd, &array) != 0) {
2339 dprintf("Cannot get array information.\n");
2340 goto release;
2341 }
2342 array.layout = info.new_layout;
2343 if (md_set_array_info(fd, &array) != 0) {
2344 pr_err("failed to set new layout\n");
2345 rv = 1;
2346 } else if (c->verbose >= 0)
2347 printf("layout for %s set to %d\n",
2348 devname, array.layout);
2349 }
2350 } else if (is_container(array.level)) {
2351 /* This change is to be applied to every array in the
2352 * container. This is only needed when the metadata imposes
2353 * constraints on the various arrays in the container.
2354 * Currently we only know that IMSM requires all arrays
2355 * to have the same number of devices so changing the
2356 * number of devices (On-Line Capacity Expansion) must be
2357 * performed at the level of the container
2358 */
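/* (For example - device name assumed - "mdadm --grow /dev/md/imsm0
 * --raid-devices=5" on an IMSM container takes this path, and
 * reshape_container() then reshapes each member volume in turn.)
 */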
2359 close_fd(&fd);
2360 rv = reshape_container(container, devname, -1, st, &info,
2361 c->force, c->backup_file, c->verbose,
2362 0, 0, 0);
2363 frozen = 0;
2364 } else {
2365 /* get spare devices from external metadata
2366 */
2367 if (st->ss->external) {
2368 struct mdinfo *info2;
2369
2370 info2 = st->ss->container_content(st, subarray);
2371 if (info2) {
2372 info.array.spare_disks =
2373 info2->array.spare_disks;
2374 sysfs_free(info2);
2375 }
2376 }
2377
2378 /* Impose these changes on a single array. First
2379 * check that the metadata is OK with the change. */
2380
2381 if (reshape_super(st, 0, info.new_level,
2382 info.new_layout, info.new_chunk,
2383 info.array.raid_disks, info.delta_disks,
2384 c->backup_file, devname,
2385 APPLY_METADATA_CHANGES, c->verbose)) {
2386 rv = 1;
2387 goto release;
2388 }
2389 sync_metadata(st);
2390 rv = reshape_array(container, fd, devname, st, &info, c->force,
2391 devlist, s->data_offset, c->backup_file,
2392 c->verbose, 0, 0, 0);
2393 frozen = 0;
2394 }
2395 release:
2396 sysfs_free(sra);
2397 if (frozen > 0)
2398 unfreeze(st);
2399 return rv;
2400 }
2401
2402 /* verify_reshape_position()
2403 * Checks that the reshape position recorded in the metadata is not
2404 * farther along than the position reported by md.
2405 * Return value:
2406 * 0 : sysfs entry is not valid
2407 * this can happen when the reshape has not been started yet
2408 * (reshape_array() starts it), or a raid0 array is still before takeover
2409 * -1 : error, reshape position is obviously wrong
2410 * 1 : success, reshape progress correct or updated
2411 */
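/* (For illustration: on a 6-device RAID6 (4 data disks) a sync_max
 * value of 1000 is scaled to an array position of 1000 * 4 = 4000
 * below and compared against reshape_progress.)
 */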
2412 static int verify_reshape_position(struct mdinfo *info, int level)
2413 {
2414 int ret_val = 0;
2415 char buf[40];
2416 int rv;
2417
2418 /* read sync_max, failure can mean raid0 array */
2419 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2420
2421 if (rv > 0) {
2422 char *ep;
2423 unsigned long long position = strtoull(buf, &ep, 0);
2424
2425 dprintf("Read sync_max sysfs entry is: %s\n", buf);
2426 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2427 position *= get_data_disks(level,
2428 info->new_layout,
2429 info->array.raid_disks);
2430 if (info->reshape_progress < position) {
2431 dprintf("Corrected reshape progress (%llu) to md position (%llu)\n",
2432 info->reshape_progress, position);
2433 info->reshape_progress = position;
2434 ret_val = 1;
2435 } else if (info->reshape_progress > position) {
2436 pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n",
2437 position, info->reshape_progress);
2438 ret_val = -1;
2439 } else {
2440 dprintf("Reshape position in md and metadata are the same;");
2441 ret_val = 1;
2442 }
2443 }
2444 } else if (rv == 0) {
2445 /* the sysfs entry is valid but has zero-length content;
2446 * report that as an error
2447 */
2448 ret_val = -1;
2449 }
2450
2451 return ret_val;
2452 }
2453
2454 static unsigned long long choose_offset(unsigned long long lo,
2455 unsigned long long hi,
2456 unsigned long long min,
2457 unsigned long long max)
2458 {
2459 /* Choose a new offset between hi and lo.
2460 * It must be between min and max, but
2461 * we would prefer something near the middle of hi/lo, and also
2462 * prefer to be aligned to a big power of 2.
2463 *
2464 * So we start with the middle, then for each bit,
2465 * starting at '1' and increasing, if it is set, we either
2466 * add it or subtract it if possible, preferring the option
2467 * which is furthest from the boundary.
2468 *
2469 * We stop once we get a 1MB alignment. As units are in sectors,
2470 * 1MB = 2*1024 sectors.
2471 */
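/* (Worked example, values in sectors: with lo=1000, hi=9000, min=1500,
 * max=8500 the midpoint 5000 is nudged to 4992, then 5120, then 4096 -
 * a 1MB-aligned offset comfortably inside both limits.)
 */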
2472 unsigned long long choice = (lo + hi) / 2;
2473 unsigned long long bit = 1;
2474
2475 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2476 unsigned long long bigger, smaller;
2477 if (! (bit & choice))
2478 continue;
2479 bigger = choice + bit;
2480 smaller = choice - bit;
2481 if (bigger > max && smaller < min)
2482 break;
2483 if (bigger > max)
2484 choice = smaller;
2485 else if (smaller < min)
2486 choice = bigger;
2487 else if (hi - bigger > smaller - lo)
2488 choice = bigger;
2489 else
2490 choice = smaller;
2491 }
2492 return choice;
2493 }
2494
2495 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2496 char *devname, int delta_disks,
2497 unsigned long long data_offset,
2498 unsigned long long min,
2499 int can_fallback)
2500 {
2501 struct mdinfo *sd;
2502 int dir = 0;
2503 int err = 0;
2504 unsigned long long before, after;
2505
2506 /* Need to find min space before and after so same is used
2507 * on all devices
2508 */
2509 before = UINT64_MAX;
2510 after = UINT64_MAX;
2511 for (sd = sra->devs; sd; sd = sd->next) {
2512 char *dn;
2513 int dfd;
2514 int rv;
2515 struct supertype *st2;
2516 struct mdinfo info2;
2517
2518 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2519 continue;
2520 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2521 dfd = dev_open(dn, O_RDONLY);
2522 if (dfd < 0) {
2523 pr_err("%s: cannot open component %s\n",
2524 devname, dn ? dn : "-unknown-");
2525 goto release;
2526 }
2527 st2 = dup_super(st);
2528 rv = st2->ss->load_super(st2, dfd, NULL);
2529 close(dfd);
2530 if (rv) {
2531 free(st2);
2532 pr_err("%s: cannot get superblock from %s\n",
2533 devname, dn);
2534 goto release;
2535 }
2536 st2->ss->getinfo_super(st2, &info2, NULL);
2537 st2->ss->free_super(st2);
2538 free(st2);
2539 if (info2.space_before == 0 &&
2540 info2.space_after == 0) {
2541 /* Metadata doesn't support data_offset changes */
2542 if (!can_fallback)
2543 pr_err("%s: Metadata version doesn't support data_offset changes\n",
2544 devname);
2545 goto fallback;
2546 }
2547 if (before > info2.space_before)
2548 before = info2.space_before;
2549 if (after > info2.space_after)
2550 after = info2.space_after;
2551
2552 if (data_offset != INVALID_SECTORS) {
2553 if (dir == 0) {
2554 if (info2.data_offset == data_offset) {
2555 pr_err("%s: already has that data_offset\n",
2556 dn);
2557 goto release;
2558 }
2559 if (data_offset < info2.data_offset)
2560 dir = -1;
2561 else
2562 dir = 1;
2563 } else if ((data_offset <= info2.data_offset &&
2564 dir == 1) ||
2565 (data_offset >= info2.data_offset &&
2566 dir == -1)) {
2567 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2568 dn);
2569 goto release;
2570 }
2571 }
2572 }
2573 if (before == UINT64_MAX)
2574 /* impossible really, there must be no devices */
2575 return 1;
2576
2577 for (sd = sra->devs; sd; sd = sd->next) {
2578 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2579 unsigned long long new_data_offset;
2580
2581 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2582 continue;
2583 if (delta_disks < 0) {
2584 /* Don't need any space as array is shrinking
2585 * just move data_offset up by min
2586 */
2587 if (data_offset == INVALID_SECTORS)
2588 new_data_offset = sd->data_offset + min;
2589 else {
2590 if (data_offset < sd->data_offset + min) {
2591 pr_err("--data-offset too small for %s\n",
2592 dn);
2593 goto release;
2594 }
2595 new_data_offset = data_offset;
2596 }
2597 } else if (delta_disks > 0) {
2598 /* need space before */
2599 if (before < min) {
2600 if (can_fallback)
2601 goto fallback;
2602 pr_err("Insufficient head-space for reshape on %s\n",
2603 dn);
2604 goto release;
2605 }
2606 if (data_offset == INVALID_SECTORS)
2607 new_data_offset = sd->data_offset - min;
2608 else {
2609 if (data_offset > sd->data_offset - min) {
2610 pr_err("--data-offset too large for %s\n",
2611 dn);
2612 goto release;
2613 }
2614 new_data_offset = data_offset;
2615 }
2616 } else {
2617 if (dir == 0) {
2618 /* can move up or down. If 'data_offset'
2619 * was set we would have already decided,
2620 * so just choose direction with most space.
2621 */
2622 if (before > after)
2623 dir = -1;
2624 else
2625 dir = 1;
2626 }
2627 sysfs_set_str(sra, NULL, "reshape_direction",
2628 dir == 1 ? "backwards" : "forwards");
2629 if (dir > 0) {
2630 /* Increase data offset */
2631 if (after < min) {
2632 if (can_fallback)
2633 goto fallback;
2634 pr_err("Insufficient tail-space for reshape on %s\n",
2635 dn);
2636 goto release;
2637 }
2638 if (data_offset != INVALID_SECTORS &&
2639 data_offset < sd->data_offset + min) {
2640 pr_err("--data-offset too small on %s\n",
2641 dn);
2642 goto release;
2643 }
2644 if (data_offset != INVALID_SECTORS)
2645 new_data_offset = data_offset;
2646 else
2647 new_data_offset = choose_offset(sd->data_offset,
2648 sd->data_offset + after,
2649 sd->data_offset + min,
2650 sd->data_offset + after);
2651 } else {
2652 /* Decrease data offset */
2653 if (before < min) {
2654 if (can_fallback)
2655 goto fallback;
2656 pr_err("insufficient head-room on %s\n",
2657 dn);
2658 goto release;
2659 }
2660 if (data_offset != INVALID_SECTORS &&
2661 data_offset > sd->data_offset - min) {
2662 pr_err("--data-offset too large on %s\n",
2663 dn);
2664 goto release;
2665 }
2666 if (data_offset != INVALID_SECTORS)
2667 new_data_offset = data_offset;
2668 else
2669 new_data_offset = choose_offset(sd->data_offset - before,
2670 sd->data_offset,
2671 sd->data_offset - before,
2672 sd->data_offset - min);
2673 }
2674 }
2675 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2676 if (err < 0 && errno == E2BIG) {
2677 /* try again after increasing data size to max */
2678 err = sysfs_set_num(sra, sd, "size", 0);
2679 if (err < 0 && errno == EINVAL &&
2680 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2681 /* some kernels have a bug where you cannot
2682 * use '0' on spare devices. */
2683 sysfs_set_num(sra, sd, "size",
2684 (sra->component_size + after)/2);
2685 }
2686 err = sysfs_set_num(sra, sd, "new_offset",
2687 new_data_offset);
2688 }
2689 if (err < 0) {
2690 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2691 pr_err("data-offset is too big for %s\n", dn);
2692 goto release;
2693 }
2694 if (sd == sra->devs &&
2695 (errno == ENOENT || errno == E2BIG))
2696 /* Early kernel, no 'new_offset' file,
2697 * or kernel doesn't like us.
2698 * For RAID5/6 this is not fatal
2699 */
2700 return 1;
2701 pr_err("Cannot set new_offset for %s\n", dn);
2702 break;
2703 }
2704 }
2705 return err;
2706 release:
2707 return -1;
2708 fallback:
2709 /* Just use a backup file */
2710 return 1;
2711 }
2712
2713 static int raid10_reshape(char *container, int fd, char *devname,
2714 struct supertype *st, struct mdinfo *info,
2715 struct reshape *reshape,
2716 unsigned long long data_offset,
2717 int force, int verbose)
2718 {
2719 /* Changing raid_disks, layout, chunksize or possibly
2720 * just data_offset for a RAID10.
2721 * We must always change data_offset. We change by at least
2722 * ->min_offset_change which is the largest of the old and new
2723 * chunk sizes.
2724 * If raid_disks is increasing, then data_offset must decrease
2725 * by at least this copy size.
2726 * If raid_disks is unchanged, data_offset must increase or
2727 * decrease by at least min_offset_change but preferably by much more.
2728 * We choose half of the available space.
2729 * If raid_disks is decreasing, data_offset must increase by
2730 * at least min_offset_change. To allow for this, component_size
2731 * must be decreased by the same amount.
2732 *
2733 * So we calculate the required minimum and direction, possibly
2734 * reduce the component_size, then iterate through the devices
2735 * and set the new_data_offset.
2736 * If that all works, we set chunk_size, layout, raid_disks, and start
2737 * 'reshape'
2738 */
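/* (For example: growing from 4 to 6 devices means raid_disks increases,
 * so each member's data_offset moves down - towards the start of the
 * device - by at least min_offset_change, the larger of the old and new
 * chunk sizes; shrinking instead moves it up, and component_size is
 * trimmed to make room.)
 */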
2739 struct mdinfo *sra;
2740 unsigned long long min;
2741 int err = 0;
2742
2743 sra = sysfs_read(fd, NULL,
2744 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2745 );
2746 if (!sra) {
2747 pr_err("%s: Cannot get array details from sysfs\n", devname);
2748 goto release;
2749 }
2750 min = reshape->min_offset_change;
2751
2752 if (info->delta_disks)
2753 sysfs_set_str(sra, NULL, "reshape_direction",
2754 info->delta_disks < 0 ? "backwards" : "forwards");
2755 if (info->delta_disks < 0 && info->space_after < min) {
2756 int rv = sysfs_set_num(sra, NULL, "component_size",
2757 (sra->component_size - min)/2);
2758 if (rv) {
2759 pr_err("cannot reduce component size\n");
2760 goto release;
2761 }
2762 }
2763 err = set_new_data_offset(sra, st, devname, info->delta_disks,
2764 data_offset, min, 0);
2765 if (err == 1) {
2766 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2767 cont_err("supported on this kernel\n");
2768 err = -1;
2769 }
2770 if (err < 0)
2771 goto release;
2772
2773 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2774 err = errno;
2775 if (!err && sysfs_set_num(sra, NULL, "layout",
2776 reshape->after.layout) < 0)
2777 err = errno;
2778 if (!err &&
2779 sysfs_set_num(sra, NULL, "raid_disks",
2780 info->array.raid_disks + info->delta_disks) < 0)
2781 err = errno;
2782 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2783 err = errno;
2784 if (err) {
2785 pr_err("Cannot set array shape for %s\n",
2786 devname);
2787 if (err == EBUSY &&
2788 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2789 cont_err(" Bitmap must be removed before shape can be changed\n");
2790 goto release;
2791 }
2792 sysfs_free(sra);
2793 return 0;
2794 release:
2795 sysfs_free(sra);
2796 return 1;
2797 }
2798
2799 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2800 {
2801 struct mdinfo *sra, *sd;
2802 /* Initialisation to silence compiler warning */
2803 unsigned long long min_space_before = 0, min_space_after = 0;
2804 int first = 1;
2805
2806 sra = sysfs_read(fd, NULL, GET_DEVS);
2807 if (!sra)
2808 return;
2809 for (sd = sra->devs; sd; sd = sd->next) {
2810 char *dn;
2811 int dfd;
2812 struct supertype *st2;
2813 struct mdinfo info2;
2814
2815 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2816 continue;
2817 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2818 dfd = dev_open(dn, O_RDONLY);
2819 if (dfd < 0)
2820 break;
2821 st2 = dup_super(st);
2822 if (st2->ss->load_super(st2, dfd, NULL)) {
2823 close(dfd);
2824 free(st2);
2825 break;
2826 }
2827 close(dfd);
2828 st2->ss->getinfo_super(st2, &info2, NULL);
2829 st2->ss->free_super(st2);
2830 free(st2);
2831 if (first ||
2832 min_space_before > info2.space_before)
2833 min_space_before = info2.space_before;
2834 if (first ||
2835 min_space_after > info2.space_after)
2836 min_space_after = info2.space_after;
2837 first = 0;
2838 }
2839 if (sd == NULL && !first) {
2840 info->space_after = min_space_after;
2841 info->space_before = min_space_before;
2842 }
2843 sysfs_free(sra);
2844 }
2845
2846 static void update_cache_size(char *container, struct mdinfo *sra,
2847 struct mdinfo *info,
2848 int disks, unsigned long long blocks)
2849 {
2850 /* Check that the internal stripe cache is
2851 * large enough, or it won't work.
2852 * It must hold at least 4 stripes of the larger
2853 * chunk size
2854 */
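/* (Worked example with assumed values: a 512K chunk, blocks = 8192
 * sectors and disks = 4 gives 524288 * 4 / 512 = 4096 sectors, which
 * already exceeds 16 + 8192/4 = 2064, so the cache must hold
 * 4096 / 8 = 512 pages and stripe_cache_size is raised to 513 if it is
 * currently smaller.)
 */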
2855 unsigned long cache;
2856 cache = max(info->array.chunk_size, info->new_chunk);
2857 cache *= 4; /* 4 stripes minimum */
2858 cache /= 512; /* convert to sectors */
2859 /* make sure there is room for 'blocks' with a bit to spare */
2860 if (cache < 16 + blocks / disks)
2861 cache = 16 + blocks / disks;
2862 cache /= (4096/512); /* Convert from sectors to pages */
2863
2864 if (sra->cache_size < cache)
2865 subarray_set_num(container, sra, "stripe_cache_size",
2866 cache+1);
2867 }
2868
2869 static int impose_reshape(struct mdinfo *sra,
2870 struct mdinfo *info,
2871 struct supertype *st,
2872 int fd,
2873 int restart,
2874 char *devname, char *container,
2875 struct reshape *reshape)
2876 {
2877 struct mdu_array_info_s array;
2878
2879 sra->new_chunk = info->new_chunk;
2880
2881 if (restart) {
2882 /* For external metadata the checkpoint saved by mdmon can be lost
2883 * or missed (e.g. due to a crash). Check whether, on restart, md is
2884 * farther along than the metadata indicates.
2885 * If so, the metadata information is obsolete.
2886 */
2887 if (st->ss->external)
2888 verify_reshape_position(info, reshape->level);
2889 sra->reshape_progress = info->reshape_progress;
2890 } else {
2891 sra->reshape_progress = 0;
2892 if (reshape->after.data_disks < reshape->before.data_disks)
2893 /* start from the end of the new array */
2894 sra->reshape_progress = (sra->component_size
2895 * reshape->after.data_disks);
2896 }
2897
2898 md_get_array_info(fd, &array);
2899 if (info->array.chunk_size == info->new_chunk &&
2900 reshape->before.layout == reshape->after.layout &&
2901 st->ss->external == 0) {
2902 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2903 array.raid_disks = reshape->after.data_disks + reshape->parity;
2904 if (!restart && md_set_array_info(fd, &array) != 0) {
2905 int err = errno;
2906
2907 pr_err("Cannot set device shape for %s: %s\n",
2908 devname, strerror(errno));
2909
2910 if (err == EBUSY &&
2911 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2912 cont_err("Bitmap must be removed before shape can be changed\n");
2913
2914 goto release;
2915 }
2916 } else if (!restart) {
2917 /* set them all just in case some old 'new_*' value
2918 * persists from some earlier problem.
2919 */
2920 int err = 0;
2921 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2922 err = errno;
2923 if (!err && sysfs_set_num(sra, NULL, "layout",
2924 reshape->after.layout) < 0)
2925 err = errno;
2926 if (!err && subarray_set_num(container, sra, "raid_disks",
2927 reshape->after.data_disks +
2928 reshape->parity) < 0)
2929 err = errno;
2930 if (err) {
2931 pr_err("Cannot set device shape for %s\n", devname);
2932
2933 if (err == EBUSY &&
2934 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2935 cont_err("Bitmap must be removed before shape can be changed\n");
2936 goto release;
2937 }
2938 }
2939 return 0;
2940 release:
2941 return -1;
2942 }
2943
2944 static int impose_level(int fd, int level, char *devname, int verbose)
2945 {
2946 char *c;
2947 struct mdu_array_info_s array;
2948 struct mdinfo info;
2949
2950 if (sysfs_init(&info, fd, NULL)) {
2951 pr_err("failed to initialize sysfs.\n");
2952 return 1;
2953 }
2954
2955 md_get_array_info(fd, &array);
2956 if (level == 0 && is_level456(array.level)) {
2957 /* To convert to RAID0 we need to fail and
2958 * remove any non-data devices. */
2959 int found = 0;
2960 int d;
2961 int data_disks = array.raid_disks - 1;
2962 if (array.level == 6)
2963 data_disks -= 1;
2964 if (array.level == 5 && array.layout != ALGORITHM_PARITY_N)
2965 return -1;
2966 if (array.level == 6 && array.layout != ALGORITHM_PARITY_N_6)
2967 return -1;
2968 sysfs_set_str(&info, NULL,"sync_action", "idle");
2969 /* First remove any spares so no recovery starts */
2970 for (d = 0, found = 0;
2971 d < MAX_DISKS && found < array.nr_disks; d++) {
2972 mdu_disk_info_t disk;
2973 disk.number = d;
2974 if (md_get_disk_info(fd, &disk) < 0)
2975 continue;
2976 if (disk.major == 0 && disk.minor == 0)
2977 continue;
2978 found++;
2979 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2980 disk.raid_disk < data_disks)
2981 /* keep this */
2982 continue;
2983 ioctl(fd, HOT_REMOVE_DISK,
2984 makedev(disk.major, disk.minor));
2985 }
2986 /* Now fail anything left */
2987 md_get_array_info(fd, &array);
2988 for (d = 0, found = 0;
2989 d < MAX_DISKS && found < array.nr_disks; d++) {
2990 mdu_disk_info_t disk;
2991 disk.number = d;
2992 if (md_get_disk_info(fd, &disk) < 0)
2993 continue;
2994 if (disk.major == 0 && disk.minor == 0)
2995 continue;
2996 found++;
2997 if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
2998 disk.raid_disk < data_disks)
2999 /* keep this */
3000 continue;
3001 ioctl(fd, SET_DISK_FAULTY,
3002 makedev(disk.major, disk.minor));
3003 hot_remove_disk(fd, makedev(disk.major, disk.minor), 1);
3004 }
3005 }
3006 c = map_num(pers, level);
3007 if (c) {
3008 int err = sysfs_set_str(&info, NULL, "level", c);
3009 if (err) {
3010 err = errno;
3011 pr_err("%s: could not set level to %s\n",
3012 devname, c);
3013 if (err == EBUSY &&
3014 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
3015 cont_err("Bitmap must be removed before level can be changed\n");
3016 return err;
3017 }
3018 if (verbose >= 0)
3019 pr_err("level of %s changed to %s\n", devname, c);
3020 }
3021 return 0;
3022 }
3023
3024 int sigterm = 0;
3025 static void catch_term(int sig)
3026 {
3027 sigterm = 1;
3028 }
3029
3030 static int reshape_array(char *container, int fd, char *devname,
3031 struct supertype *st, struct mdinfo *info,
3032 int force, struct mddev_dev *devlist,
3033 unsigned long long data_offset,
3034 char *backup_file, int verbose, int forked,
3035 int restart, int freeze_reshape)
3036 {
3037 struct reshape reshape;
3038 int spares_needed;
3039 char *msg;
3040 int orig_level = UnSet;
3041 int odisks;
3042 int delayed;
3043
3044 struct mdu_array_info_s array;
3045 char *c;
3046
3047 struct mddev_dev *dv;
3048 int added_disks;
3049
3050 int *fdlist = NULL;
3051 unsigned long long *offsets = NULL;
3052 int d;
3053 int nrdisks;
3054 int err;
3055 unsigned long blocks;
3056 unsigned long long array_size;
3057 int done;
3058 struct mdinfo *sra = NULL;
3059 char buf[20];
3060
3061 /* when reshaping a RAID0, the component_size might be zero.
3062 * So try to fix that up.
3063 */
3064 if (md_get_array_info(fd, &array) != 0) {
3065 dprintf("Cannot get array information.\n");
3066 goto release;
3067 }
3068 if (array.level == 0 && info->component_size == 0) {
3069 get_dev_size(fd, NULL, &array_size);
3070 info->component_size = array_size / array.raid_disks;
3071 }
3072
3073 if (array.level == 10)
3074 /* Need space_after info */
3075 get_space_after(fd, st, info);
3076
3077 if (info->reshape_active) {
3078 int new_level = info->new_level;
3079 info->new_level = UnSet;
3080 if (info->delta_disks > 0)
3081 info->array.raid_disks -= info->delta_disks;
3082 msg = analyse_change(devname, info, &reshape);
3083 info->new_level = new_level;
3084 if (info->delta_disks > 0)
3085 info->array.raid_disks += info->delta_disks;
3086 if (!restart)
3087 /* Make sure the array isn't read-only */
3088 ioctl(fd, RESTART_ARRAY_RW, 0);
3089 } else
3090 msg = analyse_change(devname, info, &reshape);
3091 if (msg) {
3092 /* if msg == "", error has already been printed */
3093 if (msg[0])
3094 pr_err("%s\n", msg);
3095 goto release;
3096 }
3097 if (restart && (reshape.level != info->array.level ||
3098 reshape.before.layout != info->array.layout ||
3099 reshape.before.data_disks + reshape.parity !=
3100 info->array.raid_disks - max(0, info->delta_disks))) {
3101 pr_err("reshape info is not in native format - cannot continue.\n");
3102 goto release;
3103 }
3104
3105 if (st->ss->external && restart && (info->reshape_progress == 0) &&
3106 !((sysfs_get_str(info, NULL, "sync_action",
3107 buf, sizeof(buf)) > 0) &&
3108 (strncmp(buf, "reshape", 7) == 0))) {
3109 /* When a reshape is restarted from '0' (the very beginning of the
3110 * array), it is possible that for external metadata the reshape and
3111 * array configuration have not happened yet.
3112 * Check whether md has the same opinion and the reshape really is
3113 * restarting from 0. If so, this is a regular reshape start after
3114 * the metadata has switched the reshape to the next array only.
3115 */
3116 if ((verify_reshape_position(info, reshape.level) >= 0) &&
3117 (info->reshape_progress == 0))
3118 restart = 0;
3119 }
3120 if (restart) {
3121 /*
3122 * reshape already started. just skip to monitoring
3123 * the reshape
3124 */
3125 if (reshape.backup_blocks == 0)
3126 return 0;
3127 if (restart & RESHAPE_NO_BACKUP)
3128 return 0;
3129
3130 /* Need 'sra' down at 'started:' */
3131 sra = sysfs_read(fd, NULL,
3132 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|
3133 GET_CHUNK|GET_CACHE);
3134 if (!sra) {
3135 pr_err("%s: Cannot get array details from sysfs\n",
3136 devname);
3137 goto release;
3138 }
3139
3140 if (!backup_file)
3141 backup_file = locate_backup(sra->sys_name);
3142
3143 goto started;
3144 }
3145 /* The container is frozen but the array may not be.
3146 * So freeze the array so spares don't get put to the wrong use
3147 * FIXME there should probably be a cleaner separation between
3148 * freeze_array and freeze_container.
3149 */
3150 sysfs_freeze_array(info);
3151 /* Check we have enough spares to not be degraded */
3152 added_disks = 0;
3153 for (dv = devlist; dv; dv = dv->next)
3154 added_disks++;
3155 spares_needed = max(reshape.before.data_disks,
3156 reshape.after.data_disks) +
3157 reshape.parity - array.raid_disks;
3158
3159 if (!force && info->new_level > 1 && info->array.level > 1 &&
3160 spares_needed > info->array.spare_disks + added_disks) {
3161 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
3162 " Use --force to over-ride this check.\n",
3163 spares_needed,
3164 spares_needed == 1 ? "" : "s",
3165 info->array.spare_disks + added_disks);
3166 goto release;
3167 }
3168 /* Check we have enough spares to not fail */
3169 spares_needed = max(reshape.before.data_disks,
3170 reshape.after.data_disks)
3171 - array.raid_disks;
3172 if ((info->new_level > 1 || info->new_level == 0) &&
3173 spares_needed > info->array.spare_disks + added_disks) {
3174 pr_err("Need %d spare%s to create working array, and only have %d.\n",
3175 spares_needed, spares_needed == 1 ? "" : "s",
3176 info->array.spare_disks + added_disks);
3177 goto release;
3178 }
3179
3180 if (reshape.level != array.level) {
3181 int err = impose_level(fd, reshape.level, devname, verbose);
3182 if (err)
3183 goto release;
3184 info->new_layout = UnSet; /* after level change,
3185 * layout is meaningless */
3186 orig_level = array.level;
3187 sysfs_freeze_array(info);
3188
3189 if (reshape.level > 0 && st->ss->external) {
3190 /* make sure mdmon is aware of the new level */
3191 if (mdmon_running(container))
3192 flush_mdmon(container);
3193
3194 if (!mdmon_running(container))
3195 start_mdmon(container);
3196 ping_monitor(container);
3197 if (mdmon_running(container) && st->update_tail == NULL)
3198 st->update_tail = &st->updates;
3199 }
3200 }
3201 /* ->reshape_super might have chosen some spares from the
3202 * container that it wants to be part of the new array.
3203 * We can collect them with ->container_content and give
3204 * them to the kernel.
3205 */
3206 if (st->ss->reshape_super && st->ss->container_content) {
3207 char *subarray = strchr(info->text_version+1, '/')+1;
3208 struct mdinfo *info2 =
3209 st->ss->container_content(st, subarray);
3210 struct mdinfo *d;
3211
3212 if (info2) {
3213 if (sysfs_init(info2, fd, st->devnm)) {
3214 pr_err("unable to initialize sysfs for %s\n",
3215 st->devnm);
3216 free(info2);
3217 goto release;
3218 }
3219 /* When increasing number of devices, we need to set
3220 * new raid_disks before adding these, or they might
3221 * be rejected.
3222 */
3223 if (reshape.backup_blocks &&
3224 reshape.after.data_disks >
3225 reshape.before.data_disks)
3226 subarray_set_num(container, info2, "raid_disks",
3227 reshape.after.data_disks +
3228 reshape.parity);
3229 for (d = info2->devs; d; d = d->next) {
3230 if (d->disk.state == 0 &&
3231 d->disk.raid_disk >= 0) {
3232 /* This is a spare that wants to
3233 * be part of the array.
3234 */
3235 add_disk(fd, st, info2, d);
3236 }
3237 }
3238 sysfs_free(info2);
3239 }
3240 }
3241 /* We might have been given some devices to add to the
3242 * array. Now that the array has been changed to the right
3243 * level and frozen, we can safely add them.
3244 */
3245 if (devlist) {
3246 if (Manage_subdevs(devname, fd, devlist, verbose, 0, UOPT_UNDEFINED, 0))
3247 goto release;
3248 }
3249
3250 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
3251 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
3252 if (reshape.backup_blocks == 0) {
3253 /* No restriping needed, but we might need to impose
3254 * some more changes: layout, raid_disks, chunk_size
3255 */
3256 /* read current array info */
3257 if (md_get_array_info(fd, &array) != 0) {
3258 dprintf("Cannot get array information.\n");
3259 goto release;
3260 }
3261 /* compare current array info with new values and if
3262 * it is different update them to new */
3263 if (info->new_layout != UnSet &&
3264 info->new_layout != array.layout) {
3265 array.layout = info->new_layout;
3266 if (md_set_array_info(fd, &array) != 0) {
3267 pr_err("failed to set new layout\n");
3268 goto release;
3269 } else if (verbose >= 0)
3270 printf("layout for %s set to %d\n",
3271 devname, array.layout);
3272 }
3273 if (info->delta_disks != UnSet && info->delta_disks != 0 &&
3274 array.raid_disks !=
3275 (info->array.raid_disks + info->delta_disks)) {
3276 array.raid_disks += info->delta_disks;
3277 if (md_set_array_info(fd, &array) != 0) {
3278 pr_err("failed to set raid disks\n");
3279 goto release;
3280 } else if (verbose >= 0) {
3281 printf("raid_disks for %s set to %d\n",
3282 devname, array.raid_disks);
3283 }
3284 }
3285 if (info->new_chunk != 0 &&
3286 info->new_chunk != array.chunk_size) {
3287 if (sysfs_set_num(info, NULL,
3288 "chunk_size", info->new_chunk) != 0) {
3289 pr_err("failed to set chunk size\n");
3290 goto release;
3291 } else if (verbose >= 0)
3292 printf("chunk size for %s set to %d\n",
3293 devname, info->new_chunk);
3294 }
3295 unfreeze(st);
3296 return 0;
3297 }
3298
3299 /*
3300 * There are three possibilities.
3301 * 1/ The array will shrink.
3302 * We need to ensure the reshape will pause before reaching
3303 * the 'critical section'. We also need to fork and wait for
3304 * that to happen. When it does we
3305 * suspend/backup/complete/unfreeze
3306 *
3307 * 2/ The array will not change size.
3308 * This requires that we keep a backup of a sliding window
3309 * so that we can restore data after a crash. So we need
3310 * to fork and monitor progress.
3311 * In future we will allow the data_offset to change, so
3312 * a sliding backup becomes unnecessary.
3313 *
3314 * 3/ The array will grow. This is relatively easy.
3315 * However the kernel's restripe routines will cheerfully
3316 * overwrite some early data before it is safe. So we
3317 * need to make a backup of the early parts of the array
3318 * and be ready to restore it if rebuild aborts very early.
3319 * For externally managed metadata, we still need a forked
3320 * child to monitor the reshape and suspend IO over the region
3321 * that is being reshaped.
3322 *
3323 * We backup data by writing it to one spare, or to a
3324 * file which was given on command line.
3325 *
3326 * In each case, we first make sure that storage is available
3327 * for the required backup.
3328 * Then we:
3329 * - request the shape change.
3330 * - fork to handle backup etc.
3331 */
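/* (For instance, growing a RAID5 from 3 to 4 devices is case 3: only
 * the first few stripes - the critical section - are backed up to a
 * spare or to the --backup-file before the kernel is allowed to
 * restripe over them.)
 */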
3332 /* Check that we can hold all the data */
3333 get_dev_size(fd, NULL, &array_size);
3334 if (reshape.new_size < (array_size/512)) {
3335 pr_err("this change will reduce the size of the array.\n"
3336 " use --grow --array-size first to truncate array.\n"
3337 " e.g. mdadm --grow %s --array-size %llu\n",
3338 devname, reshape.new_size/2);
3339 goto release;
3340 }
3341
3342 if (array.level == 10) {
3343 /* Reshaping RAID10 does not require any data backup by
3344 * user-space. Instead it requires that the data_offset
3345 * is changed to avoid the need for backup.
3346 * So this is handled very separately
3347 */
3348 if (restart)
3349 /* Nothing to do. */
3350 return 0;
3351 return raid10_reshape(container, fd, devname, st, info,
3352 &reshape, data_offset, force, verbose);
3353 }
3354 sra = sysfs_read(fd, NULL,
3355 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3356 GET_CACHE);
3357 if (!sra) {
3358 pr_err("%s: Cannot get array details from sysfs\n",
3359 devname);
3360 goto release;
3361 }
3362
3363 if (!backup_file)
3364 switch(set_new_data_offset(sra, st, devname,
3365 reshape.after.data_disks - reshape.before.data_disks,
3366 data_offset,
3367 reshape.min_offset_change, 1)) {
3368 case -1:
3369 goto release;
3370 case 0:
3371 /* Updated data_offset, so it's easy now */
3372 update_cache_size(container, sra, info,
3373 min(reshape.before.data_disks,
3374 reshape.after.data_disks),
3375 reshape.backup_blocks);
3376
3377 /* Right, everything seems fine. Let's kick things off.
3378 */
3379 sync_metadata(st);
3380
3381 if (impose_reshape(sra, info, st, fd, restart,
3382 devname, container, &reshape) < 0)
3383 goto release;
3384 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3385 struct mdinfo *sd;
3386 if (errno != EINVAL) {
3387 pr_err("Failed to initiate reshape!\n");
3388 goto release;
3389 }
3390 /* revert data_offset and try the old way */
3391 for (sd = sra->devs; sd; sd = sd->next) {
3392 sysfs_set_num(sra, sd, "new_offset",
3393 sd->data_offset);
3394 sysfs_set_str(sra, NULL, "reshape_direction",
3395 "forwards");
3396 }
3397 break;
3398 }
3399 if (info->new_level == reshape.level)
3400 return 0;
3401 /* need to adjust level when reshape completes */
3402 switch(fork()) {
3403 case -1: /* ignore error, but don't wait */
3404 return 0;
3405 default: /* parent */
3406 return 0;
3407 case 0:
3408 manage_fork_fds(0);
3409 map_fork();
3410 break;
3411 }
3412 close(fd);
3413 wait_reshape(sra);
3414 fd = open_dev(sra->sys_name);
3415 if (fd >= 0)
3416 impose_level(fd, info->new_level, devname, verbose);
3417 return 0;
3418 case 1: /* Couldn't set data_offset, try the old way */
3419 if (data_offset != INVALID_SECTORS) {
3420 pr_err("Cannot update data_offset on this array\n");
3421 goto release;
3422 }
3423 break;
3424 }
3425
3426 started:
3427 /* Decide how many blocks (sectors) for a reshape
3428 * unit. The number we have so far is just a minimum
3429 */
3430 blocks = reshape.backup_blocks;
3431 if (reshape.before.data_disks ==
3432 reshape.after.data_disks) {
3433 /* Make 'blocks' bigger for better throughput, but
3434 * not so big that we reject it below.
3435 * Try for 16 megabytes
3436 */
3437 while (blocks * 32 < sra->component_size && blocks < 16*1024*2)
3438 blocks *= 2;
3439 } else
3440 pr_err("Need to backup %luK of critical section..\n", blocks/2);
3441
3442 if (blocks >= sra->component_size/2) {
3443 pr_err("%s: Something wrong - reshape aborted\n", devname);
3444 goto release;
3445 }
3446
3447 /* Now we need to open all these devices so we can read/write.
3448 */
3449 nrdisks = max(reshape.before.data_disks,
3450 reshape.after.data_disks) + reshape.parity
3451 + sra->array.spare_disks;
3452 fdlist = xcalloc((1+nrdisks), sizeof(int));
3453 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3454
3455 odisks = reshape.before.data_disks + reshape.parity;
3456 d = reshape_prepare_fdlist(devname, sra, odisks, nrdisks, blocks,
3457 backup_file, fdlist, offsets);
3458 if (d < odisks) {
3459 goto release;
3460 }
3461 if ((st->ss->manage_reshape == NULL) ||
3462 (st->ss->recover_backup == NULL)) {
3463 if (backup_file == NULL) {
3464 if (reshape.after.data_disks <=
3465 reshape.before.data_disks) {
3466 pr_err("%s: Cannot grow - need backup-file\n",
3467 devname);
3468 pr_err(" Please provide one with \"--backup=...\"\n");
3469 goto release;
3470 } else if (d == odisks) {
3471 pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname);
3472 goto release;
3473 }
3474 } else {
3475 if (!reshape_open_backup_file(backup_file, fd, devname,
3476 (signed)blocks,
3477 fdlist+d, offsets+d,
3478 sra->sys_name, restart)) {
3479 goto release;
3480 }
3481 d++;
3482 }
3483 }
3484
3485 update_cache_size(container, sra, info,
3486 min(reshape.before.data_disks,
3487 reshape.after.data_disks), blocks);
3488
3489 /* Right, everything seems fine. Let's kick things off.
3490 * If only changing raid_disks, use ioctl, else use
3491 * sysfs.
3492 */
3493 sync_metadata(st);
3494
3495 if (impose_reshape(sra, info, st, fd, restart,
3496 devname, container, &reshape) < 0)
3497 goto release;
3498
3499 err = start_reshape(sra, restart, reshape.before.data_disks,
3500 reshape.after.data_disks, st);
3501 if (err) {
3502 pr_err("Cannot %s reshape for %s\n",
3503 restart ? "continue" : "start", devname);
3504 goto release;
3505 }
3506 if (restart)
3507 sysfs_set_str(sra, NULL, "array_state", "active");
3508 if (freeze_reshape) {
3509 free(fdlist);
3510 free(offsets);
3511 sysfs_free(sra);
3512 pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n",
3513 sra->reshape_progress);
3514 return 1;
3515 }
3516
3517 if (!forked)
3518 if (continue_via_systemd(container ?: sra->sys_name,
3519 GROW_SERVICE)) {
3520 free(fdlist);
3521 free(offsets);
3522 sysfs_free(sra);
3523 return 0;
3524 }
3525
3526 /* Now we just need to kick off the reshape and watch, while
3527 * handling backups of the data...
3528 * This is all done by a forked background process.
3529 */
3530 switch(forked ? 0 : fork()) {
3531 case -1:
3532 pr_err("Cannot run child to monitor reshape: %s\n",
3533 strerror(errno));
3534 abort_reshape(sra);
3535 goto release;
3536 default:
3537 free(fdlist);
3538 free(offsets);
3539 sysfs_free(sra);
3540 return 0;
3541 case 0:
3542 map_fork();
3543 break;
3544 }
3545
3546 /* Close unused file descriptor in the forked process */
3547 close_fd(&fd);
3548
3549 /* If another array on the same devices is busy, the
3550 * reshape will wait for them. This would mean that
3551 * the first section that we suspend will stay suspended
3552 * for a long time. So check on that possibility
3553 * by looking for "DELAYED" in /proc/mdstat, and if found,
3554 * wait a while
3555 */
3556 do {
3557 struct mdstat_ent *mds, *m;
3558 delayed = 0;
3559 mds = mdstat_read(1, 0);
3560 for (m = mds; m; m = m->next)
3561 if (strcmp(m->devnm, sra->sys_name) == 0) {
3562 if (m->resync && m->percent == RESYNC_DELAYED)
3563 delayed = 1;
3564 if (m->resync == 0)
3565 /* Haven't started the reshape thread
3566 * yet, wait a bit
3567 */
3568 delayed = 2;
3569 break;
3570 }
3571 free_mdstat(mds);
3572 if (delayed == 1 && get_linux_version() < 3007000) {
3573 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3574 " You might experience problems until other reshapes complete.\n");
3575 delayed = 0;
3576 }
3577 if (delayed)
3578 mdstat_wait(30 - (delayed-1) * 25);
3579 } while (delayed);
3580 mdstat_close();
3581 if (check_env("MDADM_GROW_VERIFY"))
3582 fd = open(devname, O_RDONLY | O_DIRECT);
3583 else
3584 fd = -1;
3585 mlockall(MCL_FUTURE);
3586
3587 if (signal_s(SIGTERM, catch_term) == SIG_ERR)
3588 goto release;
3589
3590 if (st->ss->external) {
3591 /* metadata handler takes it from here */
3592 done = st->ss->manage_reshape(
3593 fd, sra, &reshape, st, blocks,
3594 fdlist, offsets, d - odisks, fdlist + odisks,
3595 offsets + odisks);
3596 } else
3597 done = child_monitor(
3598 fd, sra, &reshape, st, blocks, fdlist, offsets,
3599 d - odisks, fdlist + odisks, offsets + odisks);
3600
3601 free(fdlist);
3602 free(offsets);
3603
3604 if (backup_file && done) {
3605 char *bul;
3606 bul = make_backup(sra->sys_name);
3607 if (bul) {
3608 char buf[1024];
3609 int l = readlink(bul, buf, sizeof(buf) - 1);
3610 if (l > 0) {
3611 buf[l]=0;
3612 unlink(buf);
3613 }
3614 unlink(bul);
3615 free(bul);
3616 }
3617 unlink(backup_file);
3618 }
3619 if (!done) {
3620 abort_reshape(sra);
3621 goto out;
3622 }
3623
3624 if (!st->ss->external &&
3625 !(reshape.before.data_disks != reshape.after.data_disks &&
3626 info->custom_array_size) && info->new_level == reshape.level &&
3627 !forked) {
3628 /* no need to wait for the reshape to finish as
3629 * there is nothing more to do.
3630 */
3631 sysfs_free(sra);
3632 exit(0);
3633 }
3634 wait_reshape(sra);
3635
3636 if (st->ss->external) {
3637 /* Re-load the metadata as much could have changed */
3638 int cfd = open_dev(st->container_devnm);
3639 if (cfd >= 0) {
3640 flush_mdmon(container);
3641 st->ss->free_super(st);
3642 st->ss->load_container(st, cfd, container);
3643 close(cfd);
3644 }
3645 }
3646
3647 /* set the new array size if required; custom_array_size is used
3648 * by this metadata.
3649 */
3650 if (reshape.before.data_disks != reshape.after.data_disks &&
3651 info->custom_array_size)
3652 set_array_size(st, info, info->text_version);
3653
3654 if (info->new_level != reshape.level) {
3655 if (fd < 0)
3656 fd = open(devname, O_RDONLY);
3657 impose_level(fd, info->new_level, devname, verbose);
3658 close(fd);
3659 if (info->new_level == 0)
3660 st->update_tail = NULL;
3661 }
3662 out:
3663 sysfs_free(sra);
3664 if (forked)
3665 return 0;
3666 unfreeze(st);
3667 exit(0);
3668
3669 release:
3670 free(fdlist);
3671 free(offsets);
3672 if (orig_level != UnSet && sra) {
3673 c = map_num(pers, orig_level);
3674 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3675 pr_err("aborting level change\n");
3676 }
3677 sysfs_free(sra);
3678 if (!forked)
3679 unfreeze(st);
3680 return 1;
3681 }
3682
3683 /* The mdfd handle is passed in so it can be closed in the child process (after fork).
3684 */
3685 int reshape_container(char *container, char *devname,
3686 int mdfd,
3687 struct supertype *st,
3688 struct mdinfo *info,
3689 int force,
3690 char *backup_file, int verbose,
3691 int forked, int restart, int freeze_reshape)
3692 {
3693 struct mdinfo *cc = NULL;
3694 int rv = restart;
3695 char last_devnm[32] = "";
3696
3697 /* component_size is not meaningful for a container,
3698 * so pass '0' meaning 'no change'
3699 */
3700 if (!restart &&
3701 reshape_super(st, 0, info->new_level,
3702 info->new_layout, info->new_chunk,
3703 info->array.raid_disks, info->delta_disks,
3704 backup_file, devname, APPLY_METADATA_CHANGES,
3705 verbose)) {
3706 unfreeze(st);
3707 return 1;
3708 }
3709
3710 sync_metadata(st);
3711
3712 /* ping monitor to be sure that update is on disk
3713 */
3714 ping_monitor(container);
3715
3716 if (!forked && !freeze_reshape)
3717 if (continue_via_systemd(container, GROW_SERVICE))
3718 return 0;
3719
3720 switch (forked ? 0 : fork()) {
3721 case -1: /* error */
3722 perror("Cannot fork to complete reshape");
3723 unfreeze(st);
3724 return 1;
3725 default: /* parent */
3726 if (!freeze_reshape)
3727 printf("%s: multi-array reshape continues in background\n", Name);
3728 return 0;
3729 case 0: /* child */
3730 manage_fork_fds(0);
3731 map_fork();
3732 break;
3733 }
3734
3735 /* close unused handle in child process
3736 */
3737 if (mdfd > -1)
3738 close(mdfd);
3739
3740 while(1) {
3741 /* For each member array with reshape_active,
3742 * we need to perform the reshape.
3743 * We pick the first array that needs reshaping and
3744 * reshape it. reshape_array() will re-read the metadata
3745 * so the next time through a different array should be
3746 * ready for reshape.
3747 * It is possible that the 'different' array will not
3748 * be assembled yet. In that case we simply exit.
3749 * When it is assembled, the mdadm which assembles it
3750 * will take over the reshape.
3751 */
3752 struct mdinfo *content;
3753 int fd;
3754 struct mdstat_ent *mdstat;
3755 char *adev;
3756 dev_t devid;
3757
3758 sysfs_free(cc);
3759
3760 cc = st->ss->container_content(st, NULL);
3761
3762 for (content = cc; content ; content = content->next) {
3763 char *subarray;
3764 if (!content->reshape_active)
3765 continue;
3766
3767 subarray = strchr(content->text_version+1, '/')+1;
3768 mdstat = mdstat_by_subdev(subarray, container);
3769 if (!mdstat)
3770 continue;
3771 if (mdstat->active == 0) {
3772 pr_err("Skipping inactive array %s.\n",
3773 mdstat->devnm);
3774 free_mdstat(mdstat);
3775 mdstat = NULL;
3776 continue;
3777 }
3778 break;
3779 }
3780 if (!content)
3781 break;
3782
3783 devid = devnm2devid(mdstat->devnm);
3784 adev = map_dev(major(devid), minor(devid), 0);
3785 if (!adev)
3786 adev = content->text_version;
3787
3788 fd = open_dev(mdstat->devnm);
3789 if (fd < 0) {
3790 pr_err("Device %s cannot be opened for reshape.\n",
3791 adev);
3792 break;
3793 }
3794
3795 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3796 /* Do not allow for multiple reshape_array() calls for
3797 * the same array.
3798 * This can happen when reshape_array() returns without
3799 * error but the reshape is not finished (wrong reshape
3800 * starting/continuation conditions). Mdmon then doesn't
3801 * switch to the next array in the container and re-entry
3802 * conditions for the same array occur.
3803 * This is possibly an interim measure until the behaviour
3804 * of reshape_array() is resolved.
3805 */
3806 printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev);
3807 close(fd);
3808 break;
3809 }
3810 strcpy(last_devnm, mdstat->devnm);
3811
3812 if (sysfs_init(content, fd, mdstat->devnm)) {
3813 pr_err("Unable to initialize sysfs for %s\n",
3814 mdstat->devnm);
3815 rv = 1;
3816 break;
3817 }
3818
3819 if (mdmon_running(container))
3820 flush_mdmon(container);
3821
3822 rv = reshape_array(container, fd, adev, st,
3823 content, force, NULL, INVALID_SECTORS,
3824 backup_file, verbose, 1, restart,
3825 freeze_reshape);
3826 close(fd);
3827
3828 if (freeze_reshape) {
3829 sysfs_free(cc);
3830 exit(0);
3831 }
3832
3833 restart = 0;
3834 if (rv)
3835 break;
3836
3837 if (mdmon_running(container))
3838 flush_mdmon(container);
3839 }
3840 if (!rv)
3841 unfreeze(st);
3842 sysfs_free(cc);
3843 exit(0);
3844 }
3845
3846 /*
3847 * We run a child process in the background which performs the following
3848 * steps:
3849 * - wait for resync to reach a certain point
3850 * - suspend io to the following section
3851 * - backup that section
3852 * - allow resync to proceed further
3853 * - resume io
3854 * - discard the backup.
3855 *
3856 * These are combined in slightly different ways in the three cases.
3857 * Grow:
3858 * - suspend/backup/allow/wait/resume/discard
3859 * Shrink:
3860 * - allow/wait/suspend/backup/allow/wait/resume/discard
3861 * same-size:
3862 * - wait/resume/discard/suspend/backup/allow
3863 *
3864 * suspend/backup/allow always come together
3865 * wait/resume/discard do too.
3866 * For the same-size case we have two backups to improve flow.
3867 *
3868 */
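/*
 * A minimal sketch (not built; all helper names are hypothetical) of how
 * the "Grow" ordering above maps onto a loop.  The real work is done by
 * child_monitor() and progress_reshape() below.
 */
#if 0
static void grow_ordering_sketch(void)
{
	while (!reshape_finished()) {
		suspend_io_over_next_section();	/* suspend */
		backup_that_section();		/* backup  */
		allow_resync_to_proceed();	/* allow   */
		wait_for_resync_to_pass_it();	/* wait    */
		resume_io();			/* resume  */
		discard_the_backup();		/* discard */
	}
}
#endif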
3869
3870 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3871 unsigned long long backup_point,
3872 unsigned long long wait_point,
3873 unsigned long long *suspend_point,
3874 unsigned long long *reshape_completed, int *frozen)
3875 {
3876 /* This function is called repeatedly by the reshape manager.
3877 * It determines how much progress can safely be made and allows
3878 * that progress.
3879 * - 'info' identifies the array and particularly records in
3880 * ->reshape_progress the metadata's knowledge of progress
3881 * This is a sector offset from the start of the array
3882 * of the next array block to be relocated. This number
3883 * may increase from 0 or decrease from array_size, depending
3884 * on the type of reshape that is happening.
3885 * Note that in contrast, 'sync_completed' is a block count of the
3886 * reshape so far. It gives the distance between the start point
3887 * (head or tail of device) and the next place that data will be
3888 * written. It always increases.
3889 * - 'reshape' is the structure created by analyse_change
3890 * - 'backup_point' shows how far the metadata manager has backed up
3891 * data. For reshapes with increasing progress, it is the next address
3892 * to be backed up, previous addresses have been backed-up. For
3893 * decreasing progress, it is the earliest address that has been
3894 * backed up - later addresses are also backed up.
3895 * So addresses between reshape_progress and backup_point are
3896 * backed up, provided those are in the 'correct' order.
3897 * - 'wait_point' is an array address. When reshape_completed
3898 * passes this point, progress_reshape should return. It might
3899 * return earlier if it determines that ->reshape_progress needs
3900 * to be updated or further backup is needed.
3901 * - suspend_point is maintained by progress_reshape and the caller
3902 * should not touch it except to initialise to zero.
3903 * It is an array address and it only increases in 2.6.37 and earlier.
3904 * This makes it difficult to handle reducing reshapes with
3905 * external metadata.
3906 * However: it is similar to backup_point in that it records the
3907 * other end of a suspended region from reshape_progress.
3908 * It is moved to extend the region that is safe to backup and/or
3909 * reshape.
3910 * - reshape_completed is read from sysfs and returned. The caller
3911 * should copy this into ->reshape_progress when it has reason to
3912 * believe that the metadata knows this, and any backup outside this
3913 * has been erased.
3914 *
3915 * Return value is:
3916 * 1 if more data, from backup_point but only as far as suspend_point,
3917 * should be backed up
3918 * 0 if things are progressing smoothly
3919 * -1 if the reshape is finished because it is all done,
3920 * -2 if the reshape is finished due to an error.
3921 */
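	/* A minimal caller sketch (not built; abort_reshape() and
	 * backup_more_data() are hypothetical) showing how the return
	 * values described above are meant to be consumed:
	 */
#if 0
	for (;;) {
		rv = progress_reshape(info, reshape, backup_point, wait_point,
				      &suspend_point, &completed, &frozen);
		/* copy back once the metadata is known to match */
		info->reshape_progress = completed;
		if (rv == -1)
			break;			/* reshape finished */
		if (rv == -2)
			abort_reshape();	/* finished due to an error */
		if (rv == 1)
			backup_more_data();	/* up to *suspend_point */
	}
#endif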
3922
3923 int advancing = (reshape->after.data_disks
3924 >= reshape->before.data_disks);
3925 unsigned long long need_backup; /* All data between start of array and
3926 * here will at some point need to
3927 * be backed up.
3928 */
3929 unsigned long long read_offset, write_offset;
3930 unsigned long long write_range;
3931 unsigned long long max_progress, target, completed;
3932 unsigned long long array_size = (info->component_size
3933 * reshape->before.data_disks);
3934 int fd;
3935 char buf[20];
3936
3937 /* First, we unsuspend any region that is now known to be safe.
3938 * If suspend_point is on the 'wrong' side of reshape_progress, then
3939 * we don't have or need suspension at the moment. This is true for
3940 * native metadata when we don't need to back-up.
3941 */
3942 if (advancing) {
3943 if (info->reshape_progress <= *suspend_point)
3944 sysfs_set_num(info, NULL, "suspend_lo",
3945 info->reshape_progress);
3946 } else {
3947 /* Note: this won't work in 2.6.37 and before.
3948 * Something somewhere should make sure we don't need it!
3949 */
3950 if (info->reshape_progress >= *suspend_point)
3951 sysfs_set_num(info, NULL, "suspend_hi",
3952 info->reshape_progress);
3953 }
3954
3955 /* Now work out how far it is safe to progress.
3956 * If the read_offset for ->reshape_progress is less than
3957 * 'blocks' beyond the write_offset, we can only progress as far
3958 * as a backup.
3959 * Otherwise we can progress until the write_offset for the new location
3960 * reaches (within 'blocks' of) the read_offset at the current location.
3961 * However that region must be suspended unless we are using native
3962 * metadata.
3963 * If we need to suspend more, we limit it to 128M per device, which is
3964 * rather arbitrary and should be some time-based calculation.
3965 */
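	/* Worked example (assumed numbers): growing from 2 to 3 data disks
	 * with a 512KiB (1024-sector) new chunk, at reshape_progress = 30720
	 * data is read from device offset 15360 but written at offset 10240,
	 * so reads run 5120 sectors ahead of the writes; as 5120 >= 1024 no
	 * backup is needed for the moment.
	 */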
3966 read_offset = info->reshape_progress / reshape->before.data_disks;
3967 write_offset = info->reshape_progress / reshape->after.data_disks;
3968 write_range = info->new_chunk/512;
3969 if (reshape->before.data_disks == reshape->after.data_disks)
3970 need_backup = array_size;
3971 else
3972 need_backup = reshape->backup_blocks;
3973 if (advancing) {
3974 if (read_offset < write_offset + write_range)
3975 max_progress = backup_point;
3976 else
3977 max_progress =
3978 read_offset * reshape->after.data_disks;
3979 } else {
3980 if (read_offset > write_offset - write_range)
3981 /* Can only progress as far as has been backed up,
3982 * which must be suspended */
3983 max_progress = backup_point;
3984 else if (info->reshape_progress <= need_backup)
3985 max_progress = backup_point;
3986 else {
3987 if (info->array.major_version >= 0)
3988 /* Can progress until backup is needed */
3989 max_progress = need_backup;
3990 else {
3991 /* Can progress until metadata update is required */
3992 max_progress =
3993 read_offset * reshape->after.data_disks;
3994 /* but data must be suspended */
3995 if (max_progress < *suspend_point)
3996 max_progress = *suspend_point;
3997 }
3998 }
3999 }
4000
4001 /* We know it is safe to progress to 'max_progress' providing
4002 * it is suspended or we are using native metadata.
4003 * Consider extending suspend_point 128M per device if it
4004 * is less than 64M per device beyond reshape_progress.
4005 * But always do a multiple of 'blocks'
4006 * FIXME this is too big - it takes too long to complete
4007 * this much.
4008 */
4009 target = 64*1024*2 * min(reshape->before.data_disks,
4010 reshape->after.data_disks);
4011 target /= reshape->backup_blocks;
4012 if (target < 2)
4013 target = 2;
4014 target *= reshape->backup_blocks;
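	/* Worked example (assumed numbers): with 4 data disks before and
	 * after, 64*1024*2 * 4 = 524288 sectors.  If backup_blocks were 3072
	 * sectors, 524288/3072 = 170, so target becomes 170*3072 = 522240
	 * sectors - always a whole multiple of backup_blocks and never fewer
	 * than two of them.
	 */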
4015
4016 /* For externally managed metadata we always need to suspend IO to
4017 * the area being reshaped so we regularly push suspend_point forward.
4018 * For native metadata we only need the suspend if we are going to do
4019 * a backup.
4020 */
4021 if (advancing) {
4022 if ((need_backup > info->reshape_progress ||
4023 info->array.major_version < 0) &&
4024 *suspend_point < info->reshape_progress + target) {
4025 if (need_backup < *suspend_point + 2 * target)
4026 *suspend_point = need_backup;
4027 else if (*suspend_point + 2 * target < array_size)
4028 *suspend_point += 2 * target;
4029 else
4030 *suspend_point = array_size;
4031 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
4032 if (max_progress > *suspend_point)
4033 max_progress = *suspend_point;
4034 }
4035 } else {
4036 if (info->array.major_version >= 0) {
4037 /* Only need to suspend when about to backup */
4038 if (info->reshape_progress < need_backup * 2 &&
4039 *suspend_point > 0) {
4040 *suspend_point = 0;
4041 sysfs_set_num(info, NULL, "suspend_lo", 0);
4042 sysfs_set_num(info, NULL, "suspend_hi",
4043 need_backup);
4044 }
4045 } else {
4046 /* Need to suspend continually */
4047 if (info->reshape_progress < *suspend_point)
4048 *suspend_point = info->reshape_progress;
4049 if (*suspend_point + target < info->reshape_progress)
4050 /* No need to move suspend region yet */;
4051 else {
4052 if (*suspend_point >= 2 * target)
4053 *suspend_point -= 2 * target;
4054 else
4055 *suspend_point = 0;
4056 sysfs_set_num(info, NULL, "suspend_lo",
4057 *suspend_point);
4058 }
4059 if (max_progress < *suspend_point)
4060 max_progress = *suspend_point;
4061 }
4062 }
4063
4064 /* now set sync_max to allow that progress. sync_max, like
4065 * sync_completed is a count of sectors written per device, so
4066 * we find the difference between max_progress and the start point,
4067 * and divide that by after.data_disks to get a sync_max
4068 * number.
4069 * At the same time we convert wait_point to a similar number
4070 * for comparing against sync_completed.
4071 */
4072 /* scale down max_progress to per_disk */
4073 max_progress /= reshape->after.data_disks;
4074 /*
4075 * Round to chunk size as some kernels give an erroneously
4076 * high number
4077 */
4078 max_progress /= info->new_chunk/512;
4079 max_progress *= info->new_chunk/512;
4080 /* And round to old chunk size as the kernel wants that */
4081 max_progress /= info->array.chunk_size/512;
4082 max_progress *= info->array.chunk_size/512;
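	/* e.g. (assumed sizes) with a 512KiB new chunk and a 64KiB old chunk,
	 * a max_progress of 2100 sectors is rounded down to 2048 by the first
	 * pair of operations (a multiple of 1024 sectors) and left at 2048 by
	 * the second (already a multiple of 128 sectors).
	 */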
4083 /* Limit progress to the whole device */
4084 if (max_progress > info->component_size)
4085 max_progress = info->component_size;
4086 wait_point /= reshape->after.data_disks;
4087 if (!advancing) {
4088 /* switch from 'device offset' to 'processed block count' */
4089 max_progress = info->component_size - max_progress;
4090 wait_point = info->component_size - wait_point;
4091 }
4092
4093 if (!*frozen)
4094 sysfs_set_num(info, NULL, "sync_max", max_progress);
4095
4096 /* Now wait. If we have already reached the point that we were
4097 * asked to wait to, don't wait at all, else wait for any change.
4098 * We need to select on 'sync_completed' as that is the place that
4099 * notifications happen, but we are really interested in
4100 * 'reshape_position'
4101 */
4102 fd = sysfs_get_fd(info, NULL, "sync_completed");
4103 if (fd < 0)
4104 goto check_progress;
4105
4106 if (sysfs_fd_get_ll(fd, &completed) < 0)
4107 goto check_progress;
4108
4109 while (completed < max_progress && completed < wait_point) {
4110 /* Check that sync_action is still 'reshape' to avoid
4111 * waiting forever on a dead array
4112 */
4113 char action[20];
4114 if (sysfs_get_str(info, NULL, "sync_action", action, 20) <= 0 ||
4115 strncmp(action, "reshape", 7) != 0)
4116 break;
4117 /* Some kernels reset 'sync_completed' to zero
4118 * before setting 'sync_action' to 'idle'.
4119 * So we need these extra tests.
4120 */
4121 if (completed == 0 && advancing &&
4122 strncmp(action, "idle", 4) == 0 &&
4123 info->reshape_progress > 0)
4124 break;
4125 if (completed == 0 && !advancing &&
4126 strncmp(action, "idle", 4) == 0 &&
4127 info->reshape_progress <
4128 (info->component_size * reshape->after.data_disks))
4129 break;
4130 sysfs_wait(fd, NULL);
4131 if (sysfs_fd_get_ll(fd, &completed) < 0)
4132 goto check_progress;
4133 }
4134 /* Some kernels reset 'sync_completed' to zero,
4135 * but we still need the real position within md.
4136 * So in that case, read 'reshape_position' from sysfs.
4137 */
4138 if (completed == 0) {
4139 unsigned long long reshapep;
4140 char action[20];
4141 if (sysfs_get_str(info, NULL, "sync_action", action, 20) > 0 &&
4142 strncmp(action, "idle", 4) == 0 &&
4143 sysfs_get_ll(info, NULL,
4144 "reshape_position", &reshapep) == 0)
4145 *reshape_completed = reshapep;
4146 } else {
4147 /* some kernels can give an incorrectly high
4148 * 'completed' number, so round down */
4149 completed /= (info->new_chunk/512);
4150 completed *= (info->new_chunk/512);
4151 /* Convert 'completed' back in to a 'progress' number */
4152 completed *= reshape->after.data_disks;
4153 if (!advancing)
4154 completed = (info->component_size
4155 * reshape->after.data_disks
4156 - completed);
4157 *reshape_completed = completed;
4158 }
4159
4160 close(fd);
4161
4162 /* We return the need_backup flag. Caller will decide
4163 * how much - a multiple of ->backup_blocks up to *suspend_point
4164 */
4165 if (advancing)
4166 return need_backup > info->reshape_progress;
4167 else
4168 return need_backup >= info->reshape_progress;
4169
4170 check_progress:
4171 /* if we couldn't read a number from sync_completed, then
4172 * either the reshape did complete, or it aborted.
4173 * We can tell which by checking for 'none' in reshape_position.
4174 * If it did abort, then it might immediately restart if it
4175 * was just a device failure that leaves us degraded but
4176 * functioning.
4177 */
4178 if (sysfs_get_str(info, NULL, "reshape_position", buf,
4179 sizeof(buf)) < 0 || strncmp(buf, "none", 4) != 0) {
4180 /* The abort might only be temporary. Wait up to 10
4181 * seconds for fd to contain a valid number again.
4182 */
4183 int wait = 10000;
4184 int rv = -2;
4185 unsigned long long new_sync_max;
4186 while (fd >= 0 && rv < 0 && wait > 0) {
4187 if (sysfs_wait(fd, &wait) != 1)
4188 break;
4189 switch (sysfs_fd_get_ll(fd, &completed)) {
4190 case 0:
4191 /* all good again */
4192 rv = 1;
4193 /* If "sync_max" is no longer max_progress
4194 * we need to freeze things
4195 */
4196 sysfs_get_ll(info, NULL, "sync_max",
4197 &new_sync_max);
4198 *frozen = (new_sync_max != max_progress);
4199 break;
4200 case -2: /* read error - abort */
4201 wait = 0;
4202 break;
4203 }
4204 }
4205 if (fd >= 0)
4206 close(fd);
4207 return rv; /* abort */
4208 } else {
4209 /* Maybe racing with array shutdown - check state */
4210 if (fd >= 0)
4211 close(fd);
4212 if (sysfs_get_str(info, NULL, "array_state", buf,
4213 sizeof(buf)) < 0 ||
4214 strncmp(buf, "inactive", 8) == 0 ||
4215 strncmp(buf, "clear",5) == 0)
4216 return -2; /* abort */
4217 return -1; /* complete */
4218 }
4219 }
4220
4221 /* FIXME return status is never checked */
4222 static int grow_backup(struct mdinfo *sra,
4223 unsigned long long offset, /* per device */
4224 unsigned long stripes, /* per device, in old chunks */
4225 int *sources, unsigned long long *offsets,
4226 int disks, int chunk, int level, int layout,
4227 int dests, int *destfd, unsigned long long *destoffsets,
4228 int part, int *degraded,
4229 char *buf)
4230 {
4231 /* Backup 'blocks' sectors at 'offset' on each device of the array,
4232 * to storage 'destfd' (offset 'destoffsets'), after first
4233 * suspending IO. Then allow resync to continue
4234 * over the suspended section.
4235 * Use part 'part' of the backup-super-block.
4236 */
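	/* Layout of the backup area used below (relative to destoffsets[i]):
	 *   destoffsets[i] - 4096: the backup superblock (bsb)
	 *   destoffsets[i], or destoffsets[i] + devstart2*512 for part 1:
	 *       the saved stripes themselves
	 *   destoffsets[i] + stripes*chunk*odata: a trailing copy of bsb,
	 *       written only when destoffsets[i] > 4096
	 */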
4237 int odata = disks;
4238 int rv = 0;
4239 int i;
4240 unsigned long long ll;
4241 int new_degraded;
4242 //printf("offset %llu\n", offset);
4243 if (level >= 4)
4244 odata--;
4245 if (level == 6)
4246 odata--;
4247
4248 /* Check that array hasn't become degraded, else we might backup the wrong data */
4249 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
4250 return -1; /* FIXME this error is ignored */
4251 new_degraded = (int)ll;
4252 if (new_degraded != *degraded) {
4253 /* check each device to ensure it is still working */
4254 struct mdinfo *sd;
4255 for (sd = sra->devs ; sd ; sd = sd->next) {
4256 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4257 continue;
4258 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
4259 char sbuf[100];
4260
4261 if (sysfs_get_str(sra, sd, "state",
4262 sbuf, sizeof(sbuf)) < 0 ||
4263 strstr(sbuf, "faulty") ||
4264 strstr(sbuf, "in_sync") == NULL) {
4265 /* this device is dead */
4266 sd->disk.state = (1<<MD_DISK_FAULTY);
4267 if (sd->disk.raid_disk >= 0 &&
4268 sources[sd->disk.raid_disk] >= 0) {
4269 close(sources[sd->disk.raid_disk]);
4270 sources[sd->disk.raid_disk] = -1;
4271 }
4272 }
4273 }
4274 }
4275 *degraded = new_degraded;
4276 }
4277 if (part) {
4278 bsb.arraystart2 = __cpu_to_le64(offset * odata);
4279 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
4280 } else {
4281 bsb.arraystart = __cpu_to_le64(offset * odata);
4282 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
4283 }
4284 if (part)
4285 bsb.magic[15] = '2';
4286 for (i = 0; i < dests; i++)
4287 if (part)
4288 lseek64(destfd[i], destoffsets[i] +
4289 __le64_to_cpu(bsb.devstart2)*512, 0);
4290 else
4291 lseek64(destfd[i], destoffsets[i], 0);
4292
4293 rv = save_stripes(sources, offsets, disks, chunk, level, layout,
4294 dests, destfd, offset * 512 * odata,
4295 stripes * chunk * odata, buf);
4296
4297 if (rv)
4298 return rv;
4299 bsb.mtime = __cpu_to_le64(time(0));
4300 for (i = 0; i < dests; i++) {
4301 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4302
4303 bsb.sb_csum = bsb_csum((char*)&bsb,
4304 ((char*)&bsb.sb_csum)-((char*)&bsb));
4305 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4306 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4307 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4308
4309 rv = -1;
4310 if ((unsigned long long)lseek64(destfd[i],
4311 destoffsets[i] - 4096, 0) !=
4312 destoffsets[i] - 4096)
4313 break;
4314 if (write(destfd[i], &bsb, 512) != 512)
4315 break;
4316 if (destoffsets[i] > 4096) {
4317 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
4318 destoffsets[i]+stripes*chunk*odata)
4319 break;
4320 if (write(destfd[i], &bsb, 512) != 512)
4321 break;
4322 }
4323 fsync(destfd[i]);
4324 rv = 0;
4325 }
4326
4327 return rv;
4328 }
4329
4330 /* In 2.6.30, the value reported by sync_completed can be
4331 * less than it should be by one stripe.
4332 * This only happens when reshape hits sync_max and pauses.
4333 * So allow wait_backup to either extend sync_max further
4334 * than strictly necessary, or return before the
4335 * sync has got quite as far as we would really like.
4336 * This is what 'blocks2' is for.
4337 * The various callers give appropriate values so that
4338 * everything works.
4339 */
4340 /* FIXME return value is often ignored */
4341 static int forget_backup(int dests, int *destfd,
4342 unsigned long long *destoffsets,
4343 int part)
4344 {
4345 /*
4346 * Erase backup 'part' (which is 0 or 1)
4347 */
4348 int i;
4349 int rv;
4350
4351 if (part) {
4352 bsb.arraystart2 = __cpu_to_le64(0);
4353 bsb.length2 = __cpu_to_le64(0);
4354 } else {
4355 bsb.arraystart = __cpu_to_le64(0);
4356 bsb.length = __cpu_to_le64(0);
4357 }
4358 bsb.mtime = __cpu_to_le64(time(0));
4359 rv = 0;
4360 for (i = 0; i < dests; i++) {
4361 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4362 bsb.sb_csum = bsb_csum((char*)&bsb,
4363 ((char*)&bsb.sb_csum)-((char*)&bsb));
4364 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4365 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4366 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4367 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4368 destoffsets[i]-4096)
4369 rv = -1;
4370 if (rv == 0 && write(destfd[i], &bsb, 512) != 512)
4371 rv = -1;
4372 fsync(destfd[i]);
4373 }
4374 return rv;
4375 }
4376
4377 static void fail(char *msg)
4378 {
4379 int rv;
4380 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4381 rv |= (write(2, "\n", 1) != 1);
4382 exit(rv ? 1 : 2);
4383 }
4384
4385 static char *abuf, *bbuf;
4386 static unsigned long long abuflen;
4387 static void validate(int afd, int bfd, unsigned long long offset)
4388 {
4389 /* Check the data in the backup against the array.
4390 * This is only used for regression testing and should not
4391 * be used while the array is active
4392 */
4393 if (afd < 0)
4394 return;
4395 lseek64(bfd, offset - 4096, 0);
4396 if (read(bfd, &bsb2, 512) != 512)
4397 fail("cannot read bsb");
4398 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4399 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4400 fail("first csum bad");
4401 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4402 fail("magic is bad");
4403 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4404 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4405 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4406 fail("second csum bad");
4407
4408 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4409 fail("devstart is wrong");
4410
4411 if (bsb2.length) {
4412 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4413
4414 if (abuflen < len) {
4415 free(abuf);
4416 free(bbuf);
4417 abuflen = len;
4418 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4419 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4420 abuflen = 0;
4421 /* just stop validating on mem-alloc failure */
4422 return;
4423 }
4424 }
4425
4426 lseek64(bfd, offset, 0);
4427 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4428 //printf("len %llu\n", len);
4429 fail("read first backup failed");
4430 }
4431 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4432 if ((unsigned long long)read(afd, abuf, len) != len)
4433 fail("read first from array failed");
4434 if (memcmp(bbuf, abuf, len) != 0) {
4435 #if 0
4436 int i;
4437 printf("offset=%llu len=%llu\n",
4438 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4439 for (i=0; i<len; i++)
4440 if (bbuf[i] != abuf[i]) {
4441 printf("first diff byte %d\n", i);
4442 break;
4443 }
4444 #endif
4445 fail("data1 compare failed");
4446 }
4447 }
4448 if (bsb2.length2) {
4449 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4450
4451 if (abuflen < len) {
4452 free(abuf);
4453 free(bbuf);
4454 abuflen = len;
4455 abuf = xmalloc(abuflen);
4456 bbuf = xmalloc(abuflen);
4457 }
4458
4459 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4460 if ((unsigned long long)read(bfd, bbuf, len) != len)
4461 fail("read second backup failed");
4462 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4463 if ((unsigned long long)read(afd, abuf, len) != len)
4464 fail("read second from array failed");
4465 if (memcmp(bbuf, abuf, len) != 0)
4466 fail("data2 compare failed");
4467 }
4468 }
4469
4470 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4471 struct supertype *st, unsigned long blocks,
4472 int *fds, unsigned long long *offsets,
4473 int dests, int *destfd, unsigned long long *destoffsets)
4474 {
4475 /* Monitor a reshape where backup is being performed using
4476 * 'native' mechanism - either to a backup file, or
4477 * to some space in a spare.
4478 */
4479 char *buf;
4480 int degraded = -1;
4481 unsigned long long speed;
4482 unsigned long long suspend_point, array_size;
4483 unsigned long long backup_point, wait_point;
4484 unsigned long long reshape_completed;
4485 int done = 0;
4486 int increasing = reshape->after.data_disks >=
4487 reshape->before.data_disks;
4488 int part = 0; /* The next part of the backup area to fill. It
4489 * may already be full, so we need to check */
4490 int level = reshape->level;
4491 int layout = reshape->before.layout;
4492 int data = reshape->before.data_disks;
4493 int disks = reshape->before.data_disks + reshape->parity;
4494 int chunk = sra->array.chunk_size;
4495 struct mdinfo *sd;
4496 unsigned long stripes;
4497 int uuid[4];
4498 int frozen = 0;
4499
4500 /* set up the backup-super-block. This requires the
4501 * uuid from the array.
4502 */
4503 /* Find a superblock */
4504 for (sd = sra->devs; sd; sd = sd->next) {
4505 char *dn;
4506 int devfd;
4507 int ok;
4508 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4509 continue;
4510 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4511 devfd = dev_open(dn, O_RDONLY);
4512 if (devfd < 0)
4513 continue;
4514 ok = st->ss->load_super(st, devfd, NULL);
4515 close(devfd);
4516 if (ok == 0)
4517 break;
4518 }
4519 if (!sd) {
4520 pr_err("Cannot find a superblock\n");
4521 return 0;
4522 }
4523
4524 memset(&bsb, 0, 512);
4525 memcpy(bsb.magic, "md_backup_data-1", 16);
4526 st->ss->uuid_from_super(st, uuid);
4527 memcpy(bsb.set_uuid, uuid, 16);
4528 bsb.mtime = __cpu_to_le64(time(0));
4529 bsb.devstart2 = blocks;
4530
4531 stripes = blocks / (sra->array.chunk_size/512) /
4532 reshape->before.data_disks;
4533
4534 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4535 /* Don't start the 'reshape' */
4536 return 0;
4537 if (reshape->before.data_disks == reshape->after.data_disks) {
4538 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4539 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4540 }
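	/* For an in-place reshape (unchanged number of data disks) the
	 * minimum resync speed is temporarily raised; the value saved above
	 * is restored just before this function returns.
	 */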
4541
4542 if (increasing) {
4543 array_size = sra->component_size * reshape->after.data_disks;
4544 backup_point = sra->reshape_progress;
4545 suspend_point = 0;
4546 } else {
4547 array_size = sra->component_size * reshape->before.data_disks;
4548 backup_point = reshape->backup_blocks;
4549 suspend_point = array_size;
4550 }
4551
4552 while (!done) {
4553 int rv;
4554
4555 /* Want to return as soon as the oldest backup slot can
4556 * be released as that allows us to start backing up
4557 * some more, providing suspend_point has been
4558 * advanced, which it should have.
4559 */
4560 if (increasing) {
4561 wait_point = array_size;
4562 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4563 wait_point = (__le64_to_cpu(bsb.arraystart) +
4564 __le64_to_cpu(bsb.length));
4565 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4566 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4567 __le64_to_cpu(bsb.length2));
4568 } else {
4569 wait_point = 0;
4570 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4571 wait_point = __le64_to_cpu(bsb.arraystart);
4572 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4573 wait_point = __le64_to_cpu(bsb.arraystart2);
4574 }
4575
4576 reshape_completed = sra->reshape_progress;
4577 rv = progress_reshape(sra, reshape,
4578 backup_point, wait_point,
4579 &suspend_point, &reshape_completed,
4580 &frozen);
4581 /* external metadata would need to ping_monitor here */
4582 sra->reshape_progress = reshape_completed;
4583
4584 /* Clear any backup region that is before 'here' */
4585 if (increasing) {
4586 if (__le64_to_cpu(bsb.length) > 0 &&
4587 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4588 __le64_to_cpu(bsb.length)))
4589 forget_backup(dests, destfd,
4590 destoffsets, 0);
4591 if (__le64_to_cpu(bsb.length2) > 0 &&
4592 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4593 __le64_to_cpu(bsb.length2)))
4594 forget_backup(dests, destfd,
4595 destoffsets, 1);
4596 } else {
4597 if (__le64_to_cpu(bsb.length) > 0 &&
4598 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4599 forget_backup(dests, destfd,
4600 destoffsets, 0);
4601 if (__le64_to_cpu(bsb.length2) > 0 &&
4602 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4603 forget_backup(dests, destfd,
4604 destoffsets, 1);
4605 }
4606 if (sigterm)
4607 rv = -2;
4608 if (rv < 0) {
4609 if (rv == -1)
4610 done = 1;
4611 break;
4612 }
4613 if (rv == 0 && increasing && !st->ss->external) {
4614 /* No longer need to monitor this reshape */
4615 sysfs_set_str(sra, NULL, "sync_max", "max");
4616 done = 1;
4617 break;
4618 }
4619
4620 while (rv) {
4621 unsigned long long offset;
4622 unsigned long actual_stripes;
4623 /* Need to backup some data.
4624 * If 'part' is not used and the desired
4625 * backup size is suspended, do a backup,
4626 * then consider the next part.
4627 */
4628 /* Check that 'part' is unused */
4629 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4630 break;
4631 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4632 break;
4633
4634 offset = backup_point / data;
4635 actual_stripes = stripes;
4636 if (increasing) {
4637 if (offset + actual_stripes * (chunk/512) >
4638 sra->component_size)
4639 actual_stripes = ((sra->component_size - offset)
4640 / (chunk/512));
4641 if (offset + actual_stripes * (chunk/512) >
4642 suspend_point/data)
4643 break;
4644 } else {
4645 if (offset < actual_stripes * (chunk/512))
4646 actual_stripes = offset / (chunk/512);
4647 offset -= actual_stripes * (chunk/512);
4648 if (offset < suspend_point/data)
4649 break;
4650 }
4651 if (actual_stripes == 0)
4652 break;
4653 grow_backup(sra, offset, actual_stripes, fds, offsets,
4654 disks, chunk, level, layout, dests, destfd,
4655 destoffsets, part, &degraded, buf);
4656 validate(afd, destfd[0], destoffsets[0]);
4657 /* record where 'part' is up to */
4658 part = !part;
4659 if (increasing)
4660 backup_point += actual_stripes * (chunk/512) * data;
4661 else
4662 backup_point -= actual_stripes * (chunk/512) * data;
4663 }
4664 }
4665
4666 /* FIXME maybe call progress_reshape one more time instead */
4667 /* remove any remaining suspension */
4668 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4669 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4670 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4671 sysfs_set_num(sra, NULL, "sync_min", 0);
4672
4673 if (reshape->before.data_disks == reshape->after.data_disks)
4674 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4675 free(buf);
4676 return done;
4677 }
4678
4679 /*
4680 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4681 * write that data into the array and update the super blocks with
4682 * the new reshape_progress
4683 */
4684 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist,
4685 int cnt, char *backup_file, int verbose)
4686 {
4687 int i, j;
4688 int old_disks;
4689 unsigned long long *offsets;
4690 unsigned long long nstripe, ostripe;
4691 int ndata, odata;
4692
4693 odata = info->array.raid_disks - info->delta_disks - 1;
4694 if (info->array.level == 6)
4695 odata--; /* number of data disks */
4696 ndata = info->array.raid_disks - 1;
4697 if (info->new_level == 6)
4698 ndata--;
4699
4700 old_disks = info->array.raid_disks - info->delta_disks;
4701
4702 if (info->delta_disks <= 0)
4703 /* Didn't grow, so the backup file must have
4704 * been used
4705 */
4706 old_disks = cnt;
4707 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4708 struct mdinfo dinfo;
4709 int fd;
4710 int bsbsize;
4711 char *devname, namebuf[20];
4712 unsigned long long lo, hi;
4713
4714 /* This was a spare and may have some saved data on it.
4715 * Load the superblock, find and load the
4716 * backup_super_block.
4717 * If either fail, go on to next device.
4718 * If the backup contains no new info, just return
4719 * else restore data and update all superblocks
4720 */
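		/* The checks applied below, in order: backup magic, primary
		 * checksum (plus the second checksum for "-2" backups), array
		 * uuid, an mtime close enough to the array's utime, and
		 * finally whether the backed-up range still covers data the
		 * reshape needs.
		 */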
4721 if (i == old_disks-1) {
4722 fd = open(backup_file, O_RDONLY);
4723 if (fd<0) {
4724 pr_err("backup file %s inaccessible: %s\n",
4725 backup_file, strerror(errno));
4726 continue;
4727 }
4728 devname = backup_file;
4729 } else {
4730 fd = fdlist[i];
4731 if (fd < 0)
4732 continue;
4733 if (st->ss->load_super(st, fd, NULL))
4734 continue;
4735
4736 st->ss->getinfo_super(st, &dinfo, NULL);
4737 st->ss->free_super(st);
4738
4739 if (lseek64(fd,
4740 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4741 0) < 0) {
4742 pr_err("Cannot seek on device %d\n", i);
4743 continue; /* Cannot seek */
4744 }
4745 sprintf(namebuf, "device-%d", i);
4746 devname = namebuf;
4747 }
4748 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4749 if (verbose)
4750 pr_err("Cannot read from %s\n", devname);
4751 continue; /* Cannot read */
4752 }
4753 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4754 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4755 if (verbose)
4756 pr_err("No backup metadata on %s\n", devname);
4757 continue;
4758 }
4759 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4760 if (verbose)
4761 pr_err("Bad backup-metadata checksum on %s\n",
4762 devname);
4763 continue; /* bad checksum */
4764 }
4765 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4766 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4767 if (verbose)
4768 pr_err("Bad backup-metadata checksum2 on %s\n",
4769 devname);
4770 continue; /* Bad second checksum */
4771 }
4772 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4773 if (verbose)
4774 pr_err("Wrong uuid on backup-metadata on %s\n",
4775 devname);
4776 continue; /* Wrong uuid */
4777 }
4778
4779 /*
4780 * array utime and backup-mtime should be updated at
4781 * much the same time, but it seems that sometimes
4782 * they aren't... So allow considerable flexibility in
4783 * matching, and allow this test to be overridden by
4784 * an environment variable.
4785 */
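		/* Concretely, with the bounds used below the backup's mtime
		 * may be up to two hours older than the array's utime, or up
		 * to ten minutes newer, before MDADM_GROW_ALLOW_OLD is
		 * required.
		 */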
4786 if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
4787 time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
4788 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4789 pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
4790 (unsigned long)__le64_to_cpu(bsb.mtime),
4791 (unsigned long)info->array.utime);
4792 } else {
4793 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4794 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4795 continue; /* time stamp is too bad */
4796 }
4797 }
4798
4799 if (bsb.magic[15] == '1') {
4800 if (bsb.length == 0)
4801 continue;
4802 if (info->delta_disks >= 0) {
4803 /* reshape_progress is increasing */
4804 if (__le64_to_cpu(bsb.arraystart)
4805 + __le64_to_cpu(bsb.length)
4806 < info->reshape_progress) {
4807 nonew:
4808 if (verbose)
4809 pr_err("backup-metadata found on %s but is not needed\n", devname);
4810 continue; /* No new data here */
4811 }
4812 } else {
4813 /* reshape_progress is decreasing */
4814 if (__le64_to_cpu(bsb.arraystart) >=
4815 info->reshape_progress)
4816 goto nonew; /* No new data here */
4817 }
4818 } else {
4819 if (bsb.length == 0 && bsb.length2 == 0)
4820 continue;
4821 if (info->delta_disks >= 0) {
4822 /* reshape_progress is increasing */
4823 if ((__le64_to_cpu(bsb.arraystart)
4824 + __le64_to_cpu(bsb.length)
4825 < info->reshape_progress) &&
4826 (__le64_to_cpu(bsb.arraystart2)
4827 + __le64_to_cpu(bsb.length2)
4828 < info->reshape_progress))
4829 goto nonew; /* No new data here */
4830 } else {
4831 /* reshape_progress is decreasing */
4832 if (__le64_to_cpu(bsb.arraystart) >=
4833 info->reshape_progress &&
4834 __le64_to_cpu(bsb.arraystart2) >=
4835 info->reshape_progress)
4836 goto nonew; /* No new data here */
4837 }
4838 }
4839 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4840 second_fail:
4841 if (verbose)
4842 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4843 devname);
4844 continue; /* Cannot seek */
4845 }
4846 /* There should be a duplicate backup superblock 4k before here */
4847 if (lseek64(fd, -4096, 1) < 0 ||
4848 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4849 goto second_fail; /* Cannot find leading superblock */
4850 if (bsb.magic[15] == '1')
4851 bsbsize = offsetof(struct mdp_backup_super, pad1);
4852 else
4853 bsbsize = offsetof(struct mdp_backup_super, pad);
4854 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4855 goto second_fail; /* Cannot find leading superblock */
4856
4857 /* Now need the data offsets for all devices. */
4858 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4859 for(j=0; j<info->array.raid_disks; j++) {
4860 if (fdlist[j] < 0)
4861 continue;
4862 if (st->ss->load_super(st, fdlist[j], NULL))
4863 /* FIXME should this be an error? */
4864 continue;
4865 st->ss->getinfo_super(st, &dinfo, NULL);
4866 st->ss->free_super(st);
4867 offsets[j] = dinfo.data_offset * 512;
4868 }
4869 printf("%s: restoring critical section\n", Name);
4870
4871 if (restore_stripes(fdlist, offsets, info->array.raid_disks,
4872 info->new_chunk, info->new_level,
4873 info->new_layout, fd,
4874 __le64_to_cpu(bsb.devstart)*512,
4875 __le64_to_cpu(bsb.arraystart)*512,
4876 __le64_to_cpu(bsb.length)*512, NULL)) {
4877 /* didn't succeed, so give up */
4878 if (verbose)
4879 pr_err("Error restoring backup from %s\n",
4880 devname);
4881 free(offsets);
4882 return 1;
4883 }
4884
4885 if (bsb.magic[15] == '2' &&
4886 restore_stripes(fdlist, offsets, info->array.raid_disks,
4887 info->new_chunk, info->new_level,
4888 info->new_layout, fd,
4889 __le64_to_cpu(bsb.devstart)*512 +
4890 __le64_to_cpu(bsb.devstart2)*512,
4891 __le64_to_cpu(bsb.arraystart2)*512,
4892 __le64_to_cpu(bsb.length2)*512, NULL)) {
4893 /* didn't succeed, so give up */
4894 if (verbose)
4895 pr_err("Error restoring second backup from %s\n",
4896 devname);
4897 free(offsets);
4898 return 1;
4899 }
4900
4901 free(offsets);
4902
4903 /* Ok, so the data is restored. Let's update those superblocks. */
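		/* lo..hi computed below is (roughly) the overall extent of
		 * the restored range(s).  If reshape_progress already lies
		 * outside it the restore changes nothing; otherwise it is
		 * moved to the far edge of the restored data - the highest
		 * end when growing, the lowest start when shrinking.
		 */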
4904
4905 lo = hi = 0;
4906 if (bsb.length) {
4907 lo = __le64_to_cpu(bsb.arraystart);
4908 hi = lo + __le64_to_cpu(bsb.length);
4909 }
4910 if (bsb.magic[15] == '2' && bsb.length2) {
4911 unsigned long long lo1, hi1;
4912 lo1 = __le64_to_cpu(bsb.arraystart2);
4913 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4914 if (lo == hi) {
4915 lo = lo1;
4916 hi = hi1;
4917 } else if (lo < lo1)
4918 hi = hi1;
4919 else
4920 lo = lo1;
4921 }
4922 if (lo < hi && (info->reshape_progress < lo ||
4923 info->reshape_progress > hi))
4924 /* backup does not affect reshape_progress */ ;
4925 else if (info->delta_disks >= 0) {
4926 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4927 __le64_to_cpu(bsb.length);
4928 if (bsb.magic[15] == '2') {
4929 unsigned long long p2;
4930
4931 p2 = __le64_to_cpu(bsb.arraystart2) +
4932 __le64_to_cpu(bsb.length2);
4933 if (p2 > info->reshape_progress)
4934 info->reshape_progress = p2;
4935 }
4936 } else {
4937 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4938 if (bsb.magic[15] == '2') {
4939 unsigned long long p2;
4940
4941 p2 = __le64_to_cpu(bsb.arraystart2);
4942 if (p2 < info->reshape_progress)
4943 info->reshape_progress = p2;
4944 }
4945 }
4946 for (j=0; j<info->array.raid_disks; j++) {
4947 if (fdlist[j] < 0)
4948 continue;
4949 if (st->ss->load_super(st, fdlist[j], NULL))
4950 continue;
4951 st->ss->getinfo_super(st, &dinfo, NULL);
4952 dinfo.reshape_progress = info->reshape_progress;
4953 st->ss->update_super(st, &dinfo,
4954 UOPT_SPEC__RESHAPE_PROGRESS,
4955 NULL,0, 0, NULL);
4956 st->ss->store_super(st, fdlist[j]);
4957 st->ss->free_super(st);
4958 }
4959 return 0;
4960 }
4961 /* Didn't find any backup data, try to see if any
4962 * was needed.
4963 */
4964 if (info->delta_disks < 0) {
4965 /* When shrinking, the critical section is at the end.
4966 * So see if we are before the critical section.
4967 */
4968 unsigned long long first_block;
4969 nstripe = ostripe = 0;
4970 first_block = 0;
4971 while (ostripe >= nstripe) {
4972 ostripe += info->array.chunk_size / 512;
4973 first_block = ostripe * odata;
4974 nstripe = first_block / ndata / (info->new_chunk/512) *
4975 (info->new_chunk/512);
4976 }
4977
4978 if (info->reshape_progress >= first_block)
4979 return 0;
4980 }
4981 if (info->delta_disks > 0) {
4982 /* See if we are beyond the critical section. */
4983 unsigned long long last_block;
4984 nstripe = ostripe = 0;
4985 last_block = 0;
4986 while (nstripe >= ostripe) {
4987 nstripe += info->new_chunk / 512;
4988 last_block = nstripe * ndata;
4989 ostripe = last_block / odata / (info->array.chunk_size/512) *
4990 (info->array.chunk_size/512);
4991 }
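		/* Worked example (assumed values): growing a RAID5 from 3 to
		 * 4 devices with 512KiB chunks gives odata=2, ndata=3 and
		 * 1024-sector chunks; the loop settles on last_block = 6144
		 * sectors, so a reshape that has passed that point needs no
		 * backup restored.
		 */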
4992
4993 if (info->reshape_progress >= last_block)
4994 return 0;
4995 }
4996 /* needed to recover critical section! */
4997 if (verbose)
4998 pr_err("Failed to find backup of critical section\n");
4999 return 1;
5000 }
5001
5002 int Grow_continue_command(char *devname, int fd,
5003 char *backup_file, int verbose)
5004 {
5005 int ret_val = 0;
5006 struct supertype *st = NULL;
5007 struct mdinfo *content = NULL;
5008 struct mdinfo array;
5009 char *subarray = NULL;
5010 struct mdinfo *cc = NULL;
5011 struct mdstat_ent *mdstat = NULL;
5012 int cfd = -1;
5013 int fd2;
5014
5015 dprintf("Grow continue from command line called for %s\n", devname);
5016
5017 st = super_by_fd(fd, &subarray);
5018 if (!st || !st->ss) {
5019 pr_err("Unable to determine metadata format for %s\n", devname);
5020 return 1;
5021 }
5022 dprintf("Grow continue is run for ");
5023 if (st->ss->external == 0) {
5024 int d;
5025 int cnt = 5;
5026 dprintf_cont("native array (%s)\n", devname);
5027 if (md_get_array_info(fd, &array.array) < 0) {
5028 pr_err("%s is not an active md array - aborting\n",
5029 devname);
5030 ret_val = 1;
5031 goto Grow_continue_command_exit;
5032 }
5033 content = &array;
5034 sysfs_init(content, fd, NULL);
5035 /* Need to load a superblock.
5036 * FIXME we should really get what we need from
5037 * sysfs
5038 */
5039 do {
5040 for (d = 0; d < MAX_DISKS; d++) {
5041 mdu_disk_info_t disk;
5042 char *dv;
5043 int err;
5044 disk.number = d;
5045 if (md_get_disk_info(fd, &disk) < 0)
5046 continue;
5047 if (disk.major == 0 && disk.minor == 0)
5048 continue;
5049 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
5050 continue;
5051 dv = map_dev(disk.major, disk.minor, 1);
5052 if (!dv)
5053 continue;
5054 fd2 = dev_open(dv, O_RDONLY);
5055 if (fd2 < 0)
5056 continue;
5057 err = st->ss->load_super(st, fd2, NULL);
5058 close(fd2);
5059 if (err)
5060 continue;
5061 break;
5062 }
5063 if (d == MAX_DISKS) {
5064 pr_err("Unable to load metadata for %s\n",
5065 devname);
5066 ret_val = 1;
5067 goto Grow_continue_command_exit;
5068 }
5069 st->ss->getinfo_super(st, content, NULL);
5070 if (!content->reshape_active)
5071 sleep_for(3, 0, true);
5072 else
5073 break;
5074 } while (cnt-- > 0);
5075 } else {
5076 char *container;
5077
5078 if (subarray) {
5079 dprintf_cont("subarray (%s)\n", subarray);
5080 container = st->container_devnm;
5081 cfd = open_dev_excl(st->container_devnm);
5082 } else {
5083 container = st->devnm;
5084 close(fd);
5085 cfd = open_dev_excl(st->devnm);
5086 dprintf_cont("container (%s)\n", container);
5087 fd = cfd;
5088 }
5089 if (cfd < 0) {
5090 pr_err("Unable to open container for %s\n", devname);
5091 ret_val = 1;
5092 goto Grow_continue_command_exit;
5093 }
5094
5095 /* find the array under reshape in the container
5096 */
5097 ret_val = st->ss->load_container(st, cfd, NULL);
5098 if (ret_val) {
5099 pr_err("Cannot read superblock for %s\n", devname);
5100 ret_val = 1;
5101 goto Grow_continue_command_exit;
5102 }
5103
5104 cc = st->ss->container_content(st, subarray);
5105 for (content = cc; content ; content = content->next) {
5106 char *array_name;
5107 int allow_reshape = 1;
5108
5109 if (content->reshape_active == 0)
5110 continue;
5111 /* The decision about array or container wide
5112 * reshape is taken in Grow_continue based on
5113 * content->reshape_active state, therefore we
5114 * need to check_reshape based on
5115 * reshape_active and subarray name
5116 */
5117 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
5118 allow_reshape = 0;
5119 if (content->reshape_active == CONTAINER_RESHAPE &&
5120 (content->array.state
5121 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
5122 allow_reshape = 0;
5123
5124 if (!allow_reshape) {
5125 pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
5126 devname, container);
5127 ret_val = 1;
5128 goto Grow_continue_command_exit;
5129 }
5130
5131 array_name = strchr(content->text_version+1, '/')+1;
5132 mdstat = mdstat_by_subdev(array_name, container);
5133 if (!mdstat)
5134 continue;
5135 if (mdstat->active == 0) {
5136 pr_err("Skipping inactive array %s.\n",
5137 mdstat->devnm);
5138 free_mdstat(mdstat);
5139 mdstat = NULL;
5140 continue;
5141 }
5142 break;
5143 }
5144 if (!content) {
5145 pr_err("Unable to determine reshaped array for %s\n", devname);
5146 ret_val = 1;
5147 goto Grow_continue_command_exit;
5148 }
5149 fd2 = open_dev(mdstat->devnm);
5150 if (fd2 < 0) {
5151 pr_err("cannot open (%s)\n", mdstat->devnm);
5152 ret_val = 1;
5153 goto Grow_continue_command_exit;
5154 }
5155
5156 if (sysfs_init(content, fd2, mdstat->devnm)) {
5157 pr_err("Unable to initialize sysfs for %s, Grow cannot continue.\n",
5158 mdstat->devnm);
5159 ret_val = 1;
5160 close(fd2);
5161 goto Grow_continue_command_exit;
5162 }
5163
5164 close(fd2);
5165
5166 /* start mdmon in case it is not running
5167 */
5168 if (!mdmon_running(container))
5169 start_mdmon(container);
5170 ping_monitor(container);
5171
5172 if (mdmon_running(container))
5173 st->update_tail = &st->updates;
5174 else {
5175 pr_err("No mdmon found. Grow cannot continue.\n");
5176 ret_val = 1;
5177 goto Grow_continue_command_exit;
5178 }
5179 }
5180
5181 /* verify that the array under reshape is started from
5182 * the correct position
5183 */
5184 if (verify_reshape_position(content, content->array.level) < 0) {
5185 ret_val = 1;
5186 goto Grow_continue_command_exit;
5187 }
5188
5189 /* continue reshape
5190 */
5191 ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);
5192
5193 Grow_continue_command_exit:
5194 if (cfd > -1)
5195 close(cfd);
5196 st->ss->free_super(st);
5197 free_mdstat(mdstat);
5198 sysfs_free(cc);
5199 free(subarray);
5200
5201 return ret_val;
5202 }
5203
5204 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
5205 char *backup_file, int forked, int freeze_reshape)
5206 {
5207 int ret_val = 2;
5208
5209 if (!info->reshape_active)
5210 return ret_val;
5211
5212 if (st->ss->external) {
5213 int cfd = open_dev(st->container_devnm);
5214
5215 if (cfd < 0)
5216 return 1;
5217
5218 st->ss->load_container(st, cfd, st->container_devnm);
5219 close(cfd);
5220 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
5221 st, info, 0, backup_file, 0,
5222 forked, 1 | info->reshape_active,
5223 freeze_reshape);
5224 } else
5225 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
5226 NULL, INVALID_SECTORS, backup_file,
5227 0, forked, 1 | info->reshape_active,
5228 freeze_reshape);
5229
5230 return ret_val;
5231 }
5232
5233 char *make_backup(char *name)
5234 {
5235 char *base = "backup_file-";
5236 int len;
5237 char *fname;
5238
5239 len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1;
5240 fname = xmalloc(len);
5241 sprintf(fname, "%s/%s%s", MAP_DIR, base, name);
5242 return fname;
5243 }
5244
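/*
 * For example, for an array named "md127" make_backup() returns
 * "<MAP_DIR>/backup_file-md127" (MAP_DIR being whatever map directory this
 * build uses); locate_backup() additionally checks that such a file exists
 * and is a regular file.
 */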
5245 char *locate_backup(char *name)
5246 {
5247 char *fl = make_backup(name);
5248 struct stat stb;
5249
5250 if (stat(fl, &stb) == 0 && S_ISREG(stb.st_mode))
5251 return fl;
5252
5253 free(fl);
5254 return NULL;
5255 }