1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stddef.h>
28 #include <stdint.h>
29 #include <signal.h>
30 #include <sys/wait.h>
31
32 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
33 #error no endian defined
34 #endif
35 #include "md_u.h"
36 #include "md_p.h"
37
38 int restore_backup(struct supertype *st,
39 struct mdinfo *content,
40 int working_disks,
41 int next_spare,
42 char **backup_filep,
43 int verbose)
44 {
45 int i;
46 int *fdlist;
47 struct mdinfo *dev;
48 int err;
49 int disk_count = next_spare + working_disks;
50 char *backup_file = *backup_filep;
51
52 dprintf("Called restore_backup()\n");
53 fdlist = xmalloc(sizeof(int) * disk_count);
54
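/* Note: active members are placed at their raid_disk index in fdlist,
 * while spares are appended starting at next_spare. */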
55 enable_fds(next_spare);
56 for (i = 0; i < next_spare; i++)
57 fdlist[i] = -1;
58 for (dev = content->devs; dev; dev = dev->next) {
59 char buf[22];
60 int fd;
61 sprintf(buf, "%d:%d",
62 dev->disk.major,
63 dev->disk.minor);
64 fd = dev_open(buf, O_RDWR);
65
66 if (dev->disk.raid_disk >= 0)
67 fdlist[dev->disk.raid_disk] = fd;
68 else
69 fdlist[next_spare++] = fd;
70 }
71
72 if (!backup_file) {
73 backup_file = locate_backup(content->sys_name);
74 *backup_filep = backup_file;
75 }
76
77 if (st->ss->external && st->ss->recover_backup)
78 err = st->ss->recover_backup(st, content);
79 else
80 err = Grow_restart(st, content, fdlist, next_spare,
81 backup_file, verbose > 0);
82
83 while (next_spare > 0) {
84 next_spare--;
85 if (fdlist[next_spare] >= 0)
86 close(fdlist[next_spare]);
87 }
88 free(fdlist);
89 if (err) {
90 pr_err("Failed to restore critical section for reshape - sorry.\n");
91 if (!backup_file)
92 pr_err("Possibly you need to specify a --backup-file\n");
93 return 1;
94 }
95
96 dprintf("restore_backup() returns status OK.\n");
97 return 0;
98 }
99
100 int Grow_Add_device(char *devname, int fd, char *newdev)
101 {
102 /* Add a device to an active array.
103 * Currently, just extend a linear array.
104 * This requires writing a new superblock on the
105 * new device, calling the kernel to add the device,
106 * and if that succeeds, update the superblock on
107 * all other devices.
108 * This means that we need to *find* all other devices.
109 */
110 struct mdinfo info;
111
112 struct stat stb;
113 int nfd, fd2;
114 int d, nd;
115 struct supertype *st = NULL;
116 char *subarray = NULL;
117
118 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
119 pr_err("cannot get array info for %s\n", devname);
120 return 1;
121 }
122
123 if (info.array.level != -1) {
124 pr_err("can only add devices to linear arrays\n");
125 return 1;
126 }
127
128 st = super_by_fd(fd, &subarray);
129 if (!st) {
130 pr_err("cannot handle arrays with superblock version %d\n",
131 info.array.major_version);
132 return 1;
133 }
134
135 if (subarray) {
136 pr_err("Cannot grow linear sub-arrays yet\n");
137 free(subarray);
138 free(st);
139 return 1;
140 }
141
142 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
143 if (nfd < 0) {
144 pr_err("cannot open %s\n", newdev);
145 free(st);
146 return 1;
147 }
148 fstat(nfd, &stb);
149 if ((stb.st_mode & S_IFMT) != S_IFBLK) {
150 pr_err("%s is not a block device!\n", newdev);
151 close(nfd);
152 free(st);
153 return 1;
154 }
155 /* now check out all the devices and make sure we can read the
156 * superblock */
157 for (d=0 ; d < info.array.raid_disks ; d++) {
158 mdu_disk_info_t disk;
159 char *dv;
160
161 st->ss->free_super(st);
162
163 disk.number = d;
164 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
165 pr_err("cannot get device detail for device %d\n",
166 d);
167 close(nfd);
168 free(st);
169 return 1;
170 }
171 dv = map_dev(disk.major, disk.minor, 1);
172 if (!dv) {
173 pr_err("cannot find device file for device %d\n",
174 d);
175 close(nfd);
176 free(st);
177 return 1;
178 }
179 fd2 = dev_open(dv, O_RDWR);
180 if (fd2 < 0) {
181 pr_err("cannot open device file %s\n", dv);
182 close(nfd);
183 free(st);
184 return 1;
185 }
186
187 if (st->ss->load_super(st, fd2, NULL)) {
188 pr_err("cannot find super block on %s\n", dv);
189 close(nfd);
190 close(fd2);
191 free(st);
192 return 1;
193 }
194 close(fd2);
195 }
196 /* Ok, looks good. Let's update the superblock and write it out to
197 * newdev.
198 */
199
200 info.disk.number = d;
201 info.disk.major = major(stb.st_rdev);
202 info.disk.minor = minor(stb.st_rdev);
203 info.disk.raid_disk = d;
204 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
205 st->ss->update_super(st, &info, "linear-grow-new", newdev,
206 0, 0, NULL);
207
208 if (st->ss->store_super(st, nfd)) {
209 pr_err("Cannot store new superblock on %s\n",
210 newdev);
211 close(nfd);
212 return 1;
213 }
214 close(nfd);
215
216 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
217 pr_err("Cannot add new disk to this array\n");
218 return 1;
219 }
220 /* Well, that seems to have worked.
221 * Now go through and update all superblocks
222 */
223
224 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
225 pr_err("cannot get array info for %s\n", devname);
226 return 1;
227 }
228
229 nd = d;
230 for (d=0 ; d < info.array.raid_disks ; d++) {
231 mdu_disk_info_t disk;
232 char *dv;
233
234 disk.number = d;
235 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
236 pr_err("cannot get device detail for device %d\n",
237 d);
238 return 1;
239 }
240 dv = map_dev(disk.major, disk.minor, 1);
241 if (!dv) {
242 pr_err("cannot find device file for device %d\n",
243 d);
244 return 1;
245 }
246 fd2 = dev_open(dv, O_RDWR);
247 if (fd2 < 0) {
248 pr_err("cannot open device file %s\n", dv);
249 return 1;
250 }
251 if (st->ss->load_super(st, fd2, NULL)) {
252 pr_err("cannot find super block on %s\n", dv);
253 close(fd);
254 return 1;
255 }
256 info.array.raid_disks = nd+1;
257 info.array.nr_disks = nd+1;
258 info.array.active_disks = nd+1;
259 info.array.working_disks = nd+1;
260
261 st->ss->update_super(st, &info, "linear-grow-update", dv,
262 0, 0, NULL);
263
264 if (st->ss->store_super(st, fd2)) {
265 pr_err("Cannot store new superblock on %s\n", dv);
266 close(fd2);
267 return 1;
268 }
269 close(fd2);
270 }
271
272 return 0;
273 }
274
275 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
276 {
277 /*
278 * First check that array doesn't have a bitmap
279 * Then create the bitmap
280 * Then add it
281 *
282 * For internal bitmaps, we need to check the version,
283 * find all the active devices, and write the bitmap block
284 * to all devices
285 */
286 mdu_bitmap_file_t bmf;
287 mdu_array_info_t array;
288 struct supertype *st;
289 char *subarray = NULL;
290 int major = BITMAP_MAJOR_HI;
291 int vers = md_get_version(fd);
292 unsigned long long bitmapsize, array_size;
293 struct mdinfo *mdi;
294
295 if (vers < 9003) {
296 major = BITMAP_MAJOR_HOSTENDIAN;
297 pr_err("Warning - bitmaps created on this kernel are not portable\n"
298 " between different architectures. Consider upgrading the Linux kernel.\n");
299 }
300
301 /*
302 * We only ever get called if s->bitmap_file is != NULL, so this check
303 * is just here to quiet down static code checkers.
304 */
305 if (!s->bitmap_file)
306 return 1;
307
308 if (strcmp(s->bitmap_file, "clustered") == 0)
309 major = BITMAP_MAJOR_CLUSTERED;
310
311 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
312 if (errno == ENOMEM)
313 pr_err("Memory allocation failure.\n");
314 else
315 pr_err("bitmaps not supported by this kernel.\n");
316 return 1;
317 }
318 if (bmf.pathname[0]) {
319 if (strcmp(s->bitmap_file,"none") == 0) {
320 if (ioctl(fd, SET_BITMAP_FILE, -1) != 0) {
321 pr_err("failed to remove bitmap %s\n",
322 bmf.pathname);
323 return 1;
324 }
325 return 0;
326 }
327 pr_err("%s already has a bitmap (%s)\n",
328 devname, bmf.pathname);
329 return 1;
330 }
331 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
332 pr_err("cannot get array status for %s\n", devname);
333 return 1;
334 }
335 if (array.state & (1 << MD_SB_BITMAP_PRESENT)) {
336 if (strcmp(s->bitmap_file, "none")==0) {
337 array.state &= ~(1 << MD_SB_BITMAP_PRESENT);
338 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
339 if (array.state & (1 << MD_SB_CLUSTERED))
340 pr_err("failed to remove clustered bitmap.\n");
341 else
342 pr_err("failed to remove internal bitmap.\n");
343 return 1;
344 }
345 return 0;
346 }
347 pr_err("bitmap already present on %s\n", devname);
348 return 1;
349 }
350
351 if (strcmp(s->bitmap_file, "none") == 0) {
352 pr_err("no bitmap found on %s\n", devname);
353 return 1;
354 }
355 if (array.level <= 0) {
356 pr_err("Bitmaps not meaningful with level %s\n",
357 map_num(pers, array.level)?:"of this array");
358 return 1;
359 }
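/* array.size is in KiB; shift left to convert to 512-byte sectors. */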
360 bitmapsize = array.size;
361 bitmapsize <<= 1;
362 if (get_dev_size(fd, NULL, &array_size) &&
363 array_size > (0x7fffffffULL << 9)) {
364 /* Array is big enough that we cannot trust array.size,
365 * so try other approaches
366 */
367 bitmapsize = get_component_size(fd);
368 }
369 if (bitmapsize == 0) {
370 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
371 return 1;
372 }
373
374 if (array.level == 10) {
375 int ncopies;
376
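/* The RAID10 layout word keeps near copies in the low byte and far
 * copies in the next byte, e.g. the common near=2 layout 0x102 gives
 * ncopies = 2 * 1 = 2. */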
377 ncopies = (array.layout & 255) * ((array.layout >> 8) & 255);
378 bitmapsize = bitmapsize * array.raid_disks / ncopies;
379 }
380
381 st = super_by_fd(fd, &subarray);
382 if (!st) {
383 pr_err("Cannot understand version %d.%d\n",
384 array.major_version, array.minor_version);
385 return 1;
386 }
387 if (subarray) {
388 pr_err("Cannot add bitmaps to sub-arrays yet\n");
389 free(subarray);
390 free(st);
391 return 1;
392 }
393
394 mdi = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY);
395 if (mdi) {
396 if (mdi->consistency_policy == CONSISTENCY_POLICY_PPL) {
397 pr_err("Cannot add bitmap to array with PPL\n");
398 free(mdi);
399 free(st);
400 return 1;
401 }
402 free(mdi);
403 }
404
405 if (strcmp(s->bitmap_file, "internal") == 0 ||
406 strcmp(s->bitmap_file, "clustered") == 0) {
407 int rv;
408 int d;
409 int offset_setable = 0;
410 if (st->ss->add_internal_bitmap == NULL) {
411 pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name);
412 return 1;
413 }
414 st->nodes = c->nodes;
415 st->cluster_name = c->homecluster;
416 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
417 if (mdi)
418 offset_setable = 1;
419 for (d = 0; d < st->max_devs; d++) {
420 mdu_disk_info_t disk;
421 char *dv;
422 int fd2;
423
424 disk.number = d;
425 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
426 continue;
427 if (disk.major == 0 && disk.minor == 0)
428 continue;
429 if ((disk.state & (1 << MD_DISK_SYNC)) == 0)
430 continue;
431 dv = map_dev(disk.major, disk.minor, 1);
432 if (!dv)
433 continue;
434 fd2 = dev_open(dv, O_RDWR);
435 if (fd2 < 0)
436 continue;
437 rv = st->ss->load_super(st, fd2, NULL);
438 if (!rv) {
439 rv = st->ss->add_internal_bitmap(
440 st, &s->bitmap_chunk, c->delay,
441 s->write_behind, bitmapsize,
442 offset_setable, major);
443 if (!rv) {
444 st->ss->write_bitmap(st, fd2,
445 NodeNumUpdate);
446 } else {
447 pr_err("failed to create internal bitmap - chunksize problem.\n");
448 }
449 } else {
450 pr_err("failed to load super-block.\n");
451 }
452 close(fd2);
453 if (rv)
454 return 1;
455 }
456 if (offset_setable) {
457 st->ss->getinfo_super(st, mdi, NULL);
458 sysfs_init(mdi, fd, NULL);
459 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
460 mdi->bitmap_offset);
461 free(mdi);
462 } else {
463 if (strcmp(s->bitmap_file, "clustered") == 0)
464 array.state |= (1 << MD_SB_CLUSTERED);
465 array.state |= (1 << MD_SB_BITMAP_PRESENT);
466 rv = ioctl(fd, SET_ARRAY_INFO, &array);
467 }
468 if (rv < 0) {
469 if (errno == EBUSY)
470 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
471 pr_err("failed to set internal bitmap.\n");
472 return 1;
473 }
474 } else {
475 int uuid[4];
476 int bitmap_fd;
477 int d;
478 int max_devs = st->max_devs;
479
480 /* try to load a superblock */
481 for (d = 0; d < max_devs; d++) {
482 mdu_disk_info_t disk;
483 char *dv;
484 int fd2;
485 disk.number = d;
486 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
487 continue;
488 if ((disk.major==0 && disk.minor == 0) ||
489 (disk.state & (1 << MD_DISK_REMOVED)))
490 continue;
491 dv = map_dev(disk.major, disk.minor, 1);
492 if (!dv)
493 continue;
494 fd2 = dev_open(dv, O_RDONLY);
495 if (fd2 >= 0) {
496 if (st->ss->load_super(st, fd2, NULL) == 0) {
497 close(fd2);
498 st->ss->uuid_from_super(st, uuid);
499 break;
500 }
501 close(fd2);
502 }
503 }
504 if (d == max_devs) {
505 pr_err("cannot find UUID for array!\n");
506 return 1;
507 }
508 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid,
509 s->bitmap_chunk, c->delay, s->write_behind,
510 bitmapsize, major)) {
511 return 1;
512 }
513 bitmap_fd = open(s->bitmap_file, O_RDWR);
514 if (bitmap_fd < 0) {
515 pr_err("weird: %s cannot be opened\n", s->bitmap_file);
516 return 1;
517 }
518 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
519 int err = errno;
520 if (errno == EBUSY)
521 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
522 pr_err("Cannot set bitmap file for %s: %s\n",
523 devname, strerror(err));
524 return 1;
525 }
526 }
527
528 return 0;
529 }
530
531 int Grow_consistency_policy(char *devname, int fd, struct context *c, struct shape *s)
532 {
533 struct supertype *st;
534 struct mdinfo *sra;
535 struct mdinfo *sd;
536 char *subarray = NULL;
537 int ret = 0;
538 char container_dev[PATH_MAX];
539
540 if (s->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
541 s->consistency_policy != CONSISTENCY_POLICY_PPL) {
542 pr_err("Operation not supported for consistency policy %s\n",
543 map_num(consistency_policies, s->consistency_policy));
544 return 1;
545 }
546
547 st = super_by_fd(fd, &subarray);
548 if (!st)
549 return 1;
550
551 sra = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY|GET_LEVEL|
552 GET_DEVS|GET_STATE);
553 if (!sra) {
554 ret = 1;
555 goto free_st;
556 }
557
558 if (s->consistency_policy == CONSISTENCY_POLICY_PPL &&
559 !st->ss->write_init_ppl) {
560 pr_err("%s metadata does not support PPL\n", st->ss->name);
561 ret = 1;
562 goto free_info;
563 }
564
565 if (sra->array.level != 5) {
566 pr_err("Operation not supported for array level %d\n",
567 sra->array.level);
568 ret = 1;
569 goto free_info;
570 }
571
572 if (sra->consistency_policy == (unsigned)s->consistency_policy) {
573 pr_err("Consistency policy is already %s\n",
574 map_num(consistency_policies, s->consistency_policy));
575 ret = 1;
576 goto free_info;
577 } else if (sra->consistency_policy != CONSISTENCY_POLICY_RESYNC &&
578 sra->consistency_policy != CONSISTENCY_POLICY_PPL) {
579 pr_err("Current consistency policy is %s, cannot change to %s\n",
580 map_num(consistency_policies, sra->consistency_policy),
581 map_num(consistency_policies, s->consistency_policy));
582 ret = 1;
583 goto free_info;
584 }
585
586 if (subarray) {
587 char *update;
588
589 if (s->consistency_policy == CONSISTENCY_POLICY_PPL)
590 update = "ppl";
591 else
592 update = "no-ppl";
593
594 sprintf(container_dev, "/dev/%s", st->container_devnm);
595
596 ret = Update_subarray(container_dev, subarray, update, NULL,
597 c->verbose);
598 if (ret)
599 goto free_info;
600 }
601
602 if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
603 struct mdinfo info;
604
605 if (subarray) {
606 struct mdinfo *mdi;
607 int cfd;
608
609 cfd = open(container_dev, O_RDWR|O_EXCL);
610 if (cfd < 0) {
611 pr_err("Failed to open %s\n", container_dev);
612 ret = 1;
613 goto free_info;
614 }
615
616 ret = st->ss->load_container(st, cfd, st->container_devnm);
617 close(cfd);
618
619 if (ret) {
620 pr_err("Cannot read superblock for %s\n",
621 container_dev);
622 goto free_info;
623 }
624
625 mdi = st->ss->container_content(st, subarray);
626 info = *mdi;
627 free(mdi);
628 }
629
630 for (sd = sra->devs; sd; sd = sd->next) {
631 int dfd;
632 char *devpath;
633
634 if ((sd->disk.state & (1 << MD_DISK_SYNC)) == 0)
635 continue;
636
637 devpath = map_dev(sd->disk.major, sd->disk.minor, 0);
638 dfd = dev_open(devpath, O_RDWR);
639 if (dfd < 0) {
640 pr_err("Failed to open %s\n", devpath);
641 ret = 1;
642 goto free_info;
643 }
644
645 if (!subarray) {
646 ret = st->ss->load_super(st, dfd, NULL);
647 if (ret) {
648 pr_err("Failed to load super-block.\n");
649 close(dfd);
650 goto free_info;
651 }
652
653 ret = st->ss->update_super(st, sra, "ppl", devname,
654 c->verbose, 0, NULL);
655 if (ret) {
656 close(dfd);
657 st->ss->free_super(st);
658 goto free_info;
659 }
660 st->ss->getinfo_super(st, &info, NULL);
661 }
662
663 ret |= sysfs_set_num(sra, sd, "ppl_sector", info.ppl_sector);
664 ret |= sysfs_set_num(sra, sd, "ppl_size", info.ppl_size);
665
666 if (ret) {
667 pr_err("Failed to set PPL attributes for %s\n",
668 sd->sys_name);
669 close(dfd);
670 st->ss->free_super(st);
671 goto free_info;
672 }
673
674 ret = st->ss->write_init_ppl(st, &info, dfd);
675 if (ret)
676 pr_err("Failed to write PPL\n");
677
678 close(dfd);
679
680 if (!subarray)
681 st->ss->free_super(st);
682
683 if (ret)
684 goto free_info;
685 }
686 }
687
688 ret = sysfs_set_str(sra, NULL, "consistency_policy",
689 map_num(consistency_policies,
690 s->consistency_policy));
691 if (ret)
692 pr_err("Failed to change array consistency policy\n");
693
694 free_info:
695 sysfs_free(sra);
696 free_st:
697 free(st);
698 free(subarray);
699
700 return ret;
701 }
702
703 /*
704 * When reshaping an array we might need to backup some data.
705 * This is written to all spares with a 'super_block' describing it.
706 * The superblock goes 4K from the end of the used space on the
707 * device.
709 * It is written after the backup is complete.
709 * It has the following structure.
710 */
711
712 static struct mdp_backup_super {
713 char magic[16]; /* md_backup_data-1 or -2 */
714 __u8 set_uuid[16];
715 __u64 mtime;
716 /* start/sizes in 512byte sectors */
717 __u64 devstart; /* address on backup device/file of data */
718 __u64 arraystart;
719 __u64 length;
720 __u32 sb_csum; /* csum of preceding bytes. */
721 __u32 pad1;
722 __u64 devstart2; /* offset in to data of second section */
723 __u64 arraystart2;
724 __u64 length2;
725 __u32 sb_csum2; /* csum of preceding bytes. */
726 __u8 pad[512-68-32];
727 } __attribute__((aligned(512))) bsb, bsb2;
728
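/* Note: the loop below sums buf[0] on every iteration rather than buf[i],
 * so the checksum depends only on the first byte and the length. Changing
 * it now would invalidate sb_csum in existing backup files, so it is left
 * as-is. */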
729 static __u32 bsb_csum(char *buf, int len)
730 {
731 int i;
732 int csum = 0;
733 for (i = 0; i < len; i++)
734 csum = (csum<<3) + buf[0];
735 return __cpu_to_le32(csum);
736 }
737
738 static int check_idle(struct supertype *st)
739 {
740 /* Check that all member arrays for this container, or the
741 * container of this array, are idle
742 */
743 char *container = (st->container_devnm[0]
744 ? st->container_devnm : st->devnm);
745 struct mdstat_ent *ent, *e;
746 int is_idle = 1;
747
748 ent = mdstat_read(0, 0);
749 for (e = ent ; e; e = e->next) {
750 if (!is_container_member(e, container))
751 continue;
752 if (e->percent >= 0) {
753 is_idle = 0;
754 break;
755 }
756 }
757 free_mdstat(ent);
758 return is_idle;
759 }
760
761 static int freeze_container(struct supertype *st)
762 {
763 char *container = (st->container_devnm[0]
764 ? st->container_devnm : st->devnm);
765
766 if (!check_idle(st))
767 return -1;
768
769 if (block_monitor(container, 1)) {
770 pr_err("failed to freeze container\n");
771 return -2;
772 }
773
774 return 1;
775 }
776
777 static void unfreeze_container(struct supertype *st)
778 {
779 char *container = (st->container_devnm[0]
780 ? st->container_devnm : st->devnm);
781
782 unblock_monitor(container, 1);
783 }
784
785 static int freeze(struct supertype *st)
786 {
787 /* Try to freeze resync/rebuild on this array/container.
788 * Return -1 if the array is busy,
789 * return -2 if the container cannot be frozen,
790 * return 0 if this kernel doesn't support 'frozen'
791 * return 1 if it worked.
792 */
793 if (st->ss->external)
794 return freeze_container(st);
795 else {
796 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
797 int err;
798 char buf[20];
799
800 if (!sra)
801 return -1;
802 /* Need to clear any 'read-auto' status */
803 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
804 strncmp(buf, "read-auto", 9) == 0)
805 sysfs_set_str(sra, NULL, "array_state", "clean");
806
807 err = sysfs_freeze_array(sra);
808 sysfs_free(sra);
809 return err;
810 }
811 }
812
813 static void unfreeze(struct supertype *st)
814 {
815 if (st->ss->external)
816 return unfreeze_container(st);
817 else {
818 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
819 char buf[20];
820
821 if (sra &&
822 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0
823 && strcmp(buf, "frozen\n") == 0)
824 sysfs_set_str(sra, NULL, "sync_action", "idle");
825 sysfs_free(sra);
826 }
827 }
828
829 static void wait_reshape(struct mdinfo *sra)
830 {
831 int fd = sysfs_get_fd(sra, NULL, "sync_action");
832 char action[20];
833
834 if (fd < 0)
835 return;
836
837 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
838 strncmp(action, "reshape", 7) == 0)
839 sysfs_wait(fd, NULL);
840 close(fd);
841 }
842
843 static int reshape_super(struct supertype *st, unsigned long long size,
844 int level, int layout, int chunksize, int raid_disks,
845 int delta_disks, char *backup_file, char *dev,
846 int direction, int verbose)
847 {
848 /* nothing extra to check in the native case */
849 if (!st->ss->external)
850 return 0;
851 if (!st->ss->reshape_super ||
852 !st->ss->manage_reshape) {
853 pr_err("%s metadata does not support reshape\n",
854 st->ss->name);
855 return 1;
856 }
857
858 return st->ss->reshape_super(st, size, level, layout, chunksize,
859 raid_disks, delta_disks, backup_file, dev,
860 direction, verbose);
861 }
862
863 static void sync_metadata(struct supertype *st)
864 {
865 if (st->ss->external) {
866 if (st->update_tail) {
867 flush_metadata_updates(st);
868 st->update_tail = &st->updates;
869 } else
870 st->ss->sync_metadata(st);
871 }
872 }
873
874 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
875 {
876 /* when dealing with external metadata subarrays we need to be
877 * prepared to handle EAGAIN. The kernel may need to wait for
878 * mdmon to mark the array active so the kernel can handle
879 * allocations/writeback when preparing the reshape action
880 * (md_allow_write()). We temporarily disable safe_mode_delay
881 * to close a race with the array_state going clean before the
882 * next write to raid_disks / stripe_cache_size
883 */
884 char safe[50];
885 int rc;
886
887 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
888 if (!container ||
889 (strcmp(name, "raid_disks") != 0 &&
890 strcmp(name, "stripe_cache_size") != 0))
891 return sysfs_set_num(sra, NULL, name, n);
892
893 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
894 if (rc <= 0)
895 return -1;
896 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
897 rc = sysfs_set_num(sra, NULL, name, n);
898 if (rc < 0 && errno == EAGAIN) {
899 ping_monitor(container);
900 /* if we get EAGAIN here then the monitor is not active
901 * so stop trying
902 */
903 rc = sysfs_set_num(sra, NULL, name, n);
904 }
905 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
906 return rc;
907 }
908
909 int start_reshape(struct mdinfo *sra, int already_running,
910 int before_data_disks, int data_disks)
911 {
912 int err;
913 unsigned long long sync_max_to_set;
914
915 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
916 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
917 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
918 sra->reshape_progress);
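/* sync_min/sync_max are per-device sector counts while reshape_progress
 * counts array data sectors, hence the division by data_disks; a reshape
 * to fewer devices proceeds backwards from the end of the array, hence
 * the second form. */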
919 if (before_data_disks <= data_disks)
920 sync_max_to_set = sra->reshape_progress / data_disks;
921 else
922 sync_max_to_set = (sra->component_size * data_disks
923 - sra->reshape_progress) / data_disks;
924 if (!already_running)
925 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
926 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
927 if (!already_running && err == 0) {
928 int cnt = 5;
929 do {
930 err = sysfs_set_str(sra, NULL, "sync_action", "reshape");
931 if (err)
932 sleep(1);
933 } while (err && errno == EBUSY && cnt-- > 0);
934 }
935 return err;
936 }
937
938 void abort_reshape(struct mdinfo *sra)
939 {
940 sysfs_set_str(sra, NULL, "sync_action", "idle");
941 /*
942 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
943 * suspend_hi to decrease as well as increase.")
944 * you could only increase suspend_{lo,hi} unless the region they
945 * covered was empty. So to reset to 0, you first need to push suspend_lo
946 * up past suspend_hi. To maximize the chance of mdadm working on all
947 * kernels, we keep doing that.
948 */
949 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
950 sysfs_set_num(sra, NULL, "suspend_hi", 0);
951 sysfs_set_num(sra, NULL, "suspend_lo", 0);
952 sysfs_set_num(sra, NULL, "sync_min", 0);
953 // It isn't safe to reset sync_max as we aren't monitoring.
954 // Array really should be stopped at this point.
955 }
956
957 int remove_disks_for_takeover(struct supertype *st,
958 struct mdinfo *sra,
959 int layout)
960 {
961 int nr_of_copies;
962 struct mdinfo *remaining;
963 int slot;
964
965 if (st->ss->external) {
966 int rv = 0;
967 struct mdinfo *arrays = st->ss->container_content(st, NULL);
968 /*
969 * container_content returns the list of arrays in the container.
970 * If arrays->next is not NULL there are at least two arrays in the
971 * container and the operation should be blocked
972 */
973 if (arrays) {
974 if (arrays->next)
975 rv = 1;
976 sysfs_free(arrays);
977 if (rv) {
978 pr_err("Error. Cannot perform operation on /dev/%s\n", st->devnm);
979 pr_err("For this operation it MUST be single array in container\n");
980 return rv;
981 }
982 }
983 }
984
985 if (sra->array.level == 10)
986 nr_of_copies = layout & 0xff;
987 else if (sra->array.level == 1)
988 nr_of_copies = sra->array.raid_disks;
989 else
990 return 1;
991
992 remaining = sra->devs;
993 sra->devs = NULL;
994 /* for each 'copy', select one device and remove from the list. */
995 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
996 struct mdinfo **diskp;
997 int found = 0;
998
999 /* Find a working device to keep */
1000 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
1001 struct mdinfo *disk = *diskp;
1002
1003 if (disk->disk.raid_disk < slot)
1004 continue;
1005 if (disk->disk.raid_disk >= slot + nr_of_copies)
1006 continue;
1007 if (disk->disk.state & (1<<MD_DISK_REMOVED))
1008 continue;
1009 if (disk->disk.state & (1<<MD_DISK_FAULTY))
1010 continue;
1011 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
1012 continue;
1013
1014 /* We have found a good disk to use! */
1015 *diskp = disk->next;
1016 disk->next = sra->devs;
1017 sra->devs = disk;
1018 found = 1;
1019 break;
1020 }
1021 if (!found)
1022 break;
1023 }
1024
1025 if (slot < sra->array.raid_disks) {
1026 /* didn't find all slots */
1027 struct mdinfo **e;
1028 e = &remaining;
1029 while (*e)
1030 e = &(*e)->next;
1031 *e = sra->devs;
1032 sra->devs = remaining;
1033 return 1;
1034 }
1035
1036 /* Remove all 'remaining' devices from the array */
1037 while (remaining) {
1038 struct mdinfo *sd = remaining;
1039 remaining = sd->next;
1040
1041 sysfs_set_str(sra, sd, "state", "faulty");
1042 sysfs_set_str(sra, sd, "slot", "none");
1043 /* for external metadata, disks should be removed by mdmon */
1044 if (!st->ss->external)
1045 sysfs_set_str(sra, sd, "state", "remove");
1046 sd->disk.state |= (1<<MD_DISK_REMOVED);
1047 sd->disk.state &= ~(1<<MD_DISK_SYNC);
1048 sd->next = sra->devs;
1049 sra->devs = sd;
1050 }
1051 return 0;
1052 }
1053
1054 void reshape_free_fdlist(int *fdlist,
1055 unsigned long long *offsets,
1056 int size)
1057 {
1058 int i;
1059
1060 for (i = 0; i < size; i++)
1061 if (fdlist[i] >= 0)
1062 close(fdlist[i]);
1063
1064 free(fdlist);
1065 free(offsets);
1066 }
1067
1068 int reshape_prepare_fdlist(char *devname,
1069 struct mdinfo *sra,
1070 int raid_disks,
1071 int nrdisks,
1072 unsigned long blocks,
1073 char *backup_file,
1074 int *fdlist,
1075 unsigned long long *offsets)
1076 {
1077 int d = 0;
1078 struct mdinfo *sd;
1079
1080 enable_fds(nrdisks);
1081 for (d = 0; d <= nrdisks; d++)
1082 fdlist[d] = -1;
1083 d = raid_disks;
1084 for (sd = sra->devs; sd; sd = sd->next) {
1085 if (sd->disk.state & (1<<MD_DISK_FAULTY))
1086 continue;
1087 if (sd->disk.state & (1<<MD_DISK_SYNC) &&
1088 sd->disk.raid_disk < raid_disks) {
1089 char *dn = map_dev(sd->disk.major,
1090 sd->disk.minor, 1);
1091 fdlist[sd->disk.raid_disk]
1092 = dev_open(dn, O_RDONLY);
1093 offsets[sd->disk.raid_disk] = sd->data_offset*512;
1094 if (fdlist[sd->disk.raid_disk] < 0) {
1095 pr_err("%s: cannot open component %s\n",
1096 devname, dn ? dn : "-unknown-");
1097 d = -1;
1098 goto release;
1099 }
1100 } else if (backup_file == NULL) {
1101 /* spare */
1102 char *dn = map_dev(sd->disk.major,
1103 sd->disk.minor, 1);
1104 fdlist[d] = dev_open(dn, O_RDWR);
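/* Backup data on a spare sits just below the end of its used space,
 * leaving the final 8 sectors (4K) for the backup superblock described
 * by struct mdp_backup_super above. */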
1105 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
1106 if (fdlist[d] < 0) {
1107 pr_err("%s: cannot open component %s\n",
1108 devname, dn ? dn : "-unknown-");
1109 d = -1;
1110 goto release;
1111 }
1112 d++;
1113 }
1114 }
1115 release:
1116 return d;
1117 }
1118
1119 int reshape_open_backup_file(char *backup_file,
1120 int fd,
1121 char *devname,
1122 long blocks,
1123 int *fdlist,
1124 unsigned long long *offsets,
1125 char *sys_name,
1126 int restart)
1127 {
1128 /* Return 1 on success, 0 on any form of failure */
1129 /* need to check backup file is large enough */
1130 char buf[512];
1131 struct stat stb;
1132 unsigned int dev;
1133 int i;
1134
1135 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
1136 S_IRUSR | S_IWUSR);
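/* The first 8 sectors (4K) of the backup file are reserved as a header
 * area; backed-up data starts at offset 8*512. */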
1137 *offsets = 8 * 512;
1138 if (*fdlist < 0) {
1139 pr_err("%s: cannot create backup file %s: %s\n",
1140 devname, backup_file, strerror(errno));
1141 return 0;
1142 }
1143 /* Guard against backup file being on array device.
1144 * If the array is partitioned or LVM etc. is in the
1145 * way, this check will not notice, but it is better than
1146 * nothing.
1147 */
1148 fstat(*fdlist, &stb);
1149 dev = stb.st_dev;
1150 fstat(fd, &stb);
1151 if (stb.st_rdev == dev) {
1152 pr_err("backup file must NOT be on the array being reshaped.\n");
1153 close(*fdlist);
1154 return 0;
1155 }
1156
1157 memset(buf, 0, 512);
1158 for (i=0; i < blocks + 8 ; i++) {
1159 if (write(*fdlist, buf, 512) != 512) {
1160 pr_err("%s: cannot create backup file %s: %s\n",
1161 devname, backup_file, strerror(errno));
1162 return 0;
1163 }
1164 }
1165 if (fsync(*fdlist) != 0) {
1166 pr_err("%s: cannot create backup file %s: %s\n",
1167 devname, backup_file, strerror(errno));
1168 return 0;
1169 }
1170
1171 if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) {
1172 char *bu = make_backup(sys_name);
1173 if (symlink(backup_file, bu))
1174 pr_err("Recording backup file in " MAP_DIR " failed: %s\n",
1175 strerror(errno));
1176 free(bu);
1177 }
1178
1179 return 1;
1180 }
1181
1182 unsigned long compute_backup_blocks(int nchunk, int ochunk,
1183 unsigned int ndata, unsigned int odata)
1184 {
1185 unsigned long a, b, blocks;
1186 /* So how much do we need to back up?
1187 * We need an amount of data which is both a whole number of
1188 * old stripes and a whole number of new stripes.
1189 * That is the LCM of (chunksize*datadisks) for old and new.
1190 */
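/* Illustrative example: growing 3 -> 4 data disks with a 512K chunk:
 * a = 1024*3 = 3072, b = 1024*4 = 4096, GCD = 1024, so
 * blocks = 1024*1024*3*4/1024 = 12288 sectors (6MiB), which is exactly
 * 4 old stripes and 3 new stripes. */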
1191 a = (ochunk/512) * odata;
1192 b = (nchunk/512) * ndata;
1193 /* Find GCD */
1194 a = GCD(a, b);
1195 /* LCM == product / GCD */
1196 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
1197
1198 return blocks;
1199 }
1200
1201 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
1202 {
1203 /* Based on the current array state in info->array and
1204 * the changes in info->new_* etc, determine:
1205 * - whether the change is possible
1206 * - Intermediate level/raid_disks/layout
1207 * - whether a restriping reshape is needed
1208 * - number of sectors in minimum change unit. This
1209 * will cover a whole number of stripes in 'before' and
1210 * 'after'.
1211 *
1212 * Return message if the change should be rejected
1213 * NULL if the change can be achieved
1214 *
1215 * This can be called as part of starting a reshape, or
1216 * when assembling an array that is undergoing reshape.
1217 */
1218 int near, far, offset, copies;
1219 int new_disks;
1220 int old_chunk, new_chunk;
1221 /* delta_parity records change in number of devices
1222 * caused by level change
1223 */
1224 int delta_parity = 0;
1225
1226 memset(re, 0, sizeof(*re));
1227
1228 /* If a new level is not explicitly given, we assume no change */
1229 if (info->new_level == UnSet)
1230 info->new_level = info->array.level;
1231
1232 if (info->new_chunk)
1233 switch (info->new_level) {
1234 case 0:
1235 case 4:
1236 case 5:
1237 case 6:
1238 case 10:
1239 /* chunk size is meaningful, must divide component_size
1240 * evenly
1241 */
1242 if (info->component_size % (info->new_chunk/512)) {
1243 unsigned long long shrink = info->component_size;
1244 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1245 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1246 info->new_chunk/1024, info->component_size/2);
1247 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1248 devname, shrink/2);
1249 pr_err("will shrink the array so the given chunk size would work.\n");
1250 return "";
1251 }
1252 break;
1253 default:
1254 return "chunk size not meaningful for this level";
1255 }
1256 else
1257 info->new_chunk = info->array.chunk_size;
1258
1259 switch (info->array.level) {
1260 default:
1261 return "No reshape is possibly for this RAID level";
1262 case LEVEL_LINEAR:
1263 if (info->delta_disks != UnSet)
1264 return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
1265 else
1266 return "Only --add is supported for LINEAR, other --grow options are not meaningful";
1267 case 1:
1268 /* RAID1 can convert to RAID1 with different disks, or
1269 * raid5 with 2 disks, or
1270 * raid0 with 1 disk
1271 */
1272 if (info->new_level > 1 &&
1273 (info->component_size & 7))
1274 return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
1275 if (info->new_level == 0) {
1276 if (info->delta_disks != UnSet &&
1277 info->delta_disks != 0)
1278 return "Cannot change number of disks with RAID1->RAID0 conversion";
1279 re->level = 0;
1280 re->before.data_disks = 1;
1281 re->after.data_disks = 1;
1282 return NULL;
1283 }
1284 if (info->new_level == 1) {
1285 if (info->delta_disks == UnSet)
1286 /* Don't know what to do */
1287 return "no change requested for Growing RAID1";
1288 re->level = 1;
1289 return NULL;
1290 }
1291 if (info->array.raid_disks != 2 &&
1292 info->new_level == 5)
1293 return "Can only convert a 2-device array to RAID5";
1294 if (info->array.raid_disks == 2 &&
1295 info->new_level == 5) {
1296
1297 re->level = 5;
1298 re->before.data_disks = 1;
1299 if (info->delta_disks != UnSet &&
1300 info->delta_disks != 0)
1301 re->after.data_disks = 1 + info->delta_disks;
1302 else
1303 re->after.data_disks = 1;
1304 if (re->after.data_disks < 1)
1305 return "Number of disks too small for RAID5";
1306
1307 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1308 info->array.chunk_size = 65536;
1309 break;
1310 }
1311 /* Could do some multi-stage conversions, but leave that to
1312 * later.
1313 */
1314 return "Impossibly level change request for RAID1";
1315
1316 case 10:
1317 /* RAID10 can be converted from near mode to
1318 * RAID0 by removing some devices.
1319 * It can also be reshaped if the kernel supports
1320 * new_data_offset.
1321 */
1322 switch (info->new_level) {
1323 case 0:
1324 if ((info->array.layout & ~0xff) != 0x100)
1325 return "Cannot Grow RAID10 with far/offset layout";
1326 /* number of devices must be multiple of number of copies */
1327 if (info->array.raid_disks % (info->array.layout & 0xff))
1328 return "RAID10 layout too complex for Grow operation";
1329
1330 new_disks = (info->array.raid_disks
1331 / (info->array.layout & 0xff));
1332 if (info->delta_disks == UnSet)
1333 info->delta_disks = (new_disks
1334 - info->array.raid_disks);
1335
1336 if (info->delta_disks != new_disks - info->array.raid_disks)
1337 return "New number of raid-devices impossible for RAID10";
1338 if (info->new_chunk &&
1339 info->new_chunk != info->array.chunk_size)
1340 return "Cannot change chunk-size with RAID10 Grow";
1341
1342 /* looks good */
1343 re->level = 0;
1344 re->before.data_disks = new_disks;
1345 re->after.data_disks = re->before.data_disks;
1346 return NULL;
1347
1348 case 10:
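/* Decode the RAID10 layout word: low byte = near copies, next byte =
 * far copies, bit 16 (0x10000) set for the 'offset' variant. */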
1349 near = info->array.layout & 0xff;
1350 far = (info->array.layout >> 8) & 0xff;
1351 offset = info->array.layout & 0x10000;
1352 if (far > 1 && !offset)
1353 return "Cannot reshape RAID10 in far-mode";
1354 copies = near * far;
1355
1356 old_chunk = info->array.chunk_size * far;
1357
1358 if (info->new_layout == UnSet)
1359 info->new_layout = info->array.layout;
1360 else {
1361 near = info->new_layout & 0xff;
1362 far = (info->new_layout >> 8) & 0xff;
1363 offset = info->new_layout & 0x10000;
1364 if (far > 1 && !offset)
1365 return "Cannot reshape RAID10 to far-mode";
1366 if (near * far != copies)
1367 return "Cannot change number of copies when reshaping RAID10";
1368 }
1369 if (info->delta_disks == UnSet)
1370 info->delta_disks = 0;
1371 new_disks = (info->array.raid_disks +
1372 info->delta_disks);
1373
1374 new_chunk = info->new_chunk * far;
1375
1376 re->level = 10;
1377 re->before.layout = info->array.layout;
1378 re->before.data_disks = info->array.raid_disks;
1379 re->after.layout = info->new_layout;
1380 re->after.data_disks = new_disks;
1381 /* For RAID10 we don't do backup but do allow reshape,
1382 * so set backup_blocks to INVALID_SECTORS rather than
1383 * zero.
1384 * And there is no need to synchronise stripes on both
1385 * 'old' and 'new'. So the important
1386 * number is the minimum data_offset difference
1387 * which is the larger of (chunk size * far/offset copies) for old and new.
1388 */
1389 re->backup_blocks = INVALID_SECTORS;
1390 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1391 if (new_disks < re->before.data_disks &&
1392 info->space_after < re->min_offset_change)
1393 /* Reduce component size by one chunk */
1394 re->new_size = (info->component_size -
1395 re->min_offset_change);
1396 else
1397 re->new_size = info->component_size;
1398 re->new_size = re->new_size * new_disks / copies;
1399 return NULL;
1400
1401 default:
1402 return "RAID10 can only be changed to RAID0";
1403 }
1404 case 0:
1405 /* RAID0 can be converted to RAID10, or to RAID456 */
1406 if (info->new_level == 10) {
1407 if (info->new_layout == UnSet && info->delta_disks == UnSet) {
1408 /* Assume near=2 layout */
1409 info->new_layout = 0x102;
1410 info->delta_disks = info->array.raid_disks;
1411 }
1412 if (info->new_layout == UnSet) {
1413 int copies = 1 + (info->delta_disks
1414 / info->array.raid_disks);
1415 if (info->array.raid_disks * (copies-1)
1416 != info->delta_disks)
1417 return "Impossible number of devices for RAID0->RAID10";
1418 info->new_layout = 0x100 + copies;
1419 }
1420 if (info->delta_disks == UnSet) {
1421 int copies = info->new_layout & 0xff;
1422 if (info->new_layout != 0x100 + copies)
1423 return "New layout impossible for RAID0->RAID10";;
1424 info->delta_disks = (copies - 1) *
1425 info->array.raid_disks;
1426 }
1427 if (info->new_chunk &&
1428 info->new_chunk != info->array.chunk_size)
1429 return "Cannot change chunk-size with RAID0->RAID10";
1430 /* looks good */
1431 re->level = 10;
1432 re->before.data_disks = (info->array.raid_disks +
1433 info->delta_disks);
1434 re->after.data_disks = re->before.data_disks;
1435 re->before.layout = info->new_layout;
1436 return NULL;
1437 }
1438
1439 /* RAID0 can also convert to RAID0/4/5/6 by first converting to
1440 * a raid4 style layout of the final level.
1441 */
1442 switch (info->new_level) {
1443 case 4:
1444 delta_parity = 1;
1445 case 0:
1446 re->level = 4;
1447 re->before.layout = 0;
1448 break;
1449 case 5:
1450 delta_parity = 1;
1451 re->level = 5;
1452 re->before.layout = ALGORITHM_PARITY_N;
1453 if (info->new_layout == UnSet)
1454 info->new_layout = map_name(r5layout, "default");
1455 break;
1456 case 6:
1457 delta_parity = 2;
1458 re->level = 6;
1459 re->before.layout = ALGORITHM_PARITY_N;
1460 if (info->new_layout == UnSet)
1461 info->new_layout = map_name(r6layout, "default");
1462 break;
1463 default:
1464 return "Impossible level change requested";
1465 }
1466 re->before.data_disks = info->array.raid_disks;
1467 /* determining 'after' layout happens outside this 'switch' */
1468 break;
1469
1470 case 4:
1471 info->array.layout = ALGORITHM_PARITY_N;
1472 case 5:
1473 switch (info->new_level) {
1474 case 0:
1475 delta_parity = -1;
1476 case 4:
1477 re->level = info->array.level;
1478 re->before.data_disks = info->array.raid_disks - 1;
1479 re->before.layout = info->array.layout;
1480 break;
1481 case 5:
1482 re->level = 5;
1483 re->before.data_disks = info->array.raid_disks - 1;
1484 re->before.layout = info->array.layout;
1485 break;
1486 case 6:
1487 delta_parity = 1;
1488 re->level = 6;
1489 re->before.data_disks = info->array.raid_disks - 1;
1490 switch (info->array.layout) {
1491 case ALGORITHM_LEFT_ASYMMETRIC:
1492 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1493 break;
1494 case ALGORITHM_RIGHT_ASYMMETRIC:
1495 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1496 break;
1497 case ALGORITHM_LEFT_SYMMETRIC:
1498 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1499 break;
1500 case ALGORITHM_RIGHT_SYMMETRIC:
1501 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1502 break;
1503 case ALGORITHM_PARITY_0:
1504 re->before.layout = ALGORITHM_PARITY_0_6;
1505 break;
1506 case ALGORITHM_PARITY_N:
1507 re->before.layout = ALGORITHM_PARITY_N_6;
1508 break;
1509 default:
1510 return "Cannot convert an array with this layout";
1511 }
1512 break;
1513 case 1:
1514 if (info->array.raid_disks != 2)
1515 return "Can only convert a 2-device array to RAID1";
1516 if (info->delta_disks != UnSet &&
1517 info->delta_disks != 0)
1518 return "Cannot set raid_disk when converting RAID5->RAID1";
1519 re->level = 1;
1520 info->new_chunk = 0;
1521 return NULL;
1522 default:
1523 return "Impossible level change requested";
1524 }
1525 break;
1526 case 6:
1527 switch (info->new_level) {
1528 case 4:
1529 case 5:
1530 delta_parity = -1;
1531 case 6:
1532 re->level = 6;
1533 re->before.data_disks = info->array.raid_disks - 2;
1534 re->before.layout = info->array.layout;
1535 break;
1536 default:
1537 return "Impossible level change requested";
1538 }
1539 break;
1540 }
1541
1542 /* If we reached here then it looks like a re-stripe is
1543 * happening. We have determined the intermediate level
1544 * and initial raid_disks/layout and stored these in 're'.
1545 *
1546 * We need to deduce the final layout that can be atomically
1547 * converted to the end state.
1548 */
1549 switch (info->new_level) {
1550 case 0:
1551 /* We can only get to RAID0 from RAID4 or RAID5
1552 * with appropriate layout and one extra device
1553 */
1554 if (re->level != 4 && re->level != 5)
1555 return "Cannot covert to RAID0 from this level";
1556
1557 switch (re->level) {
1558 case 4:
1559 re->before.layout = 0;
1560 re->after.layout = 0;
1561 break;
1562 case 5:
1563 re->after.layout = ALGORITHM_PARITY_N;
1564 break;
1565 }
1566 break;
1567
1568 case 4:
1569 /* We can only get to RAID4 from RAID5 */
1570 if (re->level != 4 && re->level != 5)
1571 return "Cannot convert to RAID4 from this level";
1572
1573 switch (re->level) {
1574 case 4:
1575 re->after.layout = 0;
1576 break;
1577 case 5:
1578 re->after.layout = ALGORITHM_PARITY_N;
1579 break;
1580 }
1581 break;
1582
1583 case 5:
1584 /* We get to RAID5 from RAID5 or RAID6 */
1585 if (re->level != 5 && re->level != 6)
1586 return "Cannot convert to RAID5 from this level";
1587
1588 switch (re->level) {
1589 case 5:
1590 if (info->new_layout == UnSet)
1591 re->after.layout = re->before.layout;
1592 else
1593 re->after.layout = info->new_layout;
1594 break;
1595 case 6:
1596 if (info->new_layout == UnSet)
1597 info->new_layout = re->before.layout;
1598
1599 /* after.layout needs to be raid6 version of new_layout */
1600 if (info->new_layout == ALGORITHM_PARITY_N)
1601 re->after.layout = ALGORITHM_PARITY_N;
1602 else {
1603 char layout[40];
1604 char *ls = map_num(r5layout, info->new_layout);
1605 int l;
1606 if (ls) {
1607 /* Current RAID6 layout has a RAID5
1608 * equivalent - good
1609 */
1610 strcat(strcpy(layout, ls), "-6");
1611 l = map_name(r6layout, layout);
1612 if (l == UnSet)
1613 return "Cannot find RAID6 layout to convert to";
1614 } else {
1615 /* Current RAID6 has no equivalent.
1616 * If it is already a '-6' layout we
1617 * can leave it unchanged, else we must
1618 * fail
1619 */
1620 ls = map_num(r6layout, info->new_layout);
1621 if (!ls ||
1622 strcmp(ls+strlen(ls)-2, "-6") != 0)
1623 return "Please specify new layout";
1624 l = info->new_layout;
1625 }
1626 re->after.layout = l;
1627 }
1628 }
1629 break;
1630
1631 case 6:
1632 /* We must already be at level 6 */
1633 if (re->level != 6)
1634 return "Impossible level change";
1635 if (info->new_layout == UnSet)
1636 re->after.layout = info->array.layout;
1637 else
1638 re->after.layout = info->new_layout;
1639 break;
1640 default:
1641 return "Impossible level change requested";
1642 }
1643 if (info->delta_disks == UnSet)
1644 info->delta_disks = delta_parity;
1645
1646 re->after.data_disks = (re->before.data_disks
1647 + info->delta_disks
1648 - delta_parity);
1649 switch (re->level) {
1650 case 6: re->parity = 2;
1651 break;
1652 case 4:
1653 case 5: re->parity = 1;
1654 break;
1655 default: re->parity = 0;
1656 break;
1657 }
1658 /* So we have a restripe operation, we need to calculate the number
1659 * of blocks per reshape operation.
1660 */
1661 re->new_size = info->component_size * re->before.data_disks;
1662 if (info->new_chunk == 0)
1663 info->new_chunk = info->array.chunk_size;
1664 if (re->after.data_disks == re->before.data_disks &&
1665 re->after.layout == re->before.layout &&
1666 info->new_chunk == info->array.chunk_size) {
1667 /* Nothing to change, can change level immediately. */
1668 re->level = info->new_level;
1669 re->backup_blocks = 0;
1670 return NULL;
1671 }
1672 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1673 /* chunk and layout changes make no difference */
1674 re->level = info->new_level;
1675 re->backup_blocks = 0;
1676 return NULL;
1677 }
1678
1679 if (re->after.data_disks == re->before.data_disks &&
1680 get_linux_version() < 2006032)
1681 return "in-place reshape is not safe before 2.6.32 - sorry.";
1682
1683 if (re->after.data_disks < re->before.data_disks &&
1684 get_linux_version() < 2006030)
1685 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1686
1687 re->backup_blocks = compute_backup_blocks(
1688 info->new_chunk, info->array.chunk_size,
1689 re->after.data_disks,
1690 re->before.data_disks);
1691 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1692
1693 re->new_size = info->component_size * re->after.data_disks;
1694 return NULL;
1695 }
1696
1697 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1698 char *text_version)
1699 {
1700 struct mdinfo *info;
1701 char *subarray;
1702 int ret_val = -1;
1703
1704 if ((st == NULL) || (sra == NULL))
1705 return ret_val;
1706
1707 if (text_version == NULL)
1708 text_version = sra->text_version;
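/* For an external-metadata member, text_version looks like e.g.
 * "/md127/0"; skip past the first character and the next '/' to get
 * the subarray id. */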
1709 subarray = strchr(text_version+1, '/')+1;
1710 info = st->ss->container_content(st, subarray);
1711 if (info) {
1712 unsigned long long current_size = 0;
1713 unsigned long long new_size =
1714 info->custom_array_size/2;
1715
1716 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1717 new_size > current_size) {
1718 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1719 < 0)
1720 dprintf("Error: Cannot set array size");
1721 else {
1722 ret_val = 0;
1723 dprintf("Array size changed");
1724 }
1725 dprintf_cont(" from %llu to %llu.\n",
1726 current_size, new_size);
1727 }
1728 sysfs_free(info);
1729 } else
1730 dprintf("Error: set_array_size(): info pointer in NULL\n");
1731
1732 return ret_val;
1733 }
1734
1735 static int reshape_array(char *container, int fd, char *devname,
1736 struct supertype *st, struct mdinfo *info,
1737 int force, struct mddev_dev *devlist,
1738 unsigned long long data_offset,
1739 char *backup_file, int verbose, int forked,
1740 int restart, int freeze_reshape);
1741 static int reshape_container(char *container, char *devname,
1742 int mdfd,
1743 struct supertype *st,
1744 struct mdinfo *info,
1745 int force,
1746 char *backup_file, int verbose,
1747 int forked, int restart, int freeze_reshape);
1748
1749 int Grow_reshape(char *devname, int fd,
1750 struct mddev_dev *devlist,
1751 unsigned long long data_offset,
1752 struct context *c, struct shape *s)
1753 {
1754 /* Make some changes in the shape of an array.
1755 * The kernel must support the change.
1756 *
1757 * There are three different changes. Each can trigger
1758 * a resync or recovery so we freeze that until we have
1759 * requested everything (if kernel supports freezing - 2.6.30).
1760 * The steps are:
1761 * - change size (i.e. component_size)
1762 * - change level
1763 * - change layout/chunksize/ndisks
1764 *
1765 * The last can require a reshape. It is different on different
1766 * levels so we need to check the level before actioning it.
1768 * Sometimes the level change needs to be requested after the
1768 * reshape (e.g. raid6->raid5, raid5->raid0)
1769 *
1770 */
1771 struct mdu_array_info_s array;
1772 int rv = 0;
1773 struct supertype *st;
1774 char *subarray = NULL;
1775
1776 int frozen;
1777 int changed = 0;
1778 char *container = NULL;
1779 int cfd = -1;
1780
1781 struct mddev_dev *dv;
1782 int added_disks;
1783
1784 struct mdinfo info;
1785 struct mdinfo *sra;
1786
1787 if (ioctl(fd, GET_ARRAY_INFO, &array) < 0) {
1788 pr_err("%s is not an active md array - aborting\n",
1789 devname);
1790 return 1;
1791 }
1792 if (data_offset != INVALID_SECTORS && array.level != 10
1793 && (array.level < 4 || array.level > 6)) {
1794 pr_err("--grow --data-offset not yet supported\n");
1795 return 1;
1796 }
1797
1798 if (s->size > 0 &&
1799 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1800 pr_err("cannot change component size at the same time as other changes.\n"
1801 " Change size first, then check data is intact before making other changes.\n");
1802 return 1;
1803 }
1804
1805 if (s->raiddisks && s->raiddisks < array.raid_disks && array.level > 1 &&
1806 get_linux_version() < 2006032 &&
1807 !check_env("MDADM_FORCE_FEWER")) {
1808 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1809 " Please use a newer kernel\n");
1810 return 1;
1811 }
1812
1813 st = super_by_fd(fd, &subarray);
1814 if (!st) {
1815 pr_err("Unable to determine metadata format for %s\n", devname);
1816 return 1;
1817 }
1818 if (s->raiddisks > st->max_devs) {
1819 pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs);
1820 return 1;
1821 }
1822 if (s->level == 0 &&
1823 (array.state & (1<<MD_SB_BITMAP_PRESENT)) &&
1824 !(array.state & (1<<MD_SB_CLUSTERED))) {
1825 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
1826 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
1827 pr_err("failed to remove internal bitmap.\n");
1828 return 1;
1829 }
1830 }
1831
1832 /* in the external case we need to check that the requested reshape is
1833 * supported, and perform an initial check that the container holds the
1834 * pre-requisite spare devices (mdmon owns final validation)
1835 */
1836 if (st->ss->external) {
1837 int rv;
1838
1839 if (subarray) {
1840 container = st->container_devnm;
1841 cfd = open_dev_excl(st->container_devnm);
1842 } else {
1843 container = st->devnm;
1844 close(fd);
1845 cfd = open_dev_excl(st->devnm);
1846 fd = cfd;
1847 }
1848 if (cfd < 0) {
1849 pr_err("Unable to open container for %s\n",
1850 devname);
1851 free(subarray);
1852 return 1;
1853 }
1854
1855 rv = st->ss->load_container(st, cfd, NULL);
1856
1857 if (rv) {
1858 pr_err("Cannot read superblock for %s\n",
1859 devname);
1860 free(subarray);
1861 return 1;
1862 }
1863
1864 /* check if operation is supported for metadata handler */
1865 if (st->ss->container_content) {
1866 struct mdinfo *cc = NULL;
1867 struct mdinfo *content = NULL;
1868
1869 cc = st->ss->container_content(st, subarray);
1870 for (content = cc; content ; content = content->next) {
1871 int allow_reshape = 1;
1872
1873 /* check if reshape is allowed based on metadata
1874 * indications stored in content->array.state
1875 */
1876 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
1877 allow_reshape = 0;
1878 if (content->array.state
1879 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE))
1880 allow_reshape = 0;
1881 if (!allow_reshape) {
1882 pr_err("cannot reshape arrays in container with unsupported metadata: %s(%s)\n",
1883 devname, container);
1884 sysfs_free(cc);
1885 free(subarray);
1886 return 1;
1887 }
1888 }
1889 sysfs_free(cc);
1890 }
1891 if (mdmon_running(container))
1892 st->update_tail = &st->updates;
1893 }
1894
1895 added_disks = 0;
1896 for (dv = devlist; dv; dv = dv->next)
1897 added_disks++;
1898 if (s->raiddisks > array.raid_disks &&
1899 array.spare_disks +added_disks < (s->raiddisks - array.raid_disks) &&
1900 !c->force) {
1901 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
1902 " Use --force to over-ride this check.\n",
1903 s->raiddisks - array.raid_disks,
1904 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1905 array.spare_disks + added_disks);
1906 return 1;
1907 }
1908
1909 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS
1910 | GET_STATE | GET_VERSION);
1911 if (sra) {
1912 if (st->ss->external && subarray == NULL) {
1913 array.level = LEVEL_CONTAINER;
1914 sra->array.level = LEVEL_CONTAINER;
1915 }
1916 } else {
1917 pr_err("failed to read sysfs parameters for %s\n",
1918 devname);
1919 return 1;
1920 }
1921 frozen = freeze(st);
1922 if (frozen < -1) {
1923 /* freeze() already spewed the reason */
1924 sysfs_free(sra);
1925 return 1;
1926 } else if (frozen < 0) {
1927 pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname);
1928 sysfs_free(sra);
1929 return 1;
1930 }
1931
1932 /* ========= set size =============== */
1933 if (s->size > 0 && (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1934 unsigned long long orig_size = get_component_size(fd)/2;
1935 unsigned long long min_csize;
1936 struct mdinfo *mdi;
1937 int raid0_takeover = 0;
1938
1939 if (orig_size == 0)
1940 orig_size = (unsigned) array.size;
1941
1942 if (orig_size == 0) {
1943 pr_err("Cannot set device size in this type of array.\n");
1944 rv = 1;
1945 goto release;
1946 }
1947
1948 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1949 devname, APPLY_METADATA_CHANGES, c->verbose > 0)) {
1950 rv = 1;
1951 goto release;
1952 }
1953 sync_metadata(st);
1954 if (st->ss->external) {
1955 /* metadata can have a size limitation;
1956 * update the size value according to metadata information
1957 */
1958 struct mdinfo *sizeinfo =
1959 st->ss->container_content(st, subarray);
1960 if (sizeinfo) {
1961 unsigned long long new_size =
1962 sizeinfo->custom_array_size/2;
1963 int data_disks = get_data_disks(
1964 sizeinfo->array.level,
1965 sizeinfo->array.layout,
1966 sizeinfo->array.raid_disks);
1967 new_size /= data_disks;
1968 dprintf("Metadata size correction from %llu to %llu (%llu)\n", orig_size, new_size,
1969 new_size * data_disks);
1970 s->size = new_size;
1971 sysfs_free(sizeinfo);
1972 }
1973 }
1974
1975 /* Update the size of each member device in case
1976 * they have been resized. This will never reduce
1977 * below the current used-size. The "size" attribute
1978 * understands '0' to mean 'max'.
1979 */
1980 min_csize = 0;
1981 rv = 0;
1982 for (mdi = sra->devs; mdi; mdi = mdi->next) {
1983 if (sysfs_set_num(sra, mdi, "size",
1984 s->size == MAX_SIZE ? 0 : s->size) < 0) {
1985 /* Probably kernel refusing to let us
1986 * reduce the size - not an error.
1987 */
1988 break;
1989 }
1990 if (array.not_persistent == 0 &&
1991 array.major_version == 0 &&
1992 get_linux_version() < 3001000) {
1993 /* Dangerous to allow size to exceed 2TB */
1994 unsigned long long csize;
1995 if (sysfs_get_ll(sra, mdi, "size", &csize) == 0) {
1996 if (csize >= 2ULL*1024*1024*1024)
1997 csize = 2ULL*1024*1024*1024;
1998 if ((min_csize == 0 || (min_csize
1999 > csize)))
2000 min_csize = csize;
2001 }
2002 }
2003 }
2004 if (rv) {
2005 pr_err("Cannot set size on array members.\n");
2006 goto size_change_error;
2007 }
2008 if (min_csize && s->size > min_csize) {
2009 pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n");
2010 rv = 1;
2011 goto size_change_error;
2012 }
2013 if (min_csize && s->size == MAX_SIZE) {
2014 /* Don't let the kernel choose a size - it will get
2015 * it wrong
2016 */
2017 pr_err("Limited v0.90 array to 2TB per device\n");
2018 s->size = min_csize;
2019 }
2020 if (st->ss->external) {
2021 if (sra->array.level == 0) {
2022 rv = sysfs_set_str(sra, NULL, "level",
2023 "raid5");
2024 if (!rv) {
2025 raid0_takeover = 1;
2026 /* get array parameters after takeover
2027 * to change only one parameter at a time
2028 */
2029 rv = ioctl(fd, GET_ARRAY_INFO, &array);
2030 }
2031 }
2032 /* make sure mdmon is
2033 * aware of the new level */
2034 if (!mdmon_running(st->container_devnm))
2035 start_mdmon(st->container_devnm);
2036 ping_monitor(container);
2037 if (mdmon_running(st->container_devnm) &&
2038 st->update_tail == NULL)
2039 st->update_tail = &st->updates;
2040 }
2041
2042 if (s->size == MAX_SIZE)
2043 s->size = 0;
2044 array.size = s->size;
2045 if (s->size & ~INT32_MAX) {
2046 /* got truncated to 32bit, write to
2047 * component_size instead
2048 */
2049 if (sra)
2050 rv = sysfs_set_num(sra, NULL,
2051 "component_size", s->size);
2052 else
2053 rv = -1;
2054 } else {
2055 rv = ioctl(fd, SET_ARRAY_INFO, &array);
2056
2057 /* manage array size when it is managed externally
2058 */
2059 if ((rv == 0) && st->ss->external)
2060 rv = set_array_size(st, sra, sra->text_version);
2061 }
2062
2063 if (raid0_takeover) {
2064 /* do not resync the non-existent parity,
2065 * we will drop it anyway
2066 */
2067 sysfs_set_str(sra, NULL, "sync_action", "frozen");
2068 /* go back to raid0, drop parity disk
2069 */
2070 sysfs_set_str(sra, NULL, "level", "raid0");
2071 ioctl(fd, GET_ARRAY_INFO, &array);
2072 }
2073
2074 size_change_error:
2075 if (rv != 0) {
2076 int err = errno;
2077
2078 /* restore metadata */
2079 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
2080 UnSet, NULL, devname,
2081 ROLLBACK_METADATA_CHANGES,
2082 c->verbose) == 0)
2083 sync_metadata(st);
2084 pr_err("Cannot set device size for %s: %s\n",
2085 devname, strerror(err));
2086 if (err == EBUSY &&
2087 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2088 cont_err("Bitmap must be removed before size can be changed\n");
2089 rv = 1;
2090 goto release;
2091 }
2092 if (s->assume_clean) {
2093 /* This will fail on kernels older than 3.0 unless
2094 * a backport has been arranged.
2095 */
2096 if (sra == NULL ||
2097 sysfs_set_str(sra, NULL, "resync_start", "none") < 0)
2098 pr_err("--assume-clean not supported with --grow on this kernel\n");
2099 }
2100 ioctl(fd, GET_ARRAY_INFO, &array);
2101 s->size = get_component_size(fd)/2;
2102 if (s->size == 0)
2103 s->size = array.size;
2104 if (c->verbose >= 0) {
2105 if (s->size == orig_size)
2106 pr_err("component size of %s unchanged at %lluK\n",
2107 devname, s->size);
2108 else
2109 pr_err("component size of %s has been set to %lluK\n",
2110 devname, s->size);
2111 }
2112 changed = 1;
2113 } else if (array.level != LEVEL_CONTAINER) {
2114 s->size = get_component_size(fd)/2;
2115 if (s->size == 0)
2116 s->size = array.size;
2117 }
2118
2119 /* See if there is anything else to do */
2120 if ((s->level == UnSet || s->level == array.level) &&
2121 (s->layout_str == NULL) &&
2122 (s->chunk == 0 || s->chunk == array.chunk_size) &&
2123 data_offset == INVALID_SECTORS &&
2124 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
2125 /* Nothing more to do */
2126 if (!changed && c->verbose >= 0)
2127 pr_err("%s: no change requested\n",
2128 devname);
2129 goto release;
2130 }
2131
2132 /* ========= check for Raid10/Raid1 -> Raid0 conversion ===============
2133 * the current implementation requires that the following conditions are met:
2134 * - RAID10:
2135 * - far_copies == 1
2136 * - near_copies == 2
2137 */
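/* Note: layout (1 << 8) + 2 is the md RAID10 encoding for near_copies == 2
 * (low byte) and far_copies == 1 (next byte); requiring an even raid_disks
 * count lets each mirrored pair collapse onto a single RAID0 member.
 */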
2138 if ((s->level == 0 && array.level == 10 && sra &&
2139 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
2140 (s->level == 0 && array.level == 1 && sra)) {
2141 int err;
2142 err = remove_disks_for_takeover(st, sra, array.layout);
2143 if (err) {
2144 dprintf("Array cannot be reshaped\n");
2145 if (cfd > -1)
2146 close(cfd);
2147 rv = 1;
2148 goto release;
2149 }
2150 /* Make sure mdmon has seen the device removal
2151 * and updated metadata before we continue with
2152 * level change
2153 */
2154 if (container)
2155 ping_monitor(container);
2156 }
2157
2158 memset(&info, 0, sizeof(info));
2159 info.array = array;
2160 sysfs_init(&info, fd, NULL);
2161 strcpy(info.text_version, sra->text_version);
2162 info.component_size = s->size*2;
2163 info.new_level = s->level;
2164 info.new_chunk = s->chunk * 1024;
2165 if (info.array.level == LEVEL_CONTAINER) {
2166 info.delta_disks = UnSet;
2167 info.array.raid_disks = s->raiddisks;
2168 } else if (s->raiddisks)
2169 info.delta_disks = s->raiddisks - info.array.raid_disks;
2170 else
2171 info.delta_disks = UnSet;
2172 if (s->layout_str == NULL) {
2173 info.new_layout = UnSet;
2174 if (info.array.level == 6 &&
2175 (info.new_level == 6 || info.new_level == UnSet) &&
2176 info.array.layout >= 16) {
2177 pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname);
2178 cont_err("during the reshape, please specify --layout=preserve\n");
2179 cont_err("If you want to change it, specify a layout or use --layout=normalise\n");
2180 rv = 1;
2181 goto release;
2182 }
2183 } else if (strcmp(s->layout_str, "normalise") == 0 ||
2184 strcmp(s->layout_str, "normalize") == 0) {
2185 /* If we have a -6 RAID6 layout, remove the '-6'. */
2186 info.new_layout = UnSet;
2187 if (info.array.level == 6 && info.new_level == UnSet) {
2188 char l[40], *h;
2189 strcpy(l, map_num(r6layout, info.array.layout));
2190 h = strrchr(l, '-');
2191 if (h && strcmp(h, "-6") == 0) {
2192 *h = 0;
2193 info.new_layout = map_name(r6layout, l);
2194 }
2195 } else {
2196 pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str);
2197 rv = 1;
2198 goto release;
2199 }
2200 } else if (strcmp(s->layout_str, "preserve") == 0) {
2201 /* This means that a non-standard RAID6 layout
2202 * is OK.
2203 * In particular:
2204 * - When reshaping a RAID6 (e.g. adding a device)
2205 * which is in a non-standard layout, it is OK
2206 * to preserve that layout.
2207 * - When converting a RAID5 to RAID6, leave it in
2208 * the XXX-6 layout, don't re-layout.
2209 */
2210 if (info.array.level == 6 && info.new_level == UnSet)
2211 info.new_layout = info.array.layout;
2212 else if (info.array.level == 5 && info.new_level == 6) {
2213 char l[40];
2214 strcpy(l, map_num(r5layout, info.array.layout));
2215 strcat(l, "-6");
2216 info.new_layout = map_name(r6layout, l);
2217 } else {
2218 pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str);
2219 rv = 1;
2220 goto release;
2221 }
2222 } else {
2223 int l = info.new_level;
2224 if (l == UnSet)
2225 l = info.array.level;
2226 switch (l) {
2227 case 5:
2228 info.new_layout = map_name(r5layout, s->layout_str);
2229 break;
2230 case 6:
2231 info.new_layout = map_name(r6layout, s->layout_str);
2232 break;
2233 case 10:
2234 info.new_layout = parse_layout_10(s->layout_str);
2235 break;
2236 case LEVEL_FAULTY:
2237 info.new_layout = parse_layout_faulty(s->layout_str);
2238 break;
2239 default:
2240 pr_err("layout not meaningful with this level\n");
2241 rv = 1;
2242 goto release;
2243 }
2244 if (info.new_layout == UnSet) {
2245 pr_err("layout %s not understood for this level\n",
2246 s->layout_str);
2247 rv = 1;
2248 goto release;
2249 }
2250 }
2251
2252 if (array.level == LEVEL_FAULTY) {
2253 if (s->level != UnSet && s->level != array.level) {
2254 pr_err("cannot change level of Faulty device\n");
2255 rv = 1;
2256 }
2257 if (s->chunk) {
2258 pr_err("cannot set chunksize of Faulty device\n");
2259 rv = 1;
2260 }
2261 if (s->raiddisks && s->raiddisks != 1) {
2262 pr_err("cannot set raid_disks of Faulty device\n");
2263 rv = 1;
2264 }
2265 if (s->layout_str) {
2266 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2267 dprintf("Cannot get array information.\n");
2268 goto release;
2269 }
2270 array.layout = info.new_layout;
2271 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2272 pr_err("failed to set new layout\n");
2273 rv = 1;
2274 } else if (c->verbose >= 0)
2275 printf("layout for %s set to %d\n",
2276 devname, array.layout);
2277 }
2278 } else if (array.level == LEVEL_CONTAINER) {
2279 /* This change is to be applied to every array in the
2280 * container. This is only needed when the metadata imposes
2281 * restraints on the various arrays in the container.
2282 * Currently we only know that IMSM requires all arrays
2283 * to have the same number of devices, so changing the
2284 * number of devices (On-Line Capacity Expansion) must be
2285 * performed at the level of the container.
2286 */
2287 if (fd > 0) {
2288 close(fd);
2289 fd = -1;
2290 }
2291 rv = reshape_container(container, devname, -1, st, &info,
2292 c->force, c->backup_file, c->verbose, 0, 0, 0);
2293 frozen = 0;
2294 } else {
2295 /* get spare devices from external metadata
2296 */
2297 if (st->ss->external) {
2298 struct mdinfo *info2;
2299
2300 info2 = st->ss->container_content(st, subarray);
2301 if (info2) {
2302 info.array.spare_disks =
2303 info2->array.spare_disks;
2304 sysfs_free(info2);
2305 }
2306 }
2307
2308 /* Impose these changes on a single array. First
2309 * check that the metadata is OK with the change. */
2310
2311 if (reshape_super(st, 0, info.new_level,
2312 info.new_layout, info.new_chunk,
2313 info.array.raid_disks, info.delta_disks,
2314 c->backup_file, devname, APPLY_METADATA_CHANGES,
2315 c->verbose)) {
2316 rv = 1;
2317 goto release;
2318 }
2319 sync_metadata(st);
2320 rv = reshape_array(container, fd, devname, st, &info, c->force,
2321 devlist, data_offset, c->backup_file, c->verbose,
2322 0, 0, 0);
2323 frozen = 0;
2324 }
2325 release:
2326 sysfs_free(sra);
2327 if (frozen > 0)
2328 unfreeze(st);
2329 return rv;
2330 }
2331
2332 /* verify_reshape_position()
2333 * Checks that the reshape position recorded in the metadata is not
2334 * farther along than the position known to md.
2335 * Return value:
2336 * 0 : no valid sysfs entry;
2337 * this can happen when the reshape has not been started yet (it is
2338 * started by reshape_array()) or when a raid0 array has not yet been taken over
2339 * -1 : error, reshape position is obviously wrong
2340 * 1 : success, reshape progress correct or updated
2341 */
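/* Note: "sync_max" is in sectors per device, so it is multiplied by the
 * number of data disks below to match the array-wide units of
 * info->reshape_progress.
 */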
2342 static int verify_reshape_position(struct mdinfo *info, int level)
2343 {
2344 int ret_val = 0;
2345 char buf[40];
2346 int rv;
2347
2348 /* read sync_max, failure can mean raid0 array */
2349 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2350
2351 if (rv > 0) {
2352 char *ep;
2353 unsigned long long position = strtoull(buf, &ep, 0);
2354
2355 dprintf("Read sync_max sysfs entry is: %s\n", buf);
2356 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2357 position *= get_data_disks(level,
2358 info->new_layout,
2359 info->array.raid_disks);
2360 if (info->reshape_progress < position) {
2361 dprintf("Corrected reshape progress (%llu) to md position (%llu)\n",
2362 info->reshape_progress, position);
2363 info->reshape_progress = position;
2364 ret_val = 1;
2365 } else if (info->reshape_progress > position) {
2366 pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n",
2367 position, info->reshape_progress);
2368 ret_val = -1;
2369 } else {
2370 dprintf("Reshape position in md and metadata are the same;");
2371 ret_val = 1;
2372 }
2373 }
2374 } else if (rv == 0) {
2375 /* for valid sysfs entry, 0-length content
2376 * should be indicated as error
2377 */
2378 ret_val = -1;
2379 }
2380
2381 return ret_val;
2382 }
2383
2384 static unsigned long long choose_offset(unsigned long long lo,
2385 unsigned long long hi,
2386 unsigned long long min,
2387 unsigned long long max)
2388 {
2389 /* Choose a new offset between hi and lo.
2390 * It must be between min and max, but
2391 * we would prefer something near the middle of hi/lo, and also
2392 * prefer to be aligned to a big power of 2.
2393 *
2394 * So we start with the middle, then for each bit,
2395 * starting at '1' and increasing, if it is set, we either
2396 * add it or subtract it if possible, preferring the option
2397 * which is furthest from the boundary.
2398 *
2399 * We stop once we get a 1MB alignment. As units are in sectors,
2400 * 1MB = 2*1024 sectors.
2401 */
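/* Illustrative trace (made-up values): choose_offset(1000, 9000, 1000, 9000)
 * starts from the midpoint 5000, adjusts at bits 8, 128 and 1024, and
 * returns 4096 - close to the middle and aligned to 4096 sectors (2 MiB).
 */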
2402 unsigned long long choice = (lo + hi) / 2;
2403 unsigned long long bit = 1;
2404
2405 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2406 unsigned long long bigger, smaller;
2407 if (! (bit & choice))
2408 continue;
2409 bigger = choice + bit;
2410 smaller = choice - bit;
2411 if (bigger > max && smaller < min)
2412 break;
2413 if (bigger > max)
2414 choice = smaller;
2415 else if (smaller < min)
2416 choice = bigger;
2417 else if (hi - bigger > smaller - lo)
2418 choice = bigger;
2419 else
2420 choice = smaller;
2421 }
2422 return choice;
2423 }
2424
2425 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2426 char *devname, int delta_disks,
2427 unsigned long long data_offset,
2428 unsigned long long min,
2429 int can_fallback)
2430 {
2431 struct mdinfo *sd;
2432 int dir = 0;
2433 int err = 0;
2434 unsigned long long before, after;
2435
2436 /* Need to find the minimum space before and after so the same is used
2437 * on all devices
2438 */
2439 before = UINT64_MAX;
2440 after = UINT64_MAX;
2441 for (sd = sra->devs; sd; sd = sd->next) {
2442 char *dn;
2443 int dfd;
2444 int rv;
2445 struct supertype *st2;
2446 struct mdinfo info2;
2447
2448 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2449 continue;
2450 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2451 dfd = dev_open(dn, O_RDONLY);
2452 if (dfd < 0) {
2453 pr_err("%s: cannot open component %s\n",
2454 devname, dn ? dn : "-unknown-");
2455 goto release;
2456 }
2457 st2 = dup_super(st);
2458 rv = st2->ss->load_super(st2,dfd, NULL);
2459 close(dfd);
2460 if (rv) {
2461 free(st2);
2462 pr_err("%s: cannot get superblock from %s\n",
2463 devname, dn);
2464 goto release;
2465 }
2466 st2->ss->getinfo_super(st2, &info2, NULL);
2467 st2->ss->free_super(st2);
2468 free(st2);
2469 if (info2.space_before == 0 &&
2470 info2.space_after == 0) {
2471 /* Metadata doesn't support data_offset changes */
2472 if (!can_fallback)
2473 pr_err("%s: Metadata version doesn't support data_offset changes\n",
2474 devname);
2475 goto fallback;
2476 }
2477 if (before > info2.space_before)
2478 before = info2.space_before;
2479 if (after > info2.space_after)
2480 after = info2.space_after;
2481
2482 if (data_offset != INVALID_SECTORS) {
2483 if (dir == 0) {
2484 if (info2.data_offset == data_offset) {
2485 pr_err("%s: already has that data_offset\n",
2486 dn);
2487 goto release;
2488 }
2489 if (data_offset < info2.data_offset)
2490 dir = -1;
2491 else
2492 dir = 1;
2493 } else if ((data_offset <= info2.data_offset && dir == 1) ||
2494 (data_offset >= info2.data_offset && dir == -1)) {
2495 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2496 dn);
2497 goto release;
2498 }
2499 }
2500 }
2501 if (before == UINT64_MAX)
2502 /* impossible really, there must be no devices */
2503 return 1;
2504
2505 for (sd = sra->devs; sd; sd = sd->next) {
2506 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2507 unsigned long long new_data_offset;
2508
2509 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2510 continue;
2511 if (delta_disks < 0) {
2512 /* Don't need any space as the array is shrinking;
2513 * just move data_offset up by min
2514 */
2515 if (data_offset == INVALID_SECTORS)
2516 new_data_offset = sd->data_offset + min;
2517 else {
2518 if (data_offset < sd->data_offset + min) {
2519 pr_err("--data-offset too small for %s\n",
2520 dn);
2521 goto release;
2522 }
2523 new_data_offset = data_offset;
2524 }
2525 } else if (delta_disks > 0) {
2526 /* need space before */
2527 if (before < min) {
2528 if (can_fallback)
2529 goto fallback;
2530 pr_err("Insufficient head-space for reshape on %s\n",
2531 dn);
2532 goto release;
2533 }
2534 if (data_offset == INVALID_SECTORS)
2535 new_data_offset = sd->data_offset - min;
2536 else {
2537 if (data_offset > sd->data_offset - min) {
2538 pr_err("--data-offset too large for %s\n",
2539 dn);
2540 goto release;
2541 }
2542 new_data_offset = data_offset;
2543 }
2544 } else {
2545 if (dir == 0) {
2546 /* can move up or down. If 'data_offset'
2547 * was set we would have already decided,
2548 * so just choose the direction with the most space.
2549 */
2550 if (before > after)
2551 dir = -1;
2552 else
2553 dir = 1;
2554 }
2555 sysfs_set_str(sra, NULL, "reshape_direction",
2556 dir == 1 ? "backwards" : "forwards");
2557 if (dir > 0) {
2558 /* Increase data offset */
2559 if (after < min) {
2560 if (can_fallback)
2561 goto fallback;
2562 pr_err("Insufficient tail-space for reshape on %s\n",
2563 dn);
2564 goto release;
2565 }
2566 if (data_offset != INVALID_SECTORS &&
2567 data_offset < sd->data_offset + min) {
2568 pr_err("--data-offset too small on %s\n",
2569 dn);
2570 goto release;
2571 }
2572 if (data_offset != INVALID_SECTORS)
2573 new_data_offset = data_offset;
2574 else
2575 new_data_offset = choose_offset(sd->data_offset,
2576 sd->data_offset + after,
2577 sd->data_offset + min,
2578 sd->data_offset + after);
2579 } else {
2580 /* Decrease data offset */
2581 if (before < min) {
2582 if (can_fallback)
2583 goto fallback;
2584 pr_err("insufficient head-room on %s\n",
2585 dn);
2586 goto release;
2587 }
2588 if (data_offset != INVALID_SECTORS &&
2589 data_offset < sd->data_offset - min) {
2590 pr_err("--data-offset too small on %s\n",
2591 dn);
2592 goto release;
2593 }
2594 if (data_offset != INVALID_SECTORS)
2595 new_data_offset = data_offset;
2596 else
2597 new_data_offset = choose_offset(sd->data_offset - before,
2598 sd->data_offset,
2599 sd->data_offset - before,
2600 sd->data_offset - min);
2601 }
2602 }
2603 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2604 if (err < 0 && errno == E2BIG) {
2605 /* try again after increasing data size to max */
2606 err = sysfs_set_num(sra, sd, "size", 0);
2607 if (err < 0 && errno == EINVAL &&
2608 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2609 /* some kernels have a bug where you cannot
2610 * use '0' on spare devices. */
2611 sysfs_set_num(sra, sd, "size",
2612 (sra->component_size + after)/2);
2613 }
2614 err = sysfs_set_num(sra, sd, "new_offset",
2615 new_data_offset);
2616 }
2617 if (err < 0) {
2618 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2619 pr_err("data-offset is too big for %s\n",
2620 dn);
2621 goto release;
2622 }
2623 if (sd == sra->devs &&
2624 (errno == ENOENT || errno == E2BIG))
2625 /* Early kernel, no 'new_offset' file,
2626 * or kernel doesn't like us.
2627 * For RAID5/6 this is not fatal
2628 */
2629 return 1;
2630 pr_err("Cannot set new_offset for %s\n",
2631 dn);
2632 break;
2633 }
2634 }
2635 return err;
2636 release:
2637 return -1;
2638 fallback:
2639 /* Just use a backup file */
2640 return 1;
2641 }
2642
2643 static int raid10_reshape(char *container, int fd, char *devname,
2644 struct supertype *st, struct mdinfo *info,
2645 struct reshape *reshape,
2646 unsigned long long data_offset,
2647 int force, int verbose)
2648 {
2649 /* Changing raid_disks, layout, chunksize or possibly
2650 * just data_offset for a RAID10.
2651 * We must always change data_offset. We change by at least
2652 * ->min_offset_change which is the largest of the old and new
2653 * chunk sizes.
2654 * If raid_disks is increasing, then data_offset must decrease
2655 * by at least this copy size.
2656 * If raid_disks is unchanged, data_offset must increase or
2657 * decrease by at least min_offset_change but preferably by much more.
2658 * We choose half of the available space.
2659 * If raid_disks is decreasing, data_offset must increase by
2660 * at least min_offset_change. To allow for this, component_size
2661 * must be decreased by the same amount.
2662 *
2663 * So we calculate the required minimum and direction, possibly
2664 * reduce the component_size, then iterate through the devices
2665 * and set the new_data_offset.
2666 * If that all works, we set chunk_size, layout, raid_disks, and start
2667 * 'reshape'
2668 */
2669 struct mdinfo *sra;
2670 unsigned long long min;
2671 int err = 0;
2672
2673 sra = sysfs_read(fd, NULL,
2674 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2675 );
2676 if (!sra) {
2677 pr_err("%s: Cannot get array details from sysfs\n",
2678 devname);
2679 goto release;
2680 }
2681 min = reshape->min_offset_change;
2682
2683 if (info->delta_disks)
2684 sysfs_set_str(sra, NULL, "reshape_direction",
2685 info->delta_disks < 0 ? "backwards" : "forwards");
2686 if (info->delta_disks < 0 &&
2687 info->space_after < min) {
2688 int rv = sysfs_set_num(sra, NULL, "component_size",
2689 (sra->component_size -
2690 min)/2);
2691 if (rv) {
2692 pr_err("cannot reduce component size\n");
2693 goto release;
2694 }
2695 }
2696 err = set_new_data_offset(sra, st, devname, info->delta_disks, data_offset,
2697 min, 0);
2698 if (err == 1) {
2699 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2700 cont_err("supported on this kernel\n");
2701 err = -1;
2702 }
2703 if (err < 0)
2704 goto release;
2705
2706 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2707 err = errno;
2708 if (!err && sysfs_set_num(sra, NULL, "layout", reshape->after.layout) < 0)
2709 err = errno;
2710 if (!err && sysfs_set_num(sra, NULL, "raid_disks",
2711 info->array.raid_disks + info->delta_disks) < 0)
2712 err = errno;
2713 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2714 err = errno;
2715 if (err) {
2716 pr_err("Cannot set array shape for %s\n",
2717 devname);
2718 if (err == EBUSY &&
2719 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2720 cont_err(" Bitmap must be removed before shape can be changed\n");
2721 goto release;
2722 }
2723 sysfs_free(sra);
2724 return 0;
2725 release:
2726 sysfs_free(sra);
2727 return 1;
2728 }
2729
2730 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2731 {
2732 struct mdinfo *sra, *sd;
2733 /* Initialisation to silence compiler warning */
2734 unsigned long long min_space_before = 0, min_space_after = 0;
2735 int first = 1;
2736
2737 sra = sysfs_read(fd, NULL, GET_DEVS);
2738 if (!sra)
2739 return;
2740 for (sd = sra->devs; sd; sd = sd->next) {
2741 char *dn;
2742 int dfd;
2743 struct supertype *st2;
2744 struct mdinfo info2;
2745
2746 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2747 continue;
2748 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2749 dfd = dev_open(dn, O_RDONLY);
2750 if (dfd < 0)
2751 break;
2752 st2 = dup_super(st);
2753 if (st2->ss->load_super(st2,dfd, NULL)) {
2754 close(dfd);
2755 free(st2);
2756 break;
2757 }
2758 close(dfd);
2759 st2->ss->getinfo_super(st2, &info2, NULL);
2760 st2->ss->free_super(st2);
2761 free(st2);
2762 if (first ||
2763 min_space_before > info2.space_before)
2764 min_space_before = info2.space_before;
2765 if (first ||
2766 min_space_after > info2.space_after)
2767 min_space_after = info2.space_after;
2768 first = 0;
2769 }
2770 if (sd == NULL && !first) {
2771 info->space_after = min_space_after;
2772 info->space_before = min_space_before;
2773 }
2774 sysfs_free(sra);
2775 }
2776
2777 static void update_cache_size(char *container, struct mdinfo *sra,
2778 struct mdinfo *info,
2779 int disks, unsigned long long blocks)
2780 {
2781 /* Check that the internal stripe cache is
2782 * large enough, or it won't work.
2783 * It must hold at least 4 stripes of the larger
2784 * chunk size
2785 */
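/* For example (illustrative numbers): with 512 KiB chunks the 4-stripe
 * minimum is 512 KiB * 4 = 2 MiB = 4096 sectors, i.e. 512 pages after the
 * sectors-to-pages conversion below, unless the 'blocks' term raises it.
 */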
2786 unsigned long cache;
2787 cache = max(info->array.chunk_size, info->new_chunk);
2788 cache *= 4; /* 4 stripes minimum */
2789 cache /= 512; /* convert to sectors */
2790 /* make sure there is room for 'blocks' with a bit to spare */
2791 if (cache < 16 + blocks / disks)
2792 cache = 16 + blocks / disks;
2793 cache /= (4096/512); /* Convert from sectors to pages */
2794
2795 if (sra->cache_size < cache)
2796 subarray_set_num(container, sra, "stripe_cache_size",
2797 cache+1);
2798 }
2799
2800 static int impose_reshape(struct mdinfo *sra,
2801 struct mdinfo *info,
2802 struct supertype *st,
2803 int fd,
2804 int restart,
2805 char *devname, char *container,
2806 struct reshape *reshape)
2807 {
2808 struct mdu_array_info_s array;
2809
2810 sra->new_chunk = info->new_chunk;
2811
2812 if (restart) {
2813 /* For external metadata the checkpoint saved by mdmon can be lost
2814 * or missed (due to e.g. a crash). Check whether md has progressed,
2815 * during restart, farther than the metadata points to.
2816 * If so, the metadata information is obsolete.
2817 */
2818 if (st->ss->external)
2819 verify_reshape_position(info, reshape->level);
2820 sra->reshape_progress = info->reshape_progress;
2821 } else {
2822 sra->reshape_progress = 0;
2823 if (reshape->after.data_disks < reshape->before.data_disks)
2824 /* start from the end of the new array */
2825 sra->reshape_progress = (sra->component_size
2826 * reshape->after.data_disks);
2827 }
2828
2829 ioctl(fd, GET_ARRAY_INFO, &array);
2830 if (info->array.chunk_size == info->new_chunk &&
2831 reshape->before.layout == reshape->after.layout &&
2832 st->ss->external == 0) {
2833 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2834 array.raid_disks = reshape->after.data_disks + reshape->parity;
2835 if (!restart &&
2836 ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2837 int err = errno;
2838
2839 pr_err("Cannot set device shape for %s: %s\n",
2840 devname, strerror(errno));
2841
2842 if (err == EBUSY &&
2843 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2844 cont_err("Bitmap must be removed before shape can be changed\n");
2845
2846 goto release;
2847 }
2848 } else if (!restart) {
2849 /* set them all just in case some old 'new_*' value
2850 * persists from some earlier problem.
2851 */
2852 int err = 0;
2853 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2854 err = errno;
2855 if (!err && sysfs_set_num(sra, NULL, "layout",
2856 reshape->after.layout) < 0)
2857 err = errno;
2858 if (!err && subarray_set_num(container, sra, "raid_disks",
2859 reshape->after.data_disks +
2860 reshape->parity) < 0)
2861 err = errno;
2862 if (err) {
2863 pr_err("Cannot set device shape for %s\n",
2864 devname);
2865
2866 if (err == EBUSY &&
2867 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2868 cont_err("Bitmap must be removed before shape can be changed\n");
2869 goto release;
2870 }
2871 }
2872 return 0;
2873 release:
2874 return -1;
2875 }
2876
2877 static int impose_level(int fd, int level, char *devname, int verbose)
2878 {
2879 char *c;
2880 struct mdu_array_info_s array;
2881 struct mdinfo info;
2882 sysfs_init(&info, fd, NULL);
2883
2884 ioctl(fd, GET_ARRAY_INFO, &array);
2885 if (level == 0 &&
2886 (array.level >= 4 && array.level <= 6)) {
2887 /* To convert to RAID0 we need to fail and
2888 * remove any non-data devices. */
2889 int found = 0;
2890 int d;
2891 int data_disks = array.raid_disks - 1;
2892 if (array.level == 6)
2893 data_disks -= 1;
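/* Only the parity-N layouts keep the data blocks in plain RAID0 order
 * (parity confined to the last device(s)), so other layouts cannot be
 * taken over to RAID0 directly.
 */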
2894 if (array.level == 5 &&
2895 array.layout != ALGORITHM_PARITY_N)
2896 return -1;
2897 if (array.level == 6 &&
2898 array.layout != ALGORITHM_PARITY_N_6)
2899 return -1;
2900 sysfs_set_str(&info, NULL,"sync_action", "idle");
2901 /* First remove any spares so no recovery starts */
2902 for (d = 0, found = 0;
2903 d < MAX_DISKS && found < array.nr_disks;
2904 d++) {
2905 mdu_disk_info_t disk;
2906 disk.number = d;
2907 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2908 continue;
2909 if (disk.major == 0 && disk.minor == 0)
2910 continue;
2911 found++;
2912 if ((disk.state & (1 << MD_DISK_ACTIVE))
2913 && disk.raid_disk < data_disks)
2914 /* keep this */
2915 continue;
2916 ioctl(fd, HOT_REMOVE_DISK,
2917 makedev(disk.major, disk.minor));
2918 }
2919 /* Now fail anything left */
2920 ioctl(fd, GET_ARRAY_INFO, &array);
2921 for (d = 0, found = 0;
2922 d < MAX_DISKS && found < array.nr_disks;
2923 d++) {
2924 mdu_disk_info_t disk;
2925 disk.number = d;
2926 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2927 continue;
2928 if (disk.major == 0 && disk.minor == 0)
2929 continue;
2930 found++;
2931 if ((disk.state & (1 << MD_DISK_ACTIVE))
2932 && disk.raid_disk < data_disks)
2933 /* keep this */
2934 continue;
2935 ioctl(fd, SET_DISK_FAULTY,
2936 makedev(disk.major, disk.minor));
2937 hot_remove_disk(fd, makedev(disk.major, disk.minor), 1);
2938 }
2939 }
2940 c = map_num(pers, level);
2941 if (c) {
2942 int err = sysfs_set_str(&info, NULL, "level", c);
2943 if (err) {
2944 err = errno;
2945 pr_err("%s: could not set level to %s\n",
2946 devname, c);
2947 if (err == EBUSY &&
2948 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2949 cont_err("Bitmap must be removed before level can be changed\n");
2950 return err;
2951 }
2952 if (verbose >= 0)
2953 pr_err("level of %s changed to %s\n",
2954 devname, c);
2955 }
2956 return 0;
2957 }
2958
2959 int sigterm = 0;
2960 static void catch_term(int sig)
2961 {
2962 sigterm = 1;
2963 }
2964
2965 static int continue_via_systemd(char *devnm)
2966 {
2967 int skipped, i, pid, status;
2968 char pathbuf[1024];
2969 /* In a systemd/udev world, it is best to get systemd to
2970 * run "mdadm --grow --continue" rather than running in the
2971 * background.
2972 */
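/* The child asks systemctl to start the template unit
 * mdadm-grow-continue@<devnm>.service (e.g. mdadm-grow-continue@md127.service
 * for an assumed device md127). A non-zero exit status makes the caller
 * fall back to forking its own monitor process.
 */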
2973 switch(fork()) {
2974 case 0:
2975 /* FIXME yuk. CLOSE_EXEC?? */
2976 skipped = 0;
2977 for (i = 3; skipped < 20; i++)
2978 if (close(i) < 0)
2979 skipped++;
2980 else
2981 skipped = 0;
2982
2983 /* Don't want to see error messages from
2984 * systemctl. If the service doesn't exist,
2985 * we fall back to forking ourselves.
2986 */
2987 close(2);
2988 open("/dev/null", O_WRONLY);
2989 snprintf(pathbuf, sizeof(pathbuf), "mdadm-grow-continue@%s.service",
2990 devnm);
2991 status = execl("/usr/bin/systemctl", "systemctl",
2992 "start",
2993 pathbuf, NULL);
2994 status = execl("/bin/systemctl", "systemctl", "start",
2995 pathbuf, NULL);
2996 exit(1);
2997 case -1: /* Just do it ourselves. */
2998 break;
2999 default: /* parent - good */
3000 pid = wait(&status);
3001 if (pid >= 0 && status == 0)
3002 return 1;
3003 }
3004 return 0;
3005 }
3006
3007 static int reshape_array(char *container, int fd, char *devname,
3008 struct supertype *st, struct mdinfo *info,
3009 int force, struct mddev_dev *devlist,
3010 unsigned long long data_offset,
3011 char *backup_file, int verbose, int forked,
3012 int restart, int freeze_reshape)
3013 {
3014 struct reshape reshape;
3015 int spares_needed;
3016 char *msg;
3017 int orig_level = UnSet;
3018 int odisks;
3019 int delayed;
3020
3021 struct mdu_array_info_s array;
3022 char *c;
3023
3024 struct mddev_dev *dv;
3025 int added_disks;
3026
3027 int *fdlist = NULL;
3028 unsigned long long *offsets = NULL;
3029 int d;
3030 int nrdisks;
3031 int err;
3032 unsigned long blocks;
3033 unsigned long long array_size;
3034 int done;
3035 struct mdinfo *sra = NULL;
3036 char buf[20];
3037
3038 /* when reshaping a RAID0, the component_size might be zero.
3039 * So try to fix that up.
3040 */
3041 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
3042 dprintf("Cannot get array information.\n");
3043 goto release;
3044 }
3045 if (array.level == 0 && info->component_size == 0) {
3046 get_dev_size(fd, NULL, &array_size);
3047 info->component_size = array_size / array.raid_disks;
3048 }
3049
3050 if (array.level == 10)
3051 /* Need space_after info */
3052 get_space_after(fd, st, info);
3053
3054 if (info->reshape_active) {
3055 int new_level = info->new_level;
3056 info->new_level = UnSet;
3057 if (info->delta_disks > 0)
3058 info->array.raid_disks -= info->delta_disks;
3059 msg = analyse_change(devname, info, &reshape);
3060 info->new_level = new_level;
3061 if (info->delta_disks > 0)
3062 info->array.raid_disks += info->delta_disks;
3063 if (!restart)
3064 /* Make sure the array isn't read-only */
3065 ioctl(fd, RESTART_ARRAY_RW, 0);
3066 } else
3067 msg = analyse_change(devname, info, &reshape);
3068 if (msg) {
3069 /* if msg == "", error has already been printed */
3070 if (msg[0])
3071 pr_err("%s\n", msg);
3072 goto release;
3073 }
3074 if (restart &&
3075 (reshape.level != info->array.level ||
3076 reshape.before.layout != info->array.layout ||
3077 reshape.before.data_disks + reshape.parity
3078 != info->array.raid_disks - max(0, info->delta_disks))) {
3079 pr_err("reshape info is not in native format - cannot continue.\n");
3080 goto release;
3081 }
3082
3083 if (st->ss->external && restart && (info->reshape_progress == 0) &&
3084 !((sysfs_get_str(info, NULL, "sync_action", buf, sizeof(buf)) > 0) &&
3085 (strncmp(buf, "reshape", 7) == 0))) {
3086 /* When a reshape is restarted from '0', the very beginning of the array,
3087 * it is possible that for external metadata the reshape and array
3088 * configuration were never applied.
3089 * Check whether md has the same opinion and the reshape really is
3090 * restarting from 0. If so, this is a regular reshape start after the
3091 * metadata switched to the next array.
3092 */
3093 if ((verify_reshape_position(info, reshape.level) >= 0) &&
3094 (info->reshape_progress == 0))
3095 restart = 0;
3096 }
3097 if (restart) {
3098 /* reshape already started. just skip to monitoring the reshape */
3099 if (reshape.backup_blocks == 0)
3100 return 0;
3101 if (restart & RESHAPE_NO_BACKUP)
3102 return 0;
3103
3104 /* Need 'sra' down at 'started:' */
3105 sra = sysfs_read(fd, NULL,
3106 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3107 GET_CACHE);
3108 if (!sra) {
3109 pr_err("%s: Cannot get array details from sysfs\n",
3110 devname);
3111 goto release;
3112 }
3113
3114 if (!backup_file)
3115 backup_file = locate_backup(sra->sys_name);
3116
3117 goto started;
3118 }
3119 /* The container is frozen but the array may not be.
3120 * So freeze the array so spares don't get put to the wrong use
3121 * FIXME there should probably be a cleaner separation between
3122 * freeze_array and freeze_container.
3123 */
3124 sysfs_freeze_array(info);
3125 /* Check we have enough spares to not be degraded */
3126 added_disks = 0;
3127 for (dv = devlist; dv ; dv=dv->next)
3128 added_disks++;
3129 spares_needed = max(reshape.before.data_disks,
3130 reshape.after.data_disks)
3131 + reshape.parity - array.raid_disks;
3132
3133 if (!force &&
3134 info->new_level > 1 && info->array.level > 1 &&
3135 spares_needed > info->array.spare_disks + added_disks) {
3136 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
3137 " Use --force to over-ride this check.\n",
3138 spares_needed,
3139 spares_needed == 1 ? "" : "s",
3140 info->array.spare_disks + added_disks);
3141 goto release;
3142 }
3143 /* Check we have enough spares to not fail */
3144 spares_needed = max(reshape.before.data_disks,
3145 reshape.after.data_disks)
3146 - array.raid_disks;
3147 if ((info->new_level > 1 || info->new_level == 0) &&
3148 spares_needed > info->array.spare_disks +added_disks) {
3149 pr_err("Need %d spare%s to create working array, and only have %d.\n",
3150 spares_needed,
3151 spares_needed == 1 ? "" : "s",
3152 info->array.spare_disks + added_disks);
3153 goto release;
3154 }
3155
3156 if (reshape.level != array.level) {
3157 int err = impose_level(fd, reshape.level, devname, verbose);
3158 if (err)
3159 goto release;
3160 info->new_layout = UnSet; /* after level change,
3161 * layout is meaningless */
3162 orig_level = array.level;
3163 sysfs_freeze_array(info);
3164
3165 if (reshape.level > 0 && st->ss->external) {
3166 /* make sure mdmon is aware of the new level */
3167 if (mdmon_running(container))
3168 flush_mdmon(container);
3169
3170 if (!mdmon_running(container))
3171 start_mdmon(container);
3172 ping_monitor(container);
3173 if (mdmon_running(container) &&
3174 st->update_tail == NULL)
3175 st->update_tail = &st->updates;
3176 }
3177 }
3178 /* ->reshape_super might have chosen some spares from the
3179 * container that it wants to be part of the new array.
3180 * We can collect them with ->container_content and give
3181 * them to the kernel.
3182 */
3183 if (st->ss->reshape_super && st->ss->container_content) {
3184 char *subarray = strchr(info->text_version+1, '/')+1;
3185 struct mdinfo *info2 =
3186 st->ss->container_content(st, subarray);
3187 struct mdinfo *d;
3188
3189 if (info2) {
3190 sysfs_init(info2, fd, st->devnm);
3191 /* When increasing number of devices, we need to set
3192 * new raid_disks before adding these, or they might
3193 * be rejected.
3194 */
3195 if (reshape.backup_blocks &&
3196 reshape.after.data_disks > reshape.before.data_disks)
3197 subarray_set_num(container, info2, "raid_disks",
3198 reshape.after.data_disks +
3199 reshape.parity);
3200 for (d = info2->devs; d; d = d->next) {
3201 if (d->disk.state == 0 &&
3202 d->disk.raid_disk >= 0) {
3203 /* This is a spare that wants to
3204 * be part of the array.
3205 */
3206 add_disk(fd, st, info2, d);
3207 }
3208 }
3209 sysfs_free(info2);
3210 }
3211 }
3212 /* We might have been given some devices to add to the
3213 * array. Now that the array has been changed to the right
3214 * level and frozen, we can safely add them.
3215 */
3216 if (devlist) {
3217 if (Manage_subdevs(devname, fd, devlist, verbose,
3218 0, NULL, 0))
3219 goto release;
3220 }
3221
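/* An explicit --data-offset request must not take the 'no restriping
 * needed' shortcut below, so give it a nominal backup unit of one old
 * stripe of data (in sectors).
 */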
3222 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
3223 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
3224 if (reshape.backup_blocks == 0) {
3225 /* No restriping needed, but we might need to impose
3226 * some more changes: layout, raid_disks, chunk_size
3227 */
3228 /* read current array info */
3229 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
3230 dprintf("Cannot get array information.\n");
3231 goto release;
3232 }
3233 /* compare current array info with new values and if
3234 * it is different update them to new */
3235 if (info->new_layout != UnSet &&
3236 info->new_layout != array.layout) {
3237 array.layout = info->new_layout;
3238 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
3239 pr_err("failed to set new layout\n");
3240 goto release;
3241 } else if (verbose >= 0)
3242 printf("layout for %s set to %d\n",
3243 devname, array.layout);
3244 }
3245 if (info->delta_disks != UnSet &&
3246 info->delta_disks != 0 &&
3247 array.raid_disks != (info->array.raid_disks + info->delta_disks)) {
3248 array.raid_disks += info->delta_disks;
3249 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
3250 pr_err("failed to set raid disks\n");
3251 goto release;
3252 } else if (verbose >= 0) {
3253 printf("raid_disks for %s set to %d\n",
3254 devname, array.raid_disks);
3255 }
3256 }
3257 if (info->new_chunk != 0 &&
3258 info->new_chunk != array.chunk_size) {
3259 if (sysfs_set_num(info, NULL,
3260 "chunk_size", info->new_chunk) != 0) {
3261 pr_err("failed to set chunk size\n");
3262 goto release;
3263 } else if (verbose >= 0)
3264 printf("chunk size for %s set to %d\n",
3265 devname, array.chunk_size);
3266 }
3267 unfreeze(st);
3268 return 0;
3269 }
3270
3271 /*
3272 * There are three possibilities.
3273 * 1/ The array will shrink.
3274 * We need to ensure the reshape will pause before reaching
3275 * the 'critical section'. We also need to fork and wait for
3276 * that to happen. When it does we
3277 * suspend/backup/complete/unfreeze
3278 *
3279 * 2/ The array will not change size.
3280 * This requires that we keep a backup of a sliding window
3281 * so that we can restore data after a crash. So we need
3282 * to fork and monitor progress.
3283 * In future we will allow the data_offset to change, so
3284 * a sliding backup becomes unnecessary.
3285 *
3286 * 3/ The array will grow. This is relatively easy.
3287 * However the kernel's restripe routines will cheerfully
3288 * overwrite some early data before it is safe. So we
3289 * need to make a backup of the early parts of the array
3290 * and be ready to restore it if rebuild aborts very early.
3291 * For externally managed metadata, we still need a forked
3292 * child to monitor the reshape and suspend IO over the region
3293 * that is being reshaped.
3294 *
3295 * We backup data by writing it to one spare, or to a
3296 * file which was given on command line.
3297 *
3298 * In each case, we first make sure that storage is available
3299 * for the required backup.
3300 * Then we:
3301 * - request the shape change.
3302 * - fork to handle backup etc.
3303 */
3304 /* Check that we can hold all the data */
3305 get_dev_size(fd, NULL, &array_size);
3306 if (reshape.new_size < (array_size/512)) {
3307 pr_err("this change will reduce the size of the array.\n"
3308 " use --grow --array-size first to truncate array.\n"
3309 " e.g. mdadm --grow %s --array-size %llu\n",
3310 devname, reshape.new_size/2);
3311 goto release;
3312 }
3313
3314 if (array.level == 10) {
3315 /* Reshaping RAID10 does not require any data backup by
3316 * user-space. Instead it requires that the data_offset
3317 * is changed to avoid the need for backup.
3318 * So this is handled very separately
3319 */
3320 if (restart)
3321 /* Nothing to do. */
3322 return 0;
3323 return raid10_reshape(container, fd, devname, st, info,
3324 &reshape, data_offset,
3325 force, verbose);
3326 }
3327 sra = sysfs_read(fd, NULL,
3328 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3329 GET_CACHE);
3330 if (!sra) {
3331 pr_err("%s: Cannot get array details from sysfs\n",
3332 devname);
3333 goto release;
3334 }
3335
3336 if (!backup_file)
3337 switch(set_new_data_offset(sra, st, devname,
3338 reshape.after.data_disks - reshape.before.data_disks,
3339 data_offset,
3340 reshape.min_offset_change, 1)) {
3341 case -1:
3342 goto release;
3343 case 0:
3344 /* Updated data_offset, so it's easy now */
3345 update_cache_size(container, sra, info,
3346 min(reshape.before.data_disks,
3347 reshape.after.data_disks),
3348 reshape.backup_blocks);
3349
3350 /* Right, everything seems fine. Let's kick things off.
3351 */
3352 sync_metadata(st);
3353
3354 if (impose_reshape(sra, info, st, fd, restart,
3355 devname, container, &reshape) < 0)
3356 goto release;
3357 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3358 struct mdinfo *sd;
3359 if (errno != EINVAL) {
3360 pr_err("Failed to initiate reshape!\n");
3361 goto release;
3362 }
3363 /* revert data_offset and try the old way */
3364 for (sd = sra->devs; sd; sd = sd->next) {
3365 sysfs_set_num(sra, sd, "new_offset",
3366 sd->data_offset);
3367 sysfs_set_str(sra, NULL, "reshape_direction",
3368 "forwards");
3369 }
3370 break;
3371 }
3372 if (info->new_level == reshape.level)
3373 return 0;
3374 /* need to adjust level when reshape completes */
3375 switch(fork()) {
3376 case -1: /* ignore error, but don't wait */
3377 return 0;
3378 default: /* parent */
3379 return 0;
3380 case 0:
3381 map_fork();
3382 break;
3383 }
3384 close(fd);
3385 wait_reshape(sra);
3386 fd = open_dev(sra->sys_name);
3387 if (fd >= 0)
3388 impose_level(fd, info->new_level, devname, verbose);
3389 return 0;
3390 case 1: /* Couldn't set data_offset, try the old way */
3391 if (data_offset != INVALID_SECTORS) {
3392 pr_err("Cannot update data_offset on this array\n");
3393 goto release;
3394 }
3395 break;
3396 }
3397
3398 started:
3399 /* Decide how many blocks (sectors) to use for a reshape
3400 * unit. The number we have so far is just a minimum.
3401 */
3402 blocks = reshape.backup_blocks;
3403 if (reshape.before.data_disks ==
3404 reshape.after.data_disks) {
3405 /* Make 'blocks' bigger for better throughput, but
3406 * not so big that we reject it below.
3407 * Try for 16 megabytes
3408 */
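/* 16*1024*2 sectors is 16 MiB; the blocks*32 bound also keeps the unit
 * below 1/32 of the per-device component size.
 */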
3409 while (blocks * 32 < sra->component_size &&
3410 blocks < 16*1024*2)
3411 blocks *= 2;
3412 } else
3413 pr_err("Need to backup %luK of critical section..\n", blocks/2);
3414
3415 if (blocks >= sra->component_size/2) {
3416 pr_err("%s: Something wrong - reshape aborted\n",
3417 devname);
3418 goto release;
3419 }
3420
3421 /* Now we need to open all these devices so we can read/write.
3422 */
3423 nrdisks = max(reshape.before.data_disks,
3424 reshape.after.data_disks) + reshape.parity
3425 + sra->array.spare_disks;
3426 fdlist = xcalloc((1+nrdisks), sizeof(int));
3427 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3428
3429 odisks = reshape.before.data_disks + reshape.parity;
3430 d = reshape_prepare_fdlist(devname, sra, odisks,
3431 nrdisks, blocks, backup_file,
3432 fdlist, offsets);
3433 if (d < odisks) {
3434 goto release;
3435 }
3436 if ((st->ss->manage_reshape == NULL) ||
3437 (st->ss->recover_backup == NULL)) {
3438 if (backup_file == NULL) {
3439 if (reshape.after.data_disks <=
3440 reshape.before.data_disks) {
3441 pr_err("%s: Cannot grow - need backup-file\n",
3442 devname);
3443 pr_err(" Please provide one with \"--backup=...\"\n");
3444 goto release;
3445 } else if (d == odisks) {
3446 pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname);
3447 goto release;
3448 }
3449 } else {
3450 if (!reshape_open_backup_file(backup_file, fd, devname,
3451 (signed)blocks,
3452 fdlist+d, offsets+d,
3453 sra->sys_name,
3454 restart)) {
3455 goto release;
3456 }
3457 d++;
3458 }
3459 }
3460
3461 update_cache_size(container, sra, info,
3462 min(reshape.before.data_disks, reshape.after.data_disks),
3463 blocks);
3464
3465 /* Right, everything seems fine. Let's kick things off.
3466 * If only changing raid_disks, use ioctl, else use
3467 * sysfs.
3468 */
3469 sync_metadata(st);
3470
3471 if (impose_reshape(sra, info, st, fd, restart,
3472 devname, container, &reshape) < 0)
3473 goto release;
3474
3475 err = start_reshape(sra, restart, reshape.before.data_disks,
3476 reshape.after.data_disks);
3477 if (err) {
3478 pr_err("Cannot %s reshape for %s\n",
3479 restart ? "continue" : "start",
3480 devname);
3481 goto release;
3482 }
3483 if (restart)
3484 sysfs_set_str(sra, NULL, "array_state", "active");
3485 if (freeze_reshape) {
3486 free(fdlist);
3487 free(offsets);
3488 sysfs_free(sra);
3489 pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n",
3490 sra->reshape_progress);
3491 return 1;
3492 }
3493
3494 if (!forked && !check_env("MDADM_NO_SYSTEMCTL"))
3495 if (continue_via_systemd(container ?: sra->sys_name)) {
3496 free(fdlist);
3497 free(offsets);
3498 sysfs_free(sra);
3499 return 0;
3500 }
3501
3502 /* Now we just need to kick off the reshape and watch, while
3503 * handling backups of the data...
3504 * This is all done by a forked background process.
3505 */
3506 switch(forked ? 0 : fork()) {
3507 case -1:
3508 pr_err("Cannot run child to monitor reshape: %s\n",
3509 strerror(errno));
3510 abort_reshape(sra);
3511 goto release;
3512 default:
3513 free(fdlist);
3514 free(offsets);
3515 sysfs_free(sra);
3516 return 0;
3517 case 0:
3518 map_fork();
3519 break;
3520 }
3521
3522 /* If another array on the same devices is busy, the
3523 * reshape will wait for it. This would mean that
3524 * the first section that we suspend will stay suspended
3525 * for a long time. So check on that possibility
3526 * by looking for "DELAYED" in /proc/mdstat, and if found,
3527 * wait a while
3528 */
3529 do {
3530 struct mdstat_ent *mds, *m;
3531 delayed = 0;
3532 mds = mdstat_read(1, 0);
3533 for (m = mds; m; m = m->next)
3534 if (strcmp(m->devnm, sra->sys_name) == 0) {
3535 if (m->resync &&
3536 m->percent == RESYNC_DELAYED)
3537 delayed = 1;
3538 if (m->resync == 0)
3539 /* Haven't started the reshape thread
3540 * yet, wait a bit
3541 */
3542 delayed = 2;
3543 break;
3544 }
3545 free_mdstat(mds);
3546 if (delayed == 1 && get_linux_version() < 3007000) {
3547 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3548 " You might experience problems until other reshapes complete.\n");
3549 delayed = 0;
3550 }
3551 if (delayed)
3552 mdstat_wait(30 - (delayed-1) * 25);
3553 } while (delayed);
3554 mdstat_close();
3555 close(fd);
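/* With MDADM_GROW_VERIFY set (presumably for the test suite), re-open the
 * array O_RDONLY|O_DIRECT so the monitor below can read back and check the
 * regions it has backed up; otherwise no fd is needed here.
 */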
3556 if (check_env("MDADM_GROW_VERIFY"))
3557 fd = open(devname, O_RDONLY | O_DIRECT);
3558 else
3559 fd = -1;
3560 mlockall(MCL_FUTURE);
3561
3562 signal(SIGTERM, catch_term);
3563
3564 if (st->ss->external) {
3565 /* metadata handler takes it from here */
3566 done = st->ss->manage_reshape(
3567 fd, sra, &reshape, st, blocks,
3568 fdlist, offsets,
3569 d - odisks, fdlist+odisks,
3570 offsets+odisks);
3571 } else
3572 done = child_monitor(
3573 fd, sra, &reshape, st, blocks,
3574 fdlist, offsets,
3575 d - odisks, fdlist+odisks,
3576 offsets+odisks);
3577
3578 free(fdlist);
3579 free(offsets);
3580
3581 if (backup_file && done) {
3582 char *bul;
3583 bul = make_backup(sra->sys_name);
3584 if (bul) {
3585 char buf[1024];
3586 int l = readlink(bul, buf, sizeof(buf) - 1);
3587 if (l > 0) {
3588 buf[l]=0;
3589 unlink(buf);
3590 }
3591 unlink(bul);
3592 free(bul);
3593 }
3594 unlink(backup_file);
3595 }
3596 if (!done) {
3597 abort_reshape(sra);
3598 goto out;
3599 }
3600
3601 if (!st->ss->external &&
3602 !(reshape.before.data_disks != reshape.after.data_disks
3603 && info->custom_array_size) &&
3604 info->new_level == reshape.level &&
3605 !forked) {
3606 /* no need to wait for the reshape to finish as
3607 * there is nothing more to do.
3608 */
3609 sysfs_free(sra);
3610 exit(0);
3611 }
3612 wait_reshape(sra);
3613
3614 if (st->ss->external) {
3615 /* Re-load the metadata as much could have changed */
3616 int cfd = open_dev(st->container_devnm);
3617 if (cfd >= 0) {
3618 flush_mdmon(container);
3619 st->ss->free_super(st);
3620 st->ss->load_container(st, cfd, container);
3621 close(cfd);
3622 }
3623 }
3624
3625 /* Set the new array size if required; custom_array_size is used
3626 * by this metadata.
3627 */
3628 if (reshape.before.data_disks !=
3629 reshape.after.data_disks &&
3630 info->custom_array_size)
3631 set_array_size(st, info, info->text_version);
3632
3633 if (info->new_level != reshape.level) {
3634 if (fd < 0)
3635 fd = open(devname, O_RDONLY);
3636 impose_level(fd, info->new_level, devname, verbose);
3637 close(fd);
3638 if (info->new_level == 0)
3639 st->update_tail = NULL;
3640 }
3641 out:
3642 sysfs_free(sra);
3643 if (forked)
3644 return 0;
3645 unfreeze(st);
3646 exit(0);
3647
3648 release:
3649 free(fdlist);
3650 free(offsets);
3651 if (orig_level != UnSet && sra) {
3652 c = map_num(pers, orig_level);
3653 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3654 pr_err("aborting level change\n");
3655 }
3656 sysfs_free(sra);
3657 if (!forked)
3658 unfreeze(st);
3659 return 1;
3660 }
3661
3662 /* The mdfd handle is passed so it can be closed in the child process (after fork).
3663 */
3664 int reshape_container(char *container, char *devname,
3665 int mdfd,
3666 struct supertype *st,
3667 struct mdinfo *info,
3668 int force,
3669 char *backup_file, int verbose,
3670 int forked, int restart, int freeze_reshape)
3671 {
3672 struct mdinfo *cc = NULL;
3673 int rv = restart;
3674 char last_devnm[32] = "";
3675
3676 /* component_size is not meaningful for a container,
3677 * so pass '0' meaning 'no change'
3678 */
3679 if (!restart &&
3680 reshape_super(st, 0, info->new_level,
3681 info->new_layout, info->new_chunk,
3682 info->array.raid_disks, info->delta_disks,
3683 backup_file, devname, APPLY_METADATA_CHANGES,
3684 verbose)) {
3685 unfreeze(st);
3686 return 1;
3687 }
3688
3689 sync_metadata(st);
3690
3691 /* ping monitor to be sure that update is on disk
3692 */
3693 ping_monitor(container);
3694
3695 if (!forked && !freeze_reshape && !check_env("MDADM_NO_SYSTEMCTL"))
3696 if (continue_via_systemd(container))
3697 return 0;
3698
3699 switch (forked ? 0 : fork()) {
3700 case -1: /* error */
3701 perror("Cannot fork to complete reshape\n");
3702 unfreeze(st);
3703 return 1;
3704 default: /* parent */
3705 if (!freeze_reshape)
3706 printf("%s: multi-array reshape continues in background\n", Name);
3707 return 0;
3708 case 0: /* child */
3709 map_fork();
3710 break;
3711 }
3712
3713 /* close unused handle in child process
3714 */
3715 if (mdfd > -1)
3716 close(mdfd);
3717
3718 while(1) {
3719 /* For each member array with reshape_active,
3720 * we need to perform the reshape.
3721 * We pick the first array that needs reshaping and
3722 * reshape it. reshape_array() will re-read the metadata
3723 * so the next time through a different array should be
3724 * ready for reshape.
3725 * It is possible that the 'different' array will not
3726 * be assembled yet. In that case we simply exit.
3727 * When it is assembled, the mdadm which assembles it
3728 * will take over the reshape.
3729 */
3730 struct mdinfo *content;
3731 int fd;
3732 struct mdstat_ent *mdstat;
3733 char *adev;
3734 dev_t devid;
3735
3736 sysfs_free(cc);
3737
3738 cc = st->ss->container_content(st, NULL);
3739
3740 for (content = cc; content ; content = content->next) {
3741 char *subarray;
3742 if (!content->reshape_active)
3743 continue;
3744
3745 subarray = strchr(content->text_version+1, '/')+1;
3746 mdstat = mdstat_by_subdev(subarray, container);
3747 if (!mdstat)
3748 continue;
3749 if (mdstat->active == 0) {
3750 pr_err("Skipping inactive array %s.\n",
3751 mdstat->devnm);
3752 free_mdstat(mdstat);
3753 mdstat = NULL;
3754 continue;
3755 }
3756 break;
3757 }
3758 if (!content)
3759 break;
3760
3761 devid = devnm2devid(mdstat->devnm);
3762 adev = map_dev(major(devid), minor(devid), 0);
3763 if (!adev)
3764 adev = content->text_version;
3765
3766 fd = open_dev(mdstat->devnm);
3767 if (fd < 0) {
3768 pr_err("Device %s cannot be opened for reshape.\n", adev);
3769 break;
3770 }
3771
3772 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3773 /* Do not allow for multiple reshape_array() calls for
3774 * the same array.
3775 * This can happen when reshape_array() returns without
3776 * error but the reshape is not finished (wrong reshape
3777 * starting/continuation conditions). Mdmon doesn't
3778 * switch to the next array in the container, so re-entry
3779 * conditions for the same array occur.
3780 * This is possibly interim until the behaviour of
3781 * reshape_array() is resolved.
3782 */
3783 printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev);
3784 close(fd);
3785 break;
3786 }
3787 strcpy(last_devnm, mdstat->devnm);
3788
3789 sysfs_init(content, fd, mdstat->devnm);
3790
3791 if (mdmon_running(container))
3792 flush_mdmon(container);
3793
3794 rv = reshape_array(container, fd, adev, st,
3795 content, force, NULL, INVALID_SECTORS,
3796 backup_file, verbose, 1, restart,
3797 freeze_reshape);
3798 close(fd);
3799
3800 if (freeze_reshape) {
3801 sysfs_free(cc);
3802 exit(0);
3803 }
3804
3805 restart = 0;
3806 if (rv)
3807 break;
3808
3809 if (mdmon_running(container))
3810 flush_mdmon(container);
3811 }
3812 if (!rv)
3813 unfreeze(st);
3814 sysfs_free(cc);
3815 exit(0);
3816 }
3817
3818 /*
3819 * We run a child process in the background which performs the following
3820 * steps:
3821 * - wait for resync to reach a certain point
3822 * - suspend io to the following section
3823 * - backup that section
3824 * - allow resync to proceed further
3825 * - resume io
3826 * - discard the backup.
3827 *
3828 * These are combined in slightly different ways in the three cases.
3829 * Grow:
3830 * - suspend/backup/allow/wait/resume/discard
3831 * Shrink:
3832 * - allow/wait/suspend/backup/allow/wait/resume/discard
3833 * same-size:
3834 * - wait/resume/discard/suspend/backup/allow
3835 *
3836 * suspend/backup/allow always come together
3837 * wait/resume/discard do too.
3838 * For the same-size case we have two backups to improve flow.
3839 *
3840 */
3841
3842 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3843 unsigned long long backup_point,
3844 unsigned long long wait_point,
3845 unsigned long long *suspend_point,
3846 unsigned long long *reshape_completed, int *frozen)
3847 {
3848 /* This function is called repeatedly by the reshape manager.
3849 * It determines how much progress can safely be made and allows
3850 * that progress.
3851 * - 'info' identifies the array and particularly records in
3852 * ->reshape_progress the metadata's knowledge of progress
3853 * This is a sector offset from the start of the array
3854 * of the next array block to be relocated. This number
3855 * may increase from 0 or decrease from array_size, depending
3856 * on the type of reshape that is happening.
3857 * Note that in contrast, 'sync_completed' is a block count of the
3858 * reshape so far. It gives the distance between the start point
3859 * (head or tail of device) and the next place that data will be
3860 * written. It always increases.
3861 * - 'reshape' is the structure created by analyse_change
3862 * - 'backup_point' shows how much the metadata manager has backed-up
3863 * data. For reshapes with increasing progress, it is the next address
3864 * to be backed up, previous addresses have been backed-up. For
3865 * decreasing progress, it is the earliest address that has been
3866 * backed up - later addresses are also backed up.
3867 * So addresses between reshape_progress and backup_point are
3868 * backed up providing those are in the 'correct' order.
3869 * - 'wait_point' is an array address. When reshape_completed
3870 * passes this point, progress_reshape should return. It might
3871 * return earlier if it determines that ->reshape_progress needs
3872 * to be updated or further backup is needed.
3873 * - suspend_point is maintained by progress_reshape and the caller
3874 * should not touch it except to initialise to zero.
3875 * It is an array address and it only increases in 2.6.37 and earlier.
3876 * This makes it difficult to handle reducing reshapes with
3877 * external metadata.
3878 * However: it is similar to backup_point in that it records the
3879 * other end of a suspended region from reshape_progress.
3880 * it is moved to extend the region that is safe to backup and/or
3881 * reshape
3882 * - reshape_completed is read from sysfs and returned. The caller
3883 * should copy this into ->reshape_progress when it has reason to
3884 * believe that the metadata knows this, and any backup outside this
3885 * has been erased.
3886 *
3887 * Return value is:
3888 * 1 if more data, from backup_point but only as far as suspend_point,
3889 * should be backed up
3890 * 0 if things are progressing smoothly
3891 * -1 if the reshape is finished because it is all done,
3892 * -2 if the reshape is finished due to an error.
3893 */
3894
3895 int advancing = (reshape->after.data_disks
3896 >= reshape->before.data_disks);
3897 unsigned long long need_backup; /* All data between start of array and
3898 * here will at some point need to
3899 * be backed up.
3900 */
3901 unsigned long long read_offset, write_offset;
3902 unsigned long long write_range;
3903 unsigned long long max_progress, target, completed;
3904 unsigned long long array_size = (info->component_size
3905 * reshape->before.data_disks);
3906 int fd;
3907 char buf[20];
3908
3909 /* First, we unsuspend any region that is now known to be safe.
3910 * If suspend_point is on the 'wrong' side of reshape_progress, then
3911 * we don't have or need suspension at the moment. This is true for
3912 * native metadata when we don't need to back-up.
3913 */
3914 if (advancing) {
3915 if (info->reshape_progress <= *suspend_point)
3916 sysfs_set_num(info, NULL, "suspend_lo",
3917 info->reshape_progress);
3918 } else {
3919 /* Note: this won't work in 2.6.37 and before.
3920 * Something somewhere should make sure we don't need it!
3921 */
3922 if (info->reshape_progress >= *suspend_point)
3923 sysfs_set_num(info, NULL, "suspend_hi",
3924 info->reshape_progress);
3925 }
3926
3927 /* Now work out how far it is safe to progress.
3928 * If the read_offset for ->reshape_progress is less than
3929 * 'blocks' beyond the write_offset, we can only progress as far
3930 * as a backup.
3931 * Otherwise we can progress until the write_offset for the new location
3932 * reaches (within 'blocks' of) the read_offset at the current location.
3933 * However that region must be suspended unless we are using native
3934 * metadata.
3935 * If we need to suspend more, we limit it to 128M per device, which is
3936 * rather arbitrary and should be some time-based calculation.
3937 */
3938 read_offset = info->reshape_progress / reshape->before.data_disks;
3939 write_offset = info->reshape_progress / reshape->after.data_disks;
3940 write_range = info->new_chunk/512;
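/* Illustrative example (hypothetical numbers, not from the code above):
 * growing from 3 to 4 data disks with reshape_progress = 6144 array
 * sectors gives read_offset = 2048 and write_offset = 1536 per-device
 * sectors; with a 512K new chunk, write_range = 1024 sectors.
 */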
3941 if (reshape->before.data_disks == reshape->after.data_disks)
3942 need_backup = array_size;
3943 else
3944 need_backup = reshape->backup_blocks;
3945 if (advancing) {
3946 if (read_offset < write_offset + write_range)
3947 max_progress = backup_point;
3948 else
3949 max_progress =
3950 read_offset *
3951 reshape->after.data_disks;
3952 } else {
3953 if (read_offset > write_offset - write_range)
3954 /* Can only progress as far as has been backed up,
3955 * which must be suspended */
3956 max_progress = backup_point;
3957 else if (info->reshape_progress <= need_backup)
3958 max_progress = backup_point;
3959 else {
3960 if (info->array.major_version >= 0)
3961 /* Can progress until backup is needed */
3962 max_progress = need_backup;
3963 else {
3964 /* Can progress until metadata update is required */
3965 max_progress =
3966 read_offset *
3967 reshape->after.data_disks;
3968 /* but data must be suspended */
3969 if (max_progress < *suspend_point)
3970 max_progress = *suspend_point;
3971 }
3972 }
3973 }
3974
3975 /* We know it is safe to progress to 'max_progress' providing
3976 * it is suspended or we are using native metadata.
3977 * Consider extending suspend_point 128M per device if it
3978 * is less than 64M per device beyond reshape_progress.
3979 * But always do a multiple of 'blocks'.
3980 * FIXME this is too big - it takes too long to complete
3981 * this much.
3982 */
3983 target = 64*1024*2 * min(reshape->before.data_disks,
3984 reshape->after.data_disks);
3985 target /= reshape->backup_blocks;
3986 if (target < 2)
3987 target = 2;
3988 target *= reshape->backup_blocks;
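/* Note: 64*1024*2 sectors is 64MiB per device (in 512-byte sectors);
 * multiplying by the smaller data-disk count expresses that as an
 * array-sector span, which is then rounded down to a multiple of
 * backup_blocks with a floor of two backup_blocks.
 */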
3989
3990 /* For externally managed metadata we always need to suspend IO to
3991 * the area being reshaped so we regularly push suspend_point forward.
3992 * For native metadata we only need the suspend if we are going to do
3993 * a backup.
3994 */
3995 if (advancing) {
3996 if ((need_backup > info->reshape_progress
3997 || info->array.major_version < 0) &&
3998 *suspend_point < info->reshape_progress + target) {
3999 if (need_backup < *suspend_point + 2 * target)
4000 *suspend_point = need_backup;
4001 else if (*suspend_point + 2 * target < array_size)
4002 *suspend_point += 2 * target;
4003 else
4004 *suspend_point = array_size;
4005 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
4006 if (max_progress > *suspend_point)
4007 max_progress = *suspend_point;
4008 }
4009 } else {
4010 if (info->array.major_version >= 0) {
4011 /* Only need to suspend when about to backup */
4012 if (info->reshape_progress < need_backup * 2 &&
4013 *suspend_point > 0) {
4014 *suspend_point = 0;
4015 sysfs_set_num(info, NULL, "suspend_lo", 0);
4016 sysfs_set_num(info, NULL, "suspend_hi", need_backup);
4017 }
4018 } else {
4019 /* Need to suspend continually */
4020 if (info->reshape_progress < *suspend_point)
4021 *suspend_point = info->reshape_progress;
4022 if (*suspend_point + target < info->reshape_progress)
4023 /* No need to move suspend region yet */;
4024 else {
4025 if (*suspend_point >= 2 * target)
4026 *suspend_point -= 2 * target;
4027 else
4028 *suspend_point = 0;
4029 sysfs_set_num(info, NULL, "suspend_lo",
4030 *suspend_point);
4031 }
4032 if (max_progress < *suspend_point)
4033 max_progress = *suspend_point;
4034 }
4035 }
4036
4037 /* now set sync_max to allow that progress. sync_max, like
4038 * sync_completed, is a count of sectors written per device, so
4039 * we find the difference between max_progress and the start point,
4040 * and divide that by after.data_disks to get a sync_max
4041 * number.
4042 * At the same time we convert wait_point to a similar number
4043 * for comparing against sync_completed.
4044 */
4045 /* scale down max_progress to per_disk */
4046 max_progress /= reshape->after.data_disks;
4047 /* Round to chunk size as some kernels give an erroneously high number */
4048 max_progress /= info->new_chunk/512;
4049 max_progress *= info->new_chunk/512;
4050 /* And round to old chunk size as the kernel wants that */
4051 max_progress /= info->array.chunk_size/512;
4052 max_progress *= info->array.chunk_size/512;
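/* Worked example (hypothetical numbers): with a 512K new chunk
 * (1024 sectors) and a 64K old chunk (128 sectors), a per-device
 * max_progress of 5000 sectors rounds down to 4096 here.
 */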
4053 /* Limit progress to the whole device */
4054 if (max_progress > info->component_size)
4055 max_progress = info->component_size;
4056 wait_point /= reshape->after.data_disks;
4057 if (!advancing) {
4058 /* switch from 'device offset' to 'processed block count' */
4059 max_progress = info->component_size - max_progress;
4060 wait_point = info->component_size - wait_point;
4061 }
4062
4063 if (!*frozen)
4064 sysfs_set_num(info, NULL, "sync_max", max_progress);
4065
4066 /* Now wait. If we have already reached the point that we were
4067 * asked to wait to, don't wait at all, else wait for any change.
4068 * We need to select on 'sync_completed' as that is where
4069 * notifications happen, but we are really interested in
4070 * 'reshape_position'.
4071 */
4072 fd = sysfs_get_fd(info, NULL, "sync_completed");
4073 if (fd < 0)
4074 goto check_progress;
4075
4076 if (sysfs_fd_get_ll(fd, &completed) < 0)
4077 goto check_progress;
4078
4079 while (completed < max_progress && completed < wait_point) {
4080 /* Check that sync_action is still 'reshape' to avoid
4081 * waiting forever on a dead array
4082 */
4083 char action[20];
4084 if (sysfs_get_str(info, NULL, "sync_action",
4085 action, 20) <= 0 ||
4086 strncmp(action, "reshape", 7) != 0)
4087 break;
4088 /* Some kernels reset 'sync_completed' to zero
4089 * before setting 'sync_action' to 'idle'.
4090 * So we need these extra tests.
4091 */
4092 if (completed == 0 && advancing
4093 && strncmp(action, "idle", 4) == 0
4094 && info->reshape_progress > 0)
4095 break;
4096 if (completed == 0 && !advancing
4097 && strncmp(action, "idle", 4) == 0
4098 && info->reshape_progress < (info->component_size
4099 * reshape->after.data_disks))
4100 break;
4101 sysfs_wait(fd, NULL);
4102 if (sysfs_fd_get_ll(fd, &completed) < 0)
4103 goto check_progress;
4104 }
4105 /* Some kernels reset 'sync_completed' to zero,
4106 * but we need the real point the reshape has reached in md.
4107 * So in that case, read 'reshape_position' from sysfs.
4108 */
4109 if (completed == 0) {
4110 unsigned long long reshapep;
4111 char action[20];
4112 if (sysfs_get_str(info, NULL, "sync_action",
4113 action, 20) > 0 &&
4114 strncmp(action, "idle", 4) == 0 &&
4115 sysfs_get_ll(info, NULL,
4116 "reshape_position", &reshapep) == 0)
4117 *reshape_completed = reshapep;
4118 } else {
4119 /* some kernels can give an incorrectly high
4120 * 'completed' number, so round down */
4121 completed /= (info->new_chunk/512);
4122 completed *= (info->new_chunk/512);
4123 /* Convert 'completed' back in to a 'progress' number */
4124 completed *= reshape->after.data_disks;
4125 if (!advancing)
4126 completed = (info->component_size
4127 * reshape->after.data_disks
4128 - completed);
4129 *reshape_completed = completed;
4130 }
4131
4132 close(fd);
4133
4134 /* We return the need_backup flag. Caller will decide
4135 * how much - a multiple of ->backup_blocks up to *suspend_point
4136 */
4137 if (advancing)
4138 return need_backup > info->reshape_progress;
4139 else
4140 return need_backup >= info->reshape_progress;
4141
4142 check_progress:
4143 /* if we couldn't read a number from sync_completed, then
4144 * either the reshape did complete, or it aborted.
4145 * We can tell which by checking for 'none' in reshape_position.
4146 * If it did abort, then it might immediately restart if it
4147 * was just a device failure that leaves us degraded but
4148 * functioning.
4149 */
4150 if (sysfs_get_str(info, NULL, "reshape_position", buf, sizeof(buf)) < 0
4151 || strncmp(buf, "none", 4) != 0) {
4152 /* The abort might only be temporary. Wait up to 10
4153 * seconds for fd to contain a valid number again.
4154 */
4155 int wait = 10000;
4156 int rv = -2;
4157 unsigned long long new_sync_max;
4158 while (fd >= 0 && rv < 0 && wait > 0) {
4159 if (sysfs_wait(fd, &wait) != 1)
4160 break;
4161 switch (sysfs_fd_get_ll(fd, &completed)) {
4162 case 0:
4163 /* all good again */
4164 rv = 1;
4165 /* If "sync_max" is no longer max_progress
4166 * we need to freeze things
4167 */
4168 sysfs_get_ll(info, NULL, "sync_max", &new_sync_max);
4169 *frozen = (new_sync_max != max_progress);
4170 break;
4171 case -2: /* read error - abort */
4172 wait = 0;
4173 break;
4174 }
4175 }
4176 if (fd >= 0)
4177 close(fd);
4178 return rv; /* abort */
4179 } else {
4180 /* Maybe racing with array shutdown - check state */
4181 if (fd >= 0)
4182 close(fd);
4183 if (sysfs_get_str(info, NULL, "array_state", buf, sizeof(buf)) < 0
4184 || strncmp(buf, "inactive", 8) == 0
4185 || strncmp(buf, "clear",5) == 0)
4186 return -2; /* abort */
4187 return -1; /* complete */
4188 }
4189 }
4190
4191 /* FIXME return status is never checked */
4192 static int grow_backup(struct mdinfo *sra,
4193 unsigned long long offset, /* per device */
4194 unsigned long stripes, /* per device, in old chunks */
4195 int *sources, unsigned long long *offsets,
4196 int disks, int chunk, int level, int layout,
4197 int dests, int *destfd, unsigned long long *destoffsets,
4198 int part, int *degraded,
4199 char *buf)
4200 {
4201 /* Backup 'blocks' sectors at 'offset' on each device of the array,
4202 * to storage 'destfd' (offset 'destoffsets'), after first
4203 * suspending IO. Then allow resync to continue
4204 * over the suspended section.
4205 * Use part 'part' of the backup-super-block.
4206 */
4207 int odata = disks;
4208 int rv = 0;
4209 int i;
4210 unsigned long long ll;
4211 int new_degraded;
4212 //printf("offset %llu\n", offset);
4213 if (level >= 4)
4214 odata--;
4215 if (level == 6)
4216 odata--;
4217
4218 /* Check that the array hasn't become degraded, else we might back up the wrong data */
4219 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
4220 return -1; /* FIXME this error is ignored */
4221 new_degraded = (int)ll;
4222 if (new_degraded != *degraded) {
4223 /* check each device to ensure it is still working */
4224 struct mdinfo *sd;
4225 for (sd = sra->devs ; sd ; sd = sd->next) {
4226 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4227 continue;
4228 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
4229 char sbuf[100];
4230
4231 if (sysfs_get_str(sra, sd, "state",
4232 sbuf, sizeof(sbuf)) < 0 ||
4233 strstr(sbuf, "faulty") ||
4234 strstr(sbuf, "in_sync") == NULL) {
4235 /* this device is dead */
4236 sd->disk.state = (1<<MD_DISK_FAULTY);
4237 if (sd->disk.raid_disk >= 0 &&
4238 sources[sd->disk.raid_disk] >= 0) {
4239 close(sources[sd->disk.raid_disk]);
4240 sources[sd->disk.raid_disk] = -1;
4241 }
4242 }
4243 }
4244 }
4245 *degraded = new_degraded;
4246 }
4247 if (part) {
4248 bsb.arraystart2 = __cpu_to_le64(offset * odata);
4249 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
4250 } else {
4251 bsb.arraystart = __cpu_to_le64(offset * odata);
4252 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
4253 }
4254 if (part)
4255 bsb.magic[15] = '2';
4256 for (i = 0; i < dests; i++)
4257 if (part)
4258 lseek64(destfd[i], destoffsets[i] + __le64_to_cpu(bsb.devstart2)*512, 0);
4259 else
4260 lseek64(destfd[i], destoffsets[i], 0);
4261
4262 rv = save_stripes(sources, offsets,
4263 disks, chunk, level, layout,
4264 dests, destfd,
4265 offset*512*odata, stripes * chunk * odata,
4266 buf);
4267
4268 if (rv)
4269 return rv;
4270 bsb.mtime = __cpu_to_le64(time(0));
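/* The loop below writes the updated backup superblock 4096 bytes
 * before destoffsets[i]; when destoffsets[i] > 4096 a second copy is
 * also written at destoffsets[i] + stripes*chunk*odata, and the
 * destination is fsync()ed.
 */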
4271 for (i = 0; i < dests; i++) {
4272 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4273
4274 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4275 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4276 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4277 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4278
4279 rv = -1;
4280 if ((unsigned long long)lseek64(destfd[i], destoffsets[i] - 4096, 0)
4281 != destoffsets[i] - 4096)
4282 break;
4283 if (write(destfd[i], &bsb, 512) != 512)
4284 break;
4285 if (destoffsets[i] > 4096) {
4286 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
4287 destoffsets[i]+stripes*chunk*odata)
4288 break;
4289 if (write(destfd[i], &bsb, 512) != 512)
4290 break;
4291 }
4292 fsync(destfd[i]);
4293 rv = 0;
4294 }
4295
4296 return rv;
4297 }
4298
4299 /* in 2.6.30, the value reported by sync_completed can be
4300 * less than it should be by one stripe.
4301 * This only happens when reshape hits sync_max and pauses.
4302 * So allow wait_backup to either extend sync_max further
4303 * than strictly necessary, or return before the
4304 * sync has got quite as far as we would really like.
4305 * This is what 'blocks2' is for.
4306 * The various callers give appropriate values so that
4307 * everything works.
4308 */
4309 /* FIXME return value is often ignored */
4310 static int forget_backup(int dests, int *destfd,
4311 unsigned long long *destoffsets,
4312 int part)
4313 {
4314 /*
4315 * Erase backup 'part' (which is 0 or 1)
4316 */
4317 int i;
4318 int rv;
4319
4320 if (part) {
4321 bsb.arraystart2 = __cpu_to_le64(0);
4322 bsb.length2 = __cpu_to_le64(0);
4323 } else {
4324 bsb.arraystart = __cpu_to_le64(0);
4325 bsb.length = __cpu_to_le64(0);
4326 }
4327 bsb.mtime = __cpu_to_le64(time(0));
4328 rv = 0;
4329 for (i = 0; i < dests; i++) {
4330 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4331 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4332 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4333 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4334 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4335 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4336 destoffsets[i]-4096)
4337 rv = -1;
4338 if (rv == 0 &&
4339 write(destfd[i], &bsb, 512) != 512)
4340 rv = -1;
4341 fsync(destfd[i]);
4342 }
4343 return rv;
4344 }
4345
4346 static void fail(char *msg)
4347 {
4348 int rv;
4349 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4350 rv |= (write(2, "\n", 1) != 1);
4351 exit(rv ? 1 : 2);
4352 }
4353
4354 static char *abuf, *bbuf;
4355 static unsigned long long abuflen;
4356 static void validate(int afd, int bfd, unsigned long long offset)
4357 {
4358 /* check the data in the backup against the array.
4359 * This is only used for regression testing and should not
4360 * be used while the array is active
4361 */
4362 if (afd < 0)
4363 return;
4364 lseek64(bfd, offset - 4096, 0);
4365 if (read(bfd, &bsb2, 512) != 512)
4366 fail("cannot read bsb");
4367 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4368 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4369 fail("first csum bad");
4370 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4371 fail("magic is bad");
4372 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4373 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4374 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4375 fail("second csum bad");
4376
4377 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4378 fail("devstart is wrong");
4379
4380 if (bsb2.length) {
4381 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4382
4383 if (abuflen < len) {
4384 free(abuf);
4385 free(bbuf);
4386 abuflen = len;
4387 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4388 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4389 abuflen = 0;
4390 /* just stop validating on mem-alloc failure */
4391 return;
4392 }
4393 }
4394
4395 lseek64(bfd, offset, 0);
4396 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4397 //printf("len %llu\n", len);
4398 fail("read first backup failed");
4399 }
4400 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4401 if ((unsigned long long)read(afd, abuf, len) != len)
4402 fail("read first from array failed");
4403 if (memcmp(bbuf, abuf, len) != 0) {
4404 #if 0
4405 int i;
4406 printf("offset=%llu len=%llu\n",
4407 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4408 for (i=0; i<len; i++)
4409 if (bbuf[i] != abuf[i]) {
4410 printf("first diff byte %d\n", i);
4411 break;
4412 }
4413 #endif
4414 fail("data1 compare failed");
4415 }
4416 }
4417 if (bsb2.length2) {
4418 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4419
4420 if (abuflen < len) {
4421 free(abuf);
4422 free(bbuf);
4423 abuflen = len;
4424 abuf = xmalloc(abuflen);
4425 bbuf = xmalloc(abuflen);
4426 }
4427
4428 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4429 if ((unsigned long long)read(bfd, bbuf, len) != len)
4430 fail("read second backup failed");
4431 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4432 if ((unsigned long long)read(afd, abuf, len) != len)
4433 fail("read second from array failed");
4434 if (memcmp(bbuf, abuf, len) != 0)
4435 fail("data2 compare failed");
4436 }
4437 }
4438
4439 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4440 struct supertype *st, unsigned long blocks,
4441 int *fds, unsigned long long *offsets,
4442 int dests, int *destfd, unsigned long long *destoffsets)
4443 {
4444 /* Monitor a reshape where backup is being performed using the
4445 * 'native' mechanism - either to a backup file, or
4446 * to some space in a spare.
4447 */
4448 char *buf;
4449 int degraded = -1;
4450 unsigned long long speed;
4451 unsigned long long suspend_point, array_size;
4452 unsigned long long backup_point, wait_point;
4453 unsigned long long reshape_completed;
4454 int done = 0;
4455 int increasing = reshape->after.data_disks >= reshape->before.data_disks;
4456 int part = 0; /* The next part of the backup area to fill. It may already
4457 * be full, so we need to check */
4458 int level = reshape->level;
4459 int layout = reshape->before.layout;
4460 int data = reshape->before.data_disks;
4461 int disks = reshape->before.data_disks + reshape->parity;
4462 int chunk = sra->array.chunk_size;
4463 struct mdinfo *sd;
4464 unsigned long stripes;
4465 int uuid[4];
4466 int frozen = 0;
4467
4468 /* set up the backup-super-block. This requires the
4469 * uuid from the array.
4470 */
4471 /* Find a superblock */
4472 for (sd = sra->devs; sd; sd = sd->next) {
4473 char *dn;
4474 int devfd;
4475 int ok;
4476 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4477 continue;
4478 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4479 devfd = dev_open(dn, O_RDONLY);
4480 if (devfd < 0)
4481 continue;
4482 ok = st->ss->load_super(st, devfd, NULL);
4483 close(devfd);
4484 if (ok == 0)
4485 break;
4486 }
4487 if (!sd) {
4488 pr_err("Cannot find a superblock\n");
4489 return 0;
4490 }
4491
4492 memset(&bsb, 0, 512);
4493 memcpy(bsb.magic, "md_backup_data-1", 16);
4494 st->ss->uuid_from_super(st, uuid);
4495 memcpy(bsb.set_uuid, uuid, 16);
4496 bsb.mtime = __cpu_to_le64(time(0));
4497 bsb.devstart2 = blocks;
4498
4499 stripes = blocks / (sra->array.chunk_size/512) /
4500 reshape->before.data_disks;
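/* Example (hypothetical numbers): with blocks = 8192 sectors, a 64K
 * chunk (128 sectors) and 4 data disks, stripes = 16 old stripes
 * per device.
 */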
4501
4502 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4503 /* Don't start the 'reshape' */
4504 return 0;
4505 if (reshape->before.data_disks == reshape->after.data_disks) {
4506 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4507 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4508 }
4509
4510 if (increasing) {
4511 array_size = sra->component_size * reshape->after.data_disks;
4512 backup_point = sra->reshape_progress;
4513 suspend_point = 0;
4514 } else {
4515 array_size = sra->component_size * reshape->before.data_disks;
4516 backup_point = reshape->backup_blocks;
4517 suspend_point = array_size;
4518 }
4519
4520 while (!done) {
4521 int rv;
4522
4523 /* Want to return as soon as the oldest backup slot can
4524 * be released, as that allows us to start backing up
4525 * some more, provided suspend_point has been
4526 * advanced, which it should have been.
4527 */
4528 if (increasing) {
4529 wait_point = array_size;
4530 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4531 wait_point = (__le64_to_cpu(bsb.arraystart) +
4532 __le64_to_cpu(bsb.length));
4533 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4534 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4535 __le64_to_cpu(bsb.length2));
4536 } else {
4537 wait_point = 0;
4538 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4539 wait_point = __le64_to_cpu(bsb.arraystart);
4540 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4541 wait_point = __le64_to_cpu(bsb.arraystart2);
4542 }
4543
4544 reshape_completed = sra->reshape_progress;
4545 rv = progress_reshape(sra, reshape,
4546 backup_point, wait_point,
4547 &suspend_point, &reshape_completed,
4548 &frozen);
4549 /* external metadata would need to ping_monitor here */
4550 sra->reshape_progress = reshape_completed;
4551
4552 /* Clear any backup region that is before 'here' */
4553 if (increasing) {
4554 if (__le64_to_cpu(bsb.length) > 0 &&
4555 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4556 __le64_to_cpu(bsb.length)))
4557 forget_backup(dests, destfd,
4558 destoffsets, 0);
4559 if (__le64_to_cpu(bsb.length2) > 0 &&
4560 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4561 __le64_to_cpu(bsb.length2)))
4562 forget_backup(dests, destfd,
4563 destoffsets, 1);
4564 } else {
4565 if (__le64_to_cpu(bsb.length) > 0 &&
4566 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4567 forget_backup(dests, destfd,
4568 destoffsets, 0);
4569 if (__le64_to_cpu(bsb.length2) > 0 &&
4570 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4571 forget_backup(dests, destfd,
4572 destoffsets, 1);
4573 }
4574 if (sigterm)
4575 rv = -2;
4576 if (rv < 0) {
4577 if (rv == -1)
4578 done = 1;
4579 break;
4580 }
4581 if (rv == 0 && increasing && !st->ss->external) {
4582 /* No longer need to monitor this reshape */
4583 sysfs_set_str(sra, NULL, "sync_max", "max");
4584 done = 1;
4585 break;
4586 }
4587
4588 while (rv) {
4589 unsigned long long offset;
4590 unsigned long actual_stripes;
4591 /* Need to back up some data.
4592 * If 'part' is not used and the desired
4593 * backup size is suspended, do a backup,
4594 * then consider the next part.
4595 */
4596 /* Check that 'part' is unused */
4597 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4598 break;
4599 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4600 break;
4601
4602 offset = backup_point / data;
4603 actual_stripes = stripes;
4604 if (increasing) {
4605 if (offset + actual_stripes * (chunk/512) >
4606 sra->component_size)
4607 actual_stripes = ((sra->component_size - offset)
4608 / (chunk/512));
4609 if (offset + actual_stripes * (chunk/512) >
4610 suspend_point/data)
4611 break;
4612 } else {
4613 if (offset < actual_stripes * (chunk/512))
4614 actual_stripes = offset / (chunk/512);
4615 offset -= actual_stripes * (chunk/512);
4616 if (offset < suspend_point/data)
4617 break;
4618 }
4619 if (actual_stripes == 0)
4620 break;
4621 grow_backup(sra, offset, actual_stripes,
4622 fds, offsets,
4623 disks, chunk, level, layout,
4624 dests, destfd, destoffsets,
4625 part, &degraded, buf);
4626 validate(afd, destfd[0], destoffsets[0]);
4627 /* record where 'part' is up to */
4628 part = !part;
4629 if (increasing)
4630 backup_point += actual_stripes * (chunk/512) * data;
4631 else
4632 backup_point -= actual_stripes * (chunk/512) * data;
4633 }
4634 }
4635
4636 /* FIXME maybe call progress_reshape one more time instead */
4637 /* remove any remaining suspension */
4638 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4639 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4640 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4641 sysfs_set_num(sra, NULL, "sync_min", 0);
4642
4643 if (reshape->before.data_disks == reshape->after.data_disks)
4644 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4645 free(buf);
4646 return done;
4647 }
4648
4649 /*
4650 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4651 * write that data into the array and update the super blocks with
4652 * the new reshape_progress
4653 */
4654 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, int cnt,
4655 char *backup_file, int verbose)
4656 {
4657 int i, j;
4658 int old_disks;
4659 unsigned long long *offsets;
4660 unsigned long long nstripe, ostripe;
4661 int ndata, odata;
4662
4663 odata = info->array.raid_disks - info->delta_disks - 1;
4664 if (info->array.level == 6) odata--; /* number of data disks */
4665 ndata = info->array.raid_disks - 1;
4666 if (info->new_level == 6) ndata--;
4667
4668 old_disks = info->array.raid_disks - info->delta_disks;
4669
4670 if (info->delta_disks <= 0)
4671 /* Didn't grow, so the backup file must have
4672 * been used
4673 */
4674 old_disks = cnt;
4675 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4676 struct mdinfo dinfo;
4677 int fd;
4678 int bsbsize;
4679 char *devname, namebuf[20];
4680 unsigned long long lo, hi;
4681
4682 /* This was a spare and may have some saved data on it.
4683 * Load the superblock, find and load the
4684 * backup_super_block.
4685 * If either fails, go on to the next device.
4686 * If the backup contains no new info, just return;
4687 * else restore data and update all superblocks.
4688 */
4689 if (i == old_disks-1) {
4690 fd = open(backup_file, O_RDONLY);
4691 if (fd<0) {
4692 pr_err("backup file %s inaccessible: %s\n",
4693 backup_file, strerror(errno));
4694 continue;
4695 }
4696 devname = backup_file;
4697 } else {
4698 fd = fdlist[i];
4699 if (fd < 0)
4700 continue;
4701 if (st->ss->load_super(st, fd, NULL))
4702 continue;
4703
4704 st->ss->getinfo_super(st, &dinfo, NULL);
4705 st->ss->free_super(st);
4706
4707 if (lseek64(fd,
4708 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4709 0) < 0) {
4710 pr_err("Cannot seek on device %d\n", i);
4711 continue; /* Cannot seek */
4712 }
4713 sprintf(namebuf, "device-%d", i);
4714 devname = namebuf;
4715 }
4716 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4717 if (verbose)
4718 pr_err("Cannot read from %s\n", devname);
4719 continue; /* Cannot read */
4720 }
4721 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4722 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4723 if (verbose)
4724 pr_err("No backup metadata on %s\n", devname);
4725 continue;
4726 }
4727 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4728 if (verbose)
4729 pr_err("Bad backup-metadata checksum on %s\n", devname);
4730 continue; /* bad checksum */
4731 }
4732 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4733 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4734 if (verbose)
4735 pr_err("Bad backup-metadata checksum2 on %s\n", devname);
4736 continue; /* Bad second checksum */
4737 }
4738 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4739 if (verbose)
4740 pr_err("Wrong uuid on backup-metadata on %s\n", devname);
4741 continue; /* Wrong uuid */
4742 }
4743
4744 /* array utime and backup-mtime should be updated at much the same time, but it seems that
4745 * sometimes they aren't... So allow considerable flexibility in matching, and allow
4746 * this test to be overridden by an environment variable.
4747 */
4748 if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
4749 time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
4750 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4751 pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
4752 (unsigned long)__le64_to_cpu(bsb.mtime),
4753 (unsigned long)info->array.utime);
4754 } else {
4755 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4756 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4757 continue; /* time stamp is too bad */
4758 }
4759 }
4760
4761 if (bsb.magic[15] == '1') {
4762 if (bsb.length == 0)
4763 continue;
4764 if (info->delta_disks >= 0) {
4765 /* reshape_progress is increasing */
4766 if (__le64_to_cpu(bsb.arraystart)
4767 + __le64_to_cpu(bsb.length)
4768 < info->reshape_progress) {
4769 nonew:
4770 if (verbose)
4771 pr_err("backup-metadata found on %s but is not needed\n", devname);
4772 continue; /* No new data here */
4773 }
4774 } else {
4775 /* reshape_progress is decreasing */
4776 if (__le64_to_cpu(bsb.arraystart) >=
4777 info->reshape_progress)
4778 goto nonew; /* No new data here */
4779 }
4780 } else {
4781 if (bsb.length == 0 && bsb.length2 == 0)
4782 continue;
4783 if (info->delta_disks >= 0) {
4784 /* reshape_progress is increasing */
4785 if ((__le64_to_cpu(bsb.arraystart)
4786 + __le64_to_cpu(bsb.length)
4787 < info->reshape_progress)
4788 &&
4789 (__le64_to_cpu(bsb.arraystart2)
4790 + __le64_to_cpu(bsb.length2)
4791 < info->reshape_progress))
4792 goto nonew; /* No new data here */
4793 } else {
4794 /* reshape_progress is decreasing */
4795 if (__le64_to_cpu(bsb.arraystart) >=
4796 info->reshape_progress &&
4797 __le64_to_cpu(bsb.arraystart2) >=
4798 info->reshape_progress)
4799 goto nonew; /* No new data here */
4800 }
4801 }
4802 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4803 second_fail:
4804 if (verbose)
4805 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4806 devname);
4807 continue; /* Cannot seek */
4808 }
4809 /* There should be a duplicate backup superblock 4k before here */
4810 if (lseek64(fd, -4096, 1) < 0 ||
4811 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4812 goto second_fail; /* Cannot find leading superblock */
4813 if (bsb.magic[15] == '1')
4814 bsbsize = offsetof(struct mdp_backup_super, pad1);
4815 else
4816 bsbsize = offsetof(struct mdp_backup_super, pad);
4817 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4818 goto second_fail; /* Cannot find leading superblock */
4819
4820 /* Now need the data offsets for all devices. */
4821 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4822 for(j=0; j<info->array.raid_disks; j++) {
4823 if (fdlist[j] < 0)
4824 continue;
4825 if (st->ss->load_super(st, fdlist[j], NULL))
4826 /* FIXME should this be an error? */
4827 continue;
4828 st->ss->getinfo_super(st, &dinfo, NULL);
4829 st->ss->free_super(st);
4830 offsets[j] = dinfo.data_offset * 512;
4831 }
4832 printf("%s: restoring critical section\n", Name);
4833
4834 if (restore_stripes(fdlist, offsets,
4835 info->array.raid_disks,
4836 info->new_chunk,
4837 info->new_level,
4838 info->new_layout,
4839 fd, __le64_to_cpu(bsb.devstart)*512,
4840 __le64_to_cpu(bsb.arraystart)*512,
4841 __le64_to_cpu(bsb.length)*512, NULL)) {
4842 /* didn't succeed, so give up */
4843 if (verbose)
4844 pr_err("Error restoring backup from %s\n",
4845 devname);
4846 free(offsets);
4847 return 1;
4848 }
4849
4850 if (bsb.magic[15] == '2' &&
4851 restore_stripes(fdlist, offsets,
4852 info->array.raid_disks,
4853 info->new_chunk,
4854 info->new_level,
4855 info->new_layout,
4856 fd, __le64_to_cpu(bsb.devstart)*512 +
4857 __le64_to_cpu(bsb.devstart2)*512,
4858 __le64_to_cpu(bsb.arraystart2)*512,
4859 __le64_to_cpu(bsb.length2)*512, NULL)) {
4860 /* didn't succeed, so give up */
4861 if (verbose)
4862 pr_err("Error restoring second backup from %s\n",
4863 devname);
4864 free(offsets);
4865 return 1;
4866 }
4867
4868 free(offsets);
4869
4870 /* Ok, so the data is restored. Let's update those superblocks. */
4871
4872 lo = hi = 0;
4873 if (bsb.length) {
4874 lo = __le64_to_cpu(bsb.arraystart);
4875 hi = lo + __le64_to_cpu(bsb.length);
4876 }
4877 if (bsb.magic[15] == '2' && bsb.length2) {
4878 unsigned long long lo1, hi1;
4879 lo1 = __le64_to_cpu(bsb.arraystart2);
4880 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4881 if (lo == hi) {
4882 lo = lo1;
4883 hi = hi1;
4884 } else if (lo < lo1)
4885 hi = hi1;
4886 else
4887 lo = lo1;
4888 }
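/* [lo, hi) now spans the restored region(s); if reshape_progress
 * lies outside it, the metadata is already consistent and is left
 * untouched below.
 */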
4889 if (lo < hi &&
4890 (info->reshape_progress < lo ||
4891 info->reshape_progress > hi))
4892 /* backup does not affect reshape_progress */ ;
4893 else if (info->delta_disks >= 0) {
4894 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4895 __le64_to_cpu(bsb.length);
4896 if (bsb.magic[15] == '2') {
4897 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2) +
4898 __le64_to_cpu(bsb.length2);
4899 if (p2 > info->reshape_progress)
4900 info->reshape_progress = p2;
4901 }
4902 } else {
4903 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4904 if (bsb.magic[15] == '2') {
4905 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2);
4906 if (p2 < info->reshape_progress)
4907 info->reshape_progress = p2;
4908 }
4909 }
4910 for (j=0; j<info->array.raid_disks; j++) {
4911 if (fdlist[j] < 0)
4912 continue;
4913 if (st->ss->load_super(st, fdlist[j], NULL))
4914 continue;
4915 st->ss->getinfo_super(st, &dinfo, NULL);
4916 dinfo.reshape_progress = info->reshape_progress;
4917 st->ss->update_super(st, &dinfo,
4918 "_reshape_progress",
4919 NULL,0, 0, NULL);
4920 st->ss->store_super(st, fdlist[j]);
4921 st->ss->free_super(st);
4922 }
4923 return 0;
4924 }
4925 /* Didn't find any backup data, try to see if any
4926 * was needed.
4927 */
4928 if (info->delta_disks < 0) {
4929 /* When shrinking, the critical section is at the end.
4930 * So see if we are before the critical section.
4931 */
4932 unsigned long long first_block;
4933 nstripe = ostripe = 0;
4934 first_block = 0;
4935 while (ostripe >= nstripe) {
4936 ostripe += info->array.chunk_size / 512;
4937 first_block = ostripe * odata;
4938 nstripe = first_block / ndata / (info->new_chunk/512) *
4939 (info->new_chunk/512);
4940 }
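/* Worked example (hypothetical numbers): shrinking a RAID5 from 5 to
 * 4 devices (odata = 4, ndata = 3) with 64K chunks old and new
 * (128 sectors): ostripe steps through 128, 256, 384 and the loop
 * exits once nstripe (512) overtakes it, leaving first_block = 1536.
 */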
4941
4942 if (info->reshape_progress >= first_block)
4943 return 0;
4944 }
4945 if (info->delta_disks > 0) {
4946 /* See if we are beyond the critical section. */
4947 unsigned long long last_block;
4948 nstripe = ostripe = 0;
4949 last_block = 0;
4950 while (nstripe >= ostripe) {
4951 nstripe += info->new_chunk / 512;
4952 last_block = nstripe * ndata;
4953 ostripe = last_block / odata / (info->array.chunk_size/512) *
4954 (info->array.chunk_size/512);
4955 }
4956
4957 if (info->reshape_progress >= last_block)
4958 return 0;
4959 }
4960 /* needed to recover critical section! */
4961 if (verbose)
4962 pr_err("Failed to find backup of critical section\n");
4963 return 1;
4964 }
4965
4966 int Grow_continue_command(char *devname, int fd,
4967 char *backup_file, int verbose)
4968 {
4969 int ret_val = 0;
4970 struct supertype *st = NULL;
4971 struct mdinfo *content = NULL;
4972 struct mdinfo array;
4973 char *subarray = NULL;
4974 struct mdinfo *cc = NULL;
4975 struct mdstat_ent *mdstat = NULL;
4976 int cfd = -1;
4977 int fd2;
4978
4979 dprintf("Grow continue from command line called for %s\n",
4980 devname);
4981
4982 st = super_by_fd(fd, &subarray);
4983 if (!st || !st->ss) {
4984 pr_err("Unable to determine metadata format for %s\n",
4985 devname);
4986 return 1;
4987 }
4988 dprintf("Grow continue is run for ");
4989 if (st->ss->external == 0) {
4990 int d;
4991 int cnt = 5;
4992 dprintf_cont("native array (%s)\n", devname);
4993 if (ioctl(fd, GET_ARRAY_INFO, &array.array) < 0) {
4994 pr_err("%s is not an active md array - aborting\n", devname);
4995 ret_val = 1;
4996 goto Grow_continue_command_exit;
4997 }
4998 content = &array;
4999 /* Need to load a superblock.
5000 * FIXME we should really get what we need from
5001 * sysfs
5002 */
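/* Re-read the metadata a few times, sleeping between attempts,
 * until it reports an active reshape.
 */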
5003 do {
5004 for (d = 0; d < MAX_DISKS; d++) {
5005 mdu_disk_info_t disk;
5006 char *dv;
5007 int err;
5008 disk.number = d;
5009 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
5010 continue;
5011 if (disk.major == 0 && disk.minor == 0)
5012 continue;
5013 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
5014 continue;
5015 dv = map_dev(disk.major, disk.minor, 1);
5016 if (!dv)
5017 continue;
5018 fd2 = dev_open(dv, O_RDONLY);
5019 if (fd2 < 0)
5020 continue;
5021 err = st->ss->load_super(st, fd2, NULL);
5022 close(fd2);
5023 if (err)
5024 continue;
5025 break;
5026 }
5027 if (d == MAX_DISKS) {
5028 pr_err("Unable to load metadata for %s\n",
5029 devname);
5030 ret_val = 1;
5031 goto Grow_continue_command_exit;
5032 }
5033 st->ss->getinfo_super(st, content, NULL);
5034 if (!content->reshape_active)
5035 sleep(3);
5036 else
5037 break;
5038 } while (cnt-- > 0);
5039 } else {
5040 char *container;
5041
5042 if (subarray) {
5043 dprintf_cont("subarray (%s)\n", subarray);
5044 container = st->container_devnm;
5045 cfd = open_dev_excl(st->container_devnm);
5046 } else {
5047 container = st->devnm;
5048 close(fd);
5049 cfd = open_dev_excl(st->devnm);
5050 dprintf_cont("container (%s)\n", container);
5051 fd = cfd;
5052 }
5053 if (cfd < 0) {
5054 pr_err("Unable to open container for %s\n", devname);
5055 ret_val = 1;
5056 goto Grow_continue_command_exit;
5057 }
5058
5059 /* find the array under reshape in the container
5060 */
5061 ret_val = st->ss->load_container(st, cfd, NULL);
5062 if (ret_val) {
5063 pr_err("Cannot read superblock for %s\n",
5064 devname);
5065 ret_val = 1;
5066 goto Grow_continue_command_exit;
5067 }
5068
5069 cc = st->ss->container_content(st, subarray);
5070 for (content = cc; content ; content = content->next) {
5071 char *array;
5072 int allow_reshape = 1;
5073
5074 if (content->reshape_active == 0)
5075 continue;
5076 /* The decision about array or container-wide
5077 * reshape is taken in Grow_continue based on
5078 * content->reshape_active state, therefore we
5079 * need to check_reshape based on
5080 * reshape_active and subarray name.
5081 */
5082 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
5083 allow_reshape = 0;
5084 if (content->reshape_active == CONTAINER_RESHAPE &&
5085 (content->array.state
5086 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
5087 allow_reshape = 0;
5088
5089 if (!allow_reshape) {
5090 pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
5091 devname, container);
5092 ret_val = 1;
5093 goto Grow_continue_command_exit;
5094 }
5095
5096 array = strchr(content->text_version+1, '/')+1;
5097 mdstat = mdstat_by_subdev(array, container);
5098 if (!mdstat)
5099 continue;
5100 if (mdstat->active == 0) {
5101 pr_err("Skipping inactive array %s.\n",
5102 mdstat->devnm);
5103 free_mdstat(mdstat);
5104 mdstat = NULL;
5105 continue;
5106 }
5107 break;
5108 }
5109 if (!content) {
5110 pr_err("Unable to determine reshaped array for %s\n", devname);
5111 ret_val = 1;
5112 goto Grow_continue_command_exit;
5113 }
5114 fd2 = open_dev(mdstat->devnm);
5115 if (fd2 < 0) {
5116 pr_err("cannot open (%s)\n", mdstat->devnm);
5117 ret_val = 1;
5118 goto Grow_continue_command_exit;
5119 }
5120
5121 sysfs_init(content, fd2, mdstat->devnm);
5122
5123 close(fd2);
5124
5125 /* start mdmon in case it is not running
5126 */
5127 if (!mdmon_running(container))
5128 start_mdmon(container);
5129 ping_monitor(container);
5130
5131 if (mdmon_running(container))
5132 st->update_tail = &st->updates;
5133 else {
5134 pr_err("No mdmon found. Grow cannot continue.\n");
5135 ret_val = 1;
5136 goto Grow_continue_command_exit;
5137 }
5138 }
5139
5140 /* verify that the array under reshape is started from
5141 * the correct position
5142 */
5143 if (verify_reshape_position(content, content->array.level) < 0) {
5144 ret_val = 1;
5145 goto Grow_continue_command_exit;
5146 }
5147
5148 /* continue reshape
5149 */
5150 ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);
5151
5152 Grow_continue_command_exit:
5153 if (cfd > -1)
5154 close(cfd);
5155 st->ss->free_super(st);
5156 free_mdstat(mdstat);
5157 sysfs_free(cc);
5158 free(subarray);
5159
5160 return ret_val;
5161 }
5162
5163 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
5164 char *backup_file, int forked, int freeze_reshape)
5165 {
5166 int ret_val = 2;
5167
5168 if (!info->reshape_active)
5169 return ret_val;
5170
5171 if (st->ss->external) {
5172 int cfd = open_dev(st->container_devnm);
5173
5174 if (cfd < 0)
5175 return 1;
5176
5177 st->ss->load_container(st, cfd, st->container_devnm);
5178 close(cfd);
5179 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
5180 st, info, 0, backup_file,
5181 0, forked,
5182 1 | info->reshape_active,
5183 freeze_reshape);
5184 } else
5185 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
5186 NULL, INVALID_SECTORS,
5187 backup_file, 0, forked,
5188 1 | info->reshape_active,
5189 freeze_reshape);
5190
5191 return ret_val;
5192 }
5193
5194 char *make_backup(char *name)
5195 {
5196 char *base = "backup_file-";
5197 int len;
5198 char *fname;
5199
5200 len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1;
5201 fname = xmalloc(len);
5202 sprintf(fname, "%s/%s%s", MAP_DIR, base, name);
5203 return fname;
5204 }
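/* Example (hypothetical name): make_backup("md127") returns
 * "<MAP_DIR>/backup_file-md127"; locate_backup() below returns that
 * path only if it already exists as a regular file, else NULL.
 */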
5205
5206 char *locate_backup(char *name)
5207 {
5208 char *fl = make_backup(name);
5209 struct stat stb;
5210
5211 if (stat(fl, &stb) == 0 &&
5212 S_ISREG(stb.st_mode))
5213 return fl;
5214
5215 free(fl);
5216 return NULL;
5217 }