1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stddef.h>
28 #include <stdint.h>
29 #include <signal.h>
30 #include <sys/wait.h>
31
32 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
33 #error no endian defined
34 #endif
35 #include "md_u.h"
36 #include "md_p.h"
37
38 int restore_backup(struct supertype *st,
39 struct mdinfo *content,
40 int working_disks,
41 int next_spare,
42 char **backup_filep,
43 int verbose)
44 {
45 int i;
46 int *fdlist;
47 struct mdinfo *dev;
48 int err;
49 int disk_count = next_spare + working_disks;
50 char *backup_file = *backup_filep;
51
52 dprintf("Called restore_backup()\n");
53 fdlist = xmalloc(sizeof(int) * disk_count);
54
55 enable_fds(next_spare);
56 for (i = 0; i < next_spare; i++)
57 fdlist[i] = -1;
58 for (dev = content->devs; dev; dev = dev->next) {
59 char buf[22];
60 int fd;
61 sprintf(buf, "%d:%d",
62 dev->disk.major,
63 dev->disk.minor);
64 fd = dev_open(buf, O_RDWR);
65
66 if (dev->disk.raid_disk >= 0)
67 fdlist[dev->disk.raid_disk] = fd;
68 else
69 fdlist[next_spare++] = fd;
70 }
71
72 if (!backup_file) {
73 backup_file = locate_backup(content->sys_name);
74 *backup_filep = backup_file;
75 }
76
77 if (st->ss->external && st->ss->recover_backup)
78 err = st->ss->recover_backup(st, content);
79 else
80 err = Grow_restart(st, content, fdlist, next_spare,
81 backup_file, verbose > 0);
82
83 while (next_spare > 0) {
84 next_spare--;
85 if (fdlist[next_spare] >= 0)
86 close(fdlist[next_spare]);
87 }
88 free(fdlist);
89 if (err) {
90 pr_err("Failed to restore critical section for reshape - sorry.\n");
91 if (!backup_file)
92 pr_err("Possibly you need to specify a --backup-file\n");
93 return 1;
94 }
95
96 dprintf("restore_backup() returns status OK.\n");
97 return 0;
98 }
99
100 int Grow_Add_device(char *devname, int fd, char *newdev)
101 {
102 /* Add a device to an active array.
103 * Currently, just extend a linear array.
104 * This requires writing a new superblock on the
105 * new device, calling the kernel to add the device,
106 * and if that succeeds, update the superblock on
107 * all other devices.
108 * This means that we need to *find* all other devices.
109 */
110 struct mdinfo info;
111
112 struct stat stb;
113 int nfd, fd2;
114 int d, nd;
115 struct supertype *st = NULL;
116 char *subarray = NULL;
117
118 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
119 pr_err("cannot get array info for %s\n", devname);
120 return 1;
121 }
122
123 if (info.array.level != -1) {
124 pr_err("can only add devices to linear arrays\n");
125 return 1;
126 }
127
128 st = super_by_fd(fd, &subarray);
129 if (!st) {
130 pr_err("cannot handle arrays with superblock version %d\n",
131 info.array.major_version);
132 return 1;
133 }
134
135 if (subarray) {
136 pr_err("Cannot grow linear sub-arrays yet\n");
137 free(subarray);
138 free(st);
139 return 1;
140 }
141
142 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
143 if (nfd < 0) {
144 pr_err("cannot open %s\n", newdev);
145 free(st);
146 return 1;
147 }
148 fstat(nfd, &stb);
149 if ((stb.st_mode & S_IFMT) != S_IFBLK) {
150 pr_err("%s is not a block device!\n", newdev);
151 close(nfd);
152 free(st);
153 return 1;
154 }
155 /* now check out all the devices and make sure we can read the
156 * superblock */
157 for (d=0 ; d < info.array.raid_disks ; d++) {
158 mdu_disk_info_t disk;
159 char *dv;
160
161 st->ss->free_super(st);
162
163 disk.number = d;
164 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
165 pr_err("cannot get device detail for device %d\n",
166 d);
167 close(nfd);
168 free(st);
169 return 1;
170 }
171 dv = map_dev(disk.major, disk.minor, 1);
172 if (!dv) {
173 pr_err("cannot find device file for device %d\n",
174 d);
175 close(nfd);
176 free(st);
177 return 1;
178 }
179 fd2 = dev_open(dv, O_RDWR);
180 if (fd2 < 0) {
181 pr_err("cannot open device file %s\n", dv);
182 close(nfd);
183 free(st);
184 return 1;
185 }
186
187 if (st->ss->load_super(st, fd2, NULL)) {
188 pr_err("cannot find super block on %s\n", dv);
189 close(nfd);
190 close(fd2);
191 free(st);
192 return 1;
193 }
194 close(fd2);
195 }
196 /* OK, looks good. Let's update the superblock and write it out to
197 * newdev.
198 */
199
200 info.disk.number = d;
201 info.disk.major = major(stb.st_rdev);
202 info.disk.minor = minor(stb.st_rdev);
203 info.disk.raid_disk = d;
204 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
205 st->ss->update_super(st, &info, "linear-grow-new", newdev,
206 0, 0, NULL);
207
208 if (st->ss->store_super(st, nfd)) {
209 pr_err("Cannot store new superblock on %s\n",
210 newdev);
211 close(nfd);
212 return 1;
213 }
214 close(nfd);
215
216 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
217 pr_err("Cannot add new disk to this array\n");
218 return 1;
219 }
220 /* Well, that seems to have worked.
221 * Now go through and update all superblocks
222 */
223
224 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
225 pr_err("cannot get array info for %s\n", devname);
226 return 1;
227 }
228
229 nd = d;
230 for (d=0 ; d < info.array.raid_disks ; d++) {
231 mdu_disk_info_t disk;
232 char *dv;
233
234 disk.number = d;
235 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
236 pr_err("cannot get device detail for device %d\n",
237 d);
238 return 1;
239 }
240 dv = map_dev(disk.major, disk.minor, 1);
241 if (!dv) {
242 pr_err("cannot find device file for device %d\n",
243 d);
244 return 1;
245 }
246 fd2 = dev_open(dv, O_RDWR);
247 if (fd2 < 0) {
248 pr_err("cannot open device file %s\n", dv);
249 return 1;
250 }
251 if (st->ss->load_super(st, fd2, NULL)) {
252 pr_err("cannot find super block on %s\n", dv);
253 close(fd);
254 return 1;
255 }
256 info.array.raid_disks = nd+1;
257 info.array.nr_disks = nd+1;
258 info.array.active_disks = nd+1;
259 info.array.working_disks = nd+1;
260
261 st->ss->update_super(st, &info, "linear-grow-update", dv,
262 0, 0, NULL);
263
264 if (st->ss->store_super(st, fd2)) {
265 pr_err("Cannot store new superblock on %s\n", dv);
266 close(fd2);
267 return 1;
268 }
269 close(fd2);
270 }
271
272 return 0;
273 }
274
275 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
276 {
277 /*
278 * First check that array doesn't have a bitmap
279 * Then create the bitmap
280 * Then add it
281 *
282 * For internal bitmaps, we need to check the version,
283 * find all the active devices, and write the bitmap block
284 * to all devices
285 */
286 mdu_bitmap_file_t bmf;
287 mdu_array_info_t array;
288 struct supertype *st;
289 char *subarray = NULL;
290 int major = BITMAP_MAJOR_HI;
291 int vers = md_get_version(fd);
292 unsigned long long bitmapsize, array_size;
293
294 if (vers < 9003) {
295 major = BITMAP_MAJOR_HOSTENDIAN;
296 pr_err("Warning - bitmaps created on this kernel are not portable\n"
297 " between different architectures. Consider upgrading the Linux kernel.\n");
298 }
299
300 /*
301 * We only ever get called if s->bitmap_file is != NULL, so this check
302 * is just here to quiet down static code checkers.
303 */
304 if (!s->bitmap_file)
305 return 1;
306
307 if (strcmp(s->bitmap_file, "clustered") == 0)
308 major = BITMAP_MAJOR_CLUSTERED;
309
310 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
311 if (errno == ENOMEM)
312 pr_err("Memory allocation failure.\n");
313 else
314 pr_err("bitmaps not supported by this kernel.\n");
315 return 1;
316 }
317 if (bmf.pathname[0]) {
318 if (strcmp(s->bitmap_file,"none") == 0) {
319 if (ioctl(fd, SET_BITMAP_FILE, -1) != 0) {
320 pr_err("failed to remove bitmap %s\n",
321 bmf.pathname);
322 return 1;
323 }
324 return 0;
325 }
326 pr_err("%s already has a bitmap (%s)\n",
327 devname, bmf.pathname);
328 return 1;
329 }
330 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
331 pr_err("cannot get array status for %s\n", devname);
332 return 1;
333 }
334 if (array.state & (1 << MD_SB_BITMAP_PRESENT)) {
335 if (strcmp(s->bitmap_file, "none")==0) {
336 array.state &= ~(1 << MD_SB_BITMAP_PRESENT);
337 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
338 if (array.state & (1 << MD_SB_CLUSTERED))
339 pr_err("failed to remove clustered bitmap.\n");
340 else
341 pr_err("failed to remove internal bitmap.\n");
342 return 1;
343 }
344 return 0;
345 }
346 pr_err("bitmap already present on %s\n", devname);
347 return 1;
348 }
349
350 if (strcmp(s->bitmap_file, "none") == 0) {
351 pr_err("no bitmap found on %s\n", devname);
352 return 1;
353 }
354 if (array.level <= 0) {
355 pr_err("Bitmaps not meaningful with level %s\n",
356 map_num(pers, array.level)?:"of this array");
357 return 1;
358 }
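/* array.size is reported in KiB; the shift below converts it to 512-byte sectors. */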
359 bitmapsize = array.size;
360 bitmapsize <<= 1;
361 if (get_dev_size(fd, NULL, &array_size) &&
362 array_size > (0x7fffffffULL << 9)) {
363 /* Array is big enough that we cannot trust array.size
364 * try other approaches
365 */
366 bitmapsize = get_component_size(fd);
367 }
368 if (bitmapsize == 0) {
369 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
370 return 1;
371 }
372
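/* For RAID10 the array's logical size is the per-device size times raid_disks / (near * far copies), so scale bitmapsize up accordingly. */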
373 if (array.level == 10) {
374 int ncopies;
375
376 ncopies = (array.layout & 255) * ((array.layout >> 8) & 255);
377 bitmapsize = bitmapsize * array.raid_disks / ncopies;
378 }
379
380 st = super_by_fd(fd, &subarray);
381 if (!st) {
382 pr_err("Cannot understand version %d.%d\n",
383 array.major_version, array.minor_version);
384 return 1;
385 }
386 if (subarray) {
387 pr_err("Cannot add bitmaps to sub-arrays yet\n");
388 free(subarray);
389 free(st);
390 return 1;
391 }
392 if (strcmp(s->bitmap_file, "internal") == 0 ||
393 strcmp(s->bitmap_file, "clustered") == 0) {
394 int rv;
395 int d;
396 int offset_setable = 0;
397 struct mdinfo *mdi;
398 if (st->ss->add_internal_bitmap == NULL) {
399 pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name);
400 return 1;
401 }
402 st->nodes = c->nodes;
403 st->cluster_name = c->homecluster;
404 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
405 if (mdi)
406 offset_setable = 1;
407 for (d = 0; d < st->max_devs; d++) {
408 mdu_disk_info_t disk;
409 char *dv;
410 int fd2;
411
412 disk.number = d;
413 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
414 continue;
415 if (disk.major == 0 && disk.minor == 0)
416 continue;
417 if ((disk.state & (1 << MD_DISK_SYNC)) == 0)
418 continue;
419 dv = map_dev(disk.major, disk.minor, 1);
420 if (!dv)
421 continue;
422 fd2 = dev_open(dv, O_RDWR);
423 if (fd2 < 0)
424 continue;
425 rv = st->ss->load_super(st, fd2, NULL);
426 if (!rv) {
427 rv = st->ss->add_internal_bitmap(
428 st, &s->bitmap_chunk, c->delay,
429 s->write_behind, bitmapsize,
430 offset_setable, major);
431 if (!rv) {
432 st->ss->write_bitmap(st, fd2,
433 NodeNumUpdate);
434 } else {
435 pr_err("failed to create internal bitmap - chunksize problem.\n");
436 }
437 } else {
438 pr_err("failed to load super-block.\n");
439 }
440 close(fd2);
441 if (rv)
442 return 1;
443 }
444 if (offset_setable) {
445 st->ss->getinfo_super(st, mdi, NULL);
446 sysfs_init(mdi, fd, NULL);
447 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
448 mdi->bitmap_offset);
449 } else {
450 if (strcmp(s->bitmap_file, "clustered") == 0)
451 array.state |= (1 << MD_SB_CLUSTERED);
452 array.state |= (1 << MD_SB_BITMAP_PRESENT);
453 rv = ioctl(fd, SET_ARRAY_INFO, &array);
454 }
455 if (rv < 0) {
456 if (errno == EBUSY)
457 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
458 pr_err("failed to set internal bitmap.\n");
459 return 1;
460 }
461 } else {
462 int uuid[4];
463 int bitmap_fd;
464 int d;
465 int max_devs = st->max_devs;
466
467 /* try to load a superblock */
468 for (d = 0; d < max_devs; d++) {
469 mdu_disk_info_t disk;
470 char *dv;
471 int fd2;
472 disk.number = d;
473 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
474 continue;
475 if ((disk.major==0 && disk.minor == 0) ||
476 (disk.state & (1 << MD_DISK_REMOVED)))
477 continue;
478 dv = map_dev(disk.major, disk.minor, 1);
479 if (!dv)
480 continue;
481 fd2 = dev_open(dv, O_RDONLY);
482 if (fd2 >= 0) {
483 if (st->ss->load_super(st, fd2, NULL) == 0) {
484 close(fd2);
485 st->ss->uuid_from_super(st, uuid);
486 break;
487 }
488 close(fd2);
489 }
490 }
491 if (d == max_devs) {
492 pr_err("cannot find UUID for array!\n");
493 return 1;
494 }
495 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid,
496 s->bitmap_chunk, c->delay, s->write_behind,
497 bitmapsize, major)) {
498 return 1;
499 }
500 bitmap_fd = open(s->bitmap_file, O_RDWR);
501 if (bitmap_fd < 0) {
502 pr_err("weird: %s cannot be opened\n", s->bitmap_file);
503 return 1;
504 }
505 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
506 int err = errno;
507 if (errno == EBUSY)
508 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
509 pr_err("Cannot set bitmap file for %s: %s\n",
510 devname, strerror(err));
511 return 1;
512 }
513 }
514
515 return 0;
516 }
517
518 /*
519 * When reshaping an array we might need to back up some data.
520 * This is written to all spares with a 'super_block' describing it.
521 * The superblock goes 4K from the end of the used space on the
522 * device.
523 * It is written after the backup is complete.
524 * It has the following structure.
525 */
526
527 static struct mdp_backup_super {
528 char magic[16]; /* md_backup_data-1 or -2 */
529 __u8 set_uuid[16];
530 __u64 mtime;
531 /* start/sizes in 512byte sectors */
532 __u64 devstart; /* address on backup device/file of data */
533 __u64 arraystart;
534 __u64 length;
535 __u32 sb_csum; /* csum of preceding bytes. */
536 __u32 pad1;
537 __u64 devstart2; /* offset in to data of second section */
538 __u64 arraystart2;
539 __u64 length2;
540 __u32 sb_csum2; /* csum of preceding bytes. */
541 __u8 pad[512-68-32];
542 } __attribute__((aligned(512))) bsb, bsb2;
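/* Field sizes: magic + set_uuid account for 32 bytes and the __u64/__u32 members for 68 bytes, so pad[512-68-32] brings the structure to exactly 512 bytes - one sector. */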
543
544 static __u32 bsb_csum(char *buf, int len)
545 {
546 int i;
547 int csum = 0;
548 for (i = 0; i < len; i++)
549 csum = (csum<<3) + buf[0];
550 return __cpu_to_le32(csum);
551 }
552
553 static int check_idle(struct supertype *st)
554 {
555 /* Check that all member arrays for this container, or the
556 * container of this array, are idle
557 */
558 char *container = (st->container_devnm[0]
559 ? st->container_devnm : st->devnm);
560 struct mdstat_ent *ent, *e;
561 int is_idle = 1;
562
563 ent = mdstat_read(0, 0);
564 for (e = ent ; e; e = e->next) {
565 if (!is_container_member(e, container))
566 continue;
567 if (e->percent >= 0) {
568 is_idle = 0;
569 break;
570 }
571 }
572 free_mdstat(ent);
573 return is_idle;
574 }
575
576 static int freeze_container(struct supertype *st)
577 {
578 char *container = (st->container_devnm[0]
579 ? st->container_devnm : st->devnm);
580
581 if (!check_idle(st))
582 return -1;
583
584 if (block_monitor(container, 1)) {
585 pr_err("failed to freeze container\n");
586 return -2;
587 }
588
589 return 1;
590 }
591
592 static void unfreeze_container(struct supertype *st)
593 {
594 char *container = (st->container_devnm[0]
595 ? st->container_devnm : st->devnm);
596
597 unblock_monitor(container, 1);
598 }
599
600 static int freeze(struct supertype *st)
601 {
602 /* Try to freeze resync/rebuild on this array/container.
603 * Return -1 if the array is busy,
604 * return -2 if the container cannot be frozen,
605 * return 0 if this kernel doesn't support 'frozen'
606 * return 1 if it worked.
607 */
608 if (st->ss->external)
609 return freeze_container(st);
610 else {
611 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
612 int err;
613 char buf[20];
614
615 if (!sra)
616 return -1;
617 /* Need to clear any 'read-auto' status */
618 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
619 strncmp(buf, "read-auto", 9) == 0)
620 sysfs_set_str(sra, NULL, "array_state", "clean");
621
622 err = sysfs_freeze_array(sra);
623 sysfs_free(sra);
624 return err;
625 }
626 }
627
628 static void unfreeze(struct supertype *st)
629 {
630 if (st->ss->external)
631 return unfreeze_container(st);
632 else {
633 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
634 char buf[20];
635
636 if (sra &&
637 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0
638 && strcmp(buf, "frozen\n") == 0)
639 sysfs_set_str(sra, NULL, "sync_action", "idle");
640 sysfs_free(sra);
641 }
642 }
643
644 static void wait_reshape(struct mdinfo *sra)
645 {
646 int fd = sysfs_get_fd(sra, NULL, "sync_action");
647 char action[20];
648
649 if (fd < 0)
650 return;
651
652 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
653 strncmp(action, "reshape", 7) == 0)
654 sysfs_wait(fd, NULL);
655 close(fd);
656 }
657
658 static int reshape_super(struct supertype *st, unsigned long long size,
659 int level, int layout, int chunksize, int raid_disks,
660 int delta_disks, char *backup_file, char *dev,
661 int direction, int verbose)
662 {
663 /* nothing extra to check in the native case */
664 if (!st->ss->external)
665 return 0;
666 if (!st->ss->reshape_super ||
667 !st->ss->manage_reshape) {
668 pr_err("%s metadata does not support reshape\n",
669 st->ss->name);
670 return 1;
671 }
672
673 return st->ss->reshape_super(st, size, level, layout, chunksize,
674 raid_disks, delta_disks, backup_file, dev,
675 direction, verbose);
676 }
677
678 static void sync_metadata(struct supertype *st)
679 {
680 if (st->ss->external) {
681 if (st->update_tail) {
682 flush_metadata_updates(st);
683 st->update_tail = &st->updates;
684 } else
685 st->ss->sync_metadata(st);
686 }
687 }
688
689 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
690 {
691 /* when dealing with external metadata subarrays we need to be
692 * prepared to handle EAGAIN. The kernel may need to wait for
693 * mdmon to mark the array active so the kernel can handle
694 * allocations/writeback when preparing the reshape action
695 * (md_allow_write()). We temporarily disable safe_mode_delay
696 * to close a race with the array_state going clean before the
697 * next write to raid_disks / stripe_cache_size
698 */
699 char safe[50];
700 int rc;
701
702 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
703 if (!container ||
704 (strcmp(name, "raid_disks") != 0 &&
705 strcmp(name, "stripe_cache_size") != 0))
706 return sysfs_set_num(sra, NULL, name, n);
707
708 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
709 if (rc <= 0)
710 return -1;
711 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
712 rc = sysfs_set_num(sra, NULL, name, n);
713 if (rc < 0 && errno == EAGAIN) {
714 ping_monitor(container);
715 /* if we get EAGAIN here then the monitor is not active
716 * so stop trying
717 */
718 rc = sysfs_set_num(sra, NULL, name, n);
719 }
720 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
721 return rc;
722 }
723
724 int start_reshape(struct mdinfo *sra, int already_running,
725 int before_data_disks, int data_disks)
726 {
727 int err;
728 unsigned long long sync_max_to_set;
729
730 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
731 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
732 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
733 sra->reshape_progress);
734 if (before_data_disks <= data_disks)
735 sync_max_to_set = sra->reshape_progress / data_disks;
736 else
737 sync_max_to_set = (sra->component_size * data_disks
738 - sra->reshape_progress) / data_disks;
739 if (!already_running)
740 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
741 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
742 if (!already_running && err == 0) {
743 int cnt = 5;
744 do {
745 err = sysfs_set_str(sra, NULL, "sync_action", "reshape");
746 if (err)
747 sleep(1);
748 } while (err && errno == EBUSY && cnt-- > 0);
749 }
750 return err;
751 }
752
753 void abort_reshape(struct mdinfo *sra)
754 {
755 sysfs_set_str(sra, NULL, "sync_action", "idle");
756 /*
757 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
758 * suspend_hi to decrease as well as increase.")
759 * you could only increase suspend_{lo,hi} unless the region they
760 * covered was empty. So to reset to 0, you need to push suspend_lo
761 * up past suspend_hi first. So to maximize the chance of mdadm
762 * working on all kernels, we want to keep doing that.
763 */
764 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
765 sysfs_set_num(sra, NULL, "suspend_hi", 0);
766 sysfs_set_num(sra, NULL, "suspend_lo", 0);
767 sysfs_set_num(sra, NULL, "sync_min", 0);
768 // It isn't safe to reset sync_max as we aren't monitoring.
769 // Array really should be stopped at this point.
770 }
771
772 int remove_disks_for_takeover(struct supertype *st,
773 struct mdinfo *sra,
774 int layout)
775 {
776 int nr_of_copies;
777 struct mdinfo *remaining;
778 int slot;
779
780 if (sra->array.level == 10)
781 nr_of_copies = layout & 0xff;
782 else if (sra->array.level == 1)
783 nr_of_copies = sra->array.raid_disks;
784 else
785 return 1;
786
787 remaining = sra->devs;
788 sra->devs = NULL;
789 /* for each 'copy', select one device and remove from the list. */
790 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
791 struct mdinfo **diskp;
792 int found = 0;
793
794 /* Find a working device to keep */
795 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
796 struct mdinfo *disk = *diskp;
797
798 if (disk->disk.raid_disk < slot)
799 continue;
800 if (disk->disk.raid_disk >= slot + nr_of_copies)
801 continue;
802 if (disk->disk.state & (1<<MD_DISK_REMOVED))
803 continue;
804 if (disk->disk.state & (1<<MD_DISK_FAULTY))
805 continue;
806 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
807 continue;
808
809 /* We have found a good disk to use! */
810 *diskp = disk->next;
811 disk->next = sra->devs;
812 sra->devs = disk;
813 found = 1;
814 break;
815 }
816 if (!found)
817 break;
818 }
819
820 if (slot < sra->array.raid_disks) {
821 /* didn't find all slots */
822 struct mdinfo **e;
823 e = &remaining;
824 while (*e)
825 e = &(*e)->next;
826 *e = sra->devs;
827 sra->devs = remaining;
828 return 1;
829 }
830
831 /* Remove all 'remaining' devices from the array */
832 while (remaining) {
833 struct mdinfo *sd = remaining;
834 remaining = sd->next;
835
836 sysfs_set_str(sra, sd, "state", "faulty");
837 sysfs_set_str(sra, sd, "slot", "none");
838 /* for external metadata disks should be removed in mdmon */
839 if (!st->ss->external)
840 sysfs_set_str(sra, sd, "state", "remove");
841 sd->disk.state |= (1<<MD_DISK_REMOVED);
842 sd->disk.state &= ~(1<<MD_DISK_SYNC);
843 sd->next = sra->devs;
844 sra->devs = sd;
845 }
846 return 0;
847 }
848
849 void reshape_free_fdlist(int *fdlist,
850 unsigned long long *offsets,
851 int size)
852 {
853 int i;
854
855 for (i = 0; i < size; i++)
856 if (fdlist[i] >= 0)
857 close(fdlist[i]);
858
859 free(fdlist);
860 free(offsets);
861 }
862
863 int reshape_prepare_fdlist(char *devname,
864 struct mdinfo *sra,
865 int raid_disks,
866 int nrdisks,
867 unsigned long blocks,
868 char *backup_file,
869 int *fdlist,
870 unsigned long long *offsets)
871 {
872 int d = 0;
873 struct mdinfo *sd;
874
875 enable_fds(nrdisks);
876 for (d = 0; d <= nrdisks; d++)
877 fdlist[d] = -1;
878 d = raid_disks;
879 for (sd = sra->devs; sd; sd = sd->next) {
880 if (sd->disk.state & (1<<MD_DISK_FAULTY))
881 continue;
882 if (sd->disk.state & (1<<MD_DISK_SYNC) &&
883 sd->disk.raid_disk < raid_disks) {
884 char *dn = map_dev(sd->disk.major,
885 sd->disk.minor, 1);
886 fdlist[sd->disk.raid_disk]
887 = dev_open(dn, O_RDONLY);
888 offsets[sd->disk.raid_disk] = sd->data_offset*512;
889 if (fdlist[sd->disk.raid_disk] < 0) {
890 pr_err("%s: cannot open component %s\n",
891 devname, dn ? dn : "-unknown-");
892 d = -1;
893 goto release;
894 }
895 } else if (backup_file == NULL) {
896 /* spare */
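/* With no backup file, the backup lives on the spare itself: the data is placed 'blocks'+8 sectors below the end of the component space, leaving the last 8 sectors (4K) for the backup superblock described above. */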
897 char *dn = map_dev(sd->disk.major,
898 sd->disk.minor, 1);
899 fdlist[d] = dev_open(dn, O_RDWR);
900 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
901 if (fdlist[d] < 0) {
902 pr_err("%s: cannot open component %s\n",
903 devname, dn ? dn : "-unknown-");
904 d = -1;
905 goto release;
906 }
907 d++;
908 }
909 }
910 release:
911 return d;
912 }
913
914 int reshape_open_backup_file(char *backup_file,
915 int fd,
916 char *devname,
917 long blocks,
918 int *fdlist,
919 unsigned long long *offsets,
920 char *sys_name,
921 int restart)
922 {
923 /* Return 1 on success, 0 on any form of failure */
924 /* need to check backup file is large enough */
925 char buf[512];
926 struct stat stb;
927 unsigned int dev;
928 int i;
929
930 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
931 S_IRUSR | S_IWUSR);
932 *offsets = 8 * 512;
933 if (*fdlist < 0) {
934 pr_err("%s: cannot create backup file %s: %s\n",
935 devname, backup_file, strerror(errno));
936 return 0;
937 }
938 /* Guard against backup file being on array device.
939 * If array is partitioned or if LVM etc is in the
940 * way this will not notice, but it is better than
941 * nothing.
942 */
943 fstat(*fdlist, &stb);
944 dev = stb.st_dev;
945 fstat(fd, &stb);
946 if (stb.st_rdev == dev) {
947 pr_err("backup file must NOT be on the array being reshaped.\n");
948 close(*fdlist);
949 return 0;
950 }
951
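/* Pre-size the file: 8 leading sectors (4K) are reserved ahead of the data (hence *offsets == 8*512 above), presumably for a copy of the backup superblock, followed by 'blocks' sectors of backup space. */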
952 memset(buf, 0, 512);
953 for (i=0; i < blocks + 8 ; i++) {
954 if (write(*fdlist, buf, 512) != 512) {
955 pr_err("%s: cannot create backup file %s: %s\n",
956 devname, backup_file, strerror(errno));
957 return 0;
958 }
959 }
960 if (fsync(*fdlist) != 0) {
961 pr_err("%s: cannot create backup file %s: %s\n",
962 devname, backup_file, strerror(errno));
963 return 0;
964 }
965
966 if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) {
967 char *bu = make_backup(sys_name);
968 if (symlink(backup_file, bu))
969 pr_err("Recording backup file in " MAP_DIR " failed: %s\n",
970 strerror(errno));
971 free(bu);
972 }
973
974 return 1;
975 }
976
977 unsigned long compute_backup_blocks(int nchunk, int ochunk,
978 unsigned int ndata, unsigned int odata)
979 {
980 unsigned long a, b, blocks;
981 /* So how much do we need to back up?
982 * We need an amount of data which is both a whole number of
983 * old stripes and a whole number of new stripes.
984 * So we need the LCM of the old and new (chunksize*datadisks).
985 */
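/* Worked example: growing from 3 to 4 data disks with 512K chunks gives a = 1024*3 = 3072 and b = 1024*4 = 4096 sectors; GCD = 1024, so the LCM is 12288 sectors (6MiB) per backup unit. */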
986 a = (ochunk/512) * odata;
987 b = (nchunk/512) * ndata;
988 /* Find GCD */
989 a = GCD(a, b);
990 /* LCM == product / GCD */
991 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
992
993 return blocks;
994 }
995
996 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
997 {
998 /* Based on the current array state in info->array and
999 * the changes in info->new_* etc, determine:
1000 * - whether the change is possible
1001 * - Intermediate level/raid_disks/layout
1002 * - whether a restriping reshape is needed
1003 * - number of sectors in minimum change unit. This
1004 * will cover a whole number of stripes in 'before' and
1005 * 'after'.
1006 *
1007 * Return message if the change should be rejected
1008 * NULL if the change can be achieved
1009 *
1010 * This can be called as part of starting a reshape, or
1011 * when assembling an array that is undergoing reshape.
1012 */
1013 int near, far, offset, copies;
1014 int new_disks;
1015 int old_chunk, new_chunk;
1016 /* delta_parity records change in number of devices
1017 * caused by level change
1018 */
1019 int delta_parity = 0;
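/* e.g. +1 when converting RAID5 -> RAID6 (one extra parity device), -1 when converting RAID6 -> RAID5/RAID4. */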
1020
1021 memset(re, 0, sizeof(*re));
1022
1023 /* If a new level not explicitly given, we assume no-change */
1024 if (info->new_level == UnSet)
1025 info->new_level = info->array.level;
1026
1027 if (info->new_chunk)
1028 switch (info->new_level) {
1029 case 0:
1030 case 4:
1031 case 5:
1032 case 6:
1033 case 10:
1034 /* chunk size is meaningful, must divide component_size
1035 * evenly
1036 */
1037 if (info->component_size % (info->new_chunk/512)) {
1038 unsigned long long shrink = info->component_size;
1039 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1040 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1041 info->new_chunk/1024, info->component_size/2);
1042 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1043 devname, shrink/2);
1044 pr_err("will shrink the array so the given chunk size would work.\n");
1045 return "";
1046 }
1047 break;
1048 default:
1049 return "chunk size not meaningful for this level";
1050 }
1051 else
1052 info->new_chunk = info->array.chunk_size;
1053
1054 switch (info->array.level) {
1055 default:
1056 return "No reshape is possibly for this RAID level";
1057 case LEVEL_LINEAR:
1058 if (info->delta_disks != UnSet)
1059 return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
1060 else
1061 return "Only --add is supported for LINEAR, other --grow options are not meaningful";
1062 case 1:
1063 /* RAID1 can convert to RAID1 with different disks, or
1064 * raid5 with 2 disks, or
1065 * raid0 with 1 disk
1066 */
1067 if (info->new_level > 1 &&
1068 (info->component_size & 7))
1069 return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
1070 if (info->new_level == 0) {
1071 if (info->delta_disks != UnSet &&
1072 info->delta_disks != 0)
1073 return "Cannot change number of disks with RAID1->RAID0 conversion";
1074 re->level = 0;
1075 re->before.data_disks = 1;
1076 re->after.data_disks = 1;
1077 return NULL;
1078 }
1079 if (info->new_level == 1) {
1080 if (info->delta_disks == UnSet)
1081 /* Don't know what to do */
1082 return "no change requested for Growing RAID1";
1083 re->level = 1;
1084 return NULL;
1085 }
1086 if (info->array.raid_disks != 2 &&
1087 info->new_level == 5)
1088 return "Can only convert a 2-device array to RAID5";
1089 if (info->array.raid_disks == 2 &&
1090 info->new_level == 5) {
1091
1092 re->level = 5;
1093 re->before.data_disks = 1;
1094 if (info->delta_disks != UnSet &&
1095 info->delta_disks != 0)
1096 re->after.data_disks = 1 + info->delta_disks;
1097 else
1098 re->after.data_disks = 1;
1099 if (re->after.data_disks < 1)
1100 return "Number of disks too small for RAID5";
1101
1102 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1103 info->array.chunk_size = 65536;
1104 break;
1105 }
1106 /* Could do some multi-stage conversions, but leave that to
1107 * later.
1108 */
1109 return "Impossibly level change request for RAID1";
1110
1111 case 10:
1112 /* RAID10 can be converted from near mode to
1113 * RAID0 by removing some devices.
1114 * It can also be reshaped if the kernel supports
1115 * new_data_offset.
1116 */
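/* The RAID10 layout word is decoded below as: bits 0-7 = number of 'near' copies, bits 8-15 = 'far' copies, bit 0x10000 = offset mode; the common near=2 layout is therefore 0x102. */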
1117 switch (info->new_level) {
1118 case 0:
1119 if ((info->array.layout & ~0xff) != 0x100)
1120 return "Cannot Grow RAID10 with far/offset layout";
1121 /* number of devices must be multiple of number of copies */
1122 if (info->array.raid_disks % (info->array.layout & 0xff))
1123 return "RAID10 layout too complex for Grow operation";
1124
1125 new_disks = (info->array.raid_disks
1126 / (info->array.layout & 0xff));
1127 if (info->delta_disks == UnSet)
1128 info->delta_disks = (new_disks
1129 - info->array.raid_disks);
1130
1131 if (info->delta_disks != new_disks - info->array.raid_disks)
1132 return "New number of raid-devices impossible for RAID10";
1133 if (info->new_chunk &&
1134 info->new_chunk != info->array.chunk_size)
1135 return "Cannot change chunk-size with RAID10 Grow";
1136
1137 /* looks good */
1138 re->level = 0;
1139 re->before.data_disks = new_disks;
1140 re->after.data_disks = re->before.data_disks;
1141 return NULL;
1142
1143 case 10:
1144 near = info->array.layout & 0xff;
1145 far = (info->array.layout >> 8) & 0xff;
1146 offset = info->array.layout & 0x10000;
1147 if (far > 1 && !offset)
1148 return "Cannot reshape RAID10 in far-mode";
1149 copies = near * far;
1150
1151 old_chunk = info->array.chunk_size * far;
1152
1153 if (info->new_layout == UnSet)
1154 info->new_layout = info->array.layout;
1155 else {
1156 near = info->new_layout & 0xff;
1157 far = (info->new_layout >> 8) & 0xff;
1158 offset = info->new_layout & 0x10000;
1159 if (far > 1 && !offset)
1160 return "Cannot reshape RAID10 to far-mode";
1161 if (near * far != copies)
1162 return "Cannot change number of copies when reshaping RAID10";
1163 }
1164 if (info->delta_disks == UnSet)
1165 info->delta_disks = 0;
1166 new_disks = (info->array.raid_disks +
1167 info->delta_disks);
1168
1169 new_chunk = info->new_chunk * far;
1170
1171 re->level = 10;
1172 re->before.layout = info->array.layout;
1173 re->before.data_disks = info->array.raid_disks;
1174 re->after.layout = info->new_layout;
1175 re->after.data_disks = new_disks;
1176 /* For RAID10 we don't do backup but do allow reshape,
1177 * so set backup_blocks to INVALID_SECTORS rather than
1178 * zero.
1179 * And there is no need to synchronise stripes on both
1180 * 'old' and 'new'. So the important
1181 * number is the minimum data_offset difference
1182 * which is the larger of (offset copies * chunk).
1183 */
1184 re->backup_blocks = INVALID_SECTORS;
1185 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1186 if (new_disks < re->before.data_disks &&
1187 info->space_after < re->min_offset_change)
1188 /* Reduce component size by one chunk */
1189 re->new_size = (info->component_size -
1190 re->min_offset_change);
1191 else
1192 re->new_size = info->component_size;
1193 re->new_size = re->new_size * new_disks / copies;
1194 return NULL;
1195
1196 default:
1197 return "RAID10 can only be changed to RAID0";
1198 }
1199 case 0:
1200 /* RAID0 can be converted to RAID10, or to RAID456 */
1201 if (info->new_level == 10) {
1202 if (info->new_layout == UnSet && info->delta_disks == UnSet) {
1203 /* Assume near=2 layout */
1204 info->new_layout = 0x102;
1205 info->delta_disks = info->array.raid_disks;
1206 }
1207 if (info->new_layout == UnSet) {
1208 int copies = 1 + (info->delta_disks
1209 / info->array.raid_disks);
1210 if (info->array.raid_disks * (copies-1)
1211 != info->delta_disks)
1212 return "Impossible number of devices for RAID0->RAID10";
1213 info->new_layout = 0x100 + copies;
1214 }
1215 if (info->delta_disks == UnSet) {
1216 int copies = info->new_layout & 0xff;
1217 if (info->new_layout != 0x100 + copies)
1218 return "New layout impossible for RAID0->RAID10";;
1219 info->delta_disks = (copies - 1) *
1220 info->array.raid_disks;
1221 }
1222 if (info->new_chunk &&
1223 info->new_chunk != info->array.chunk_size)
1224 return "Cannot change chunk-size with RAID0->RAID10";
1225 /* looks good */
1226 re->level = 10;
1227 re->before.data_disks = (info->array.raid_disks +
1228 info->delta_disks);
1229 re->after.data_disks = re->before.data_disks;
1230 re->before.layout = info->new_layout;
1231 return NULL;
1232 }
1233
1234 /* RAID0 can also convert to RAID0/4/5/6 by first converting to
1235 * a raid4 style layout of the final level.
1236 */
1237 switch (info->new_level) {
1238 case 4:
1239 delta_parity = 1;
1240 case 0:
1241 re->level = 4;
1242 re->before.layout = 0;
1243 break;
1244 case 5:
1245 delta_parity = 1;
1246 re->level = 5;
1247 re->before.layout = ALGORITHM_PARITY_N;
1248 if (info->new_layout == UnSet)
1249 info->new_layout = map_name(r5layout, "default");
1250 break;
1251 case 6:
1252 delta_parity = 2;
1253 re->level = 6;
1254 re->before.layout = ALGORITHM_PARITY_N;
1255 if (info->new_layout == UnSet)
1256 info->new_layout = map_name(r6layout, "default");
1257 break;
1258 default:
1259 return "Impossible level change requested";
1260 }
1261 re->before.data_disks = info->array.raid_disks;
1262 /* determining 'after' layout happens outside this 'switch' */
1263 break;
1264
1265 case 4:
1266 info->array.layout = ALGORITHM_PARITY_N;
1267 case 5:
1268 switch (info->new_level) {
1269 case 0:
1270 delta_parity = -1;
1271 case 4:
1272 re->level = info->array.level;
1273 re->before.data_disks = info->array.raid_disks - 1;
1274 re->before.layout = info->array.layout;
1275 break;
1276 case 5:
1277 re->level = 5;
1278 re->before.data_disks = info->array.raid_disks - 1;
1279 re->before.layout = info->array.layout;
1280 break;
1281 case 6:
1282 delta_parity = 1;
1283 re->level = 6;
1284 re->before.data_disks = info->array.raid_disks - 1;
1285 switch (info->array.layout) {
1286 case ALGORITHM_LEFT_ASYMMETRIC:
1287 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1288 break;
1289 case ALGORITHM_RIGHT_ASYMMETRIC:
1290 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1291 break;
1292 case ALGORITHM_LEFT_SYMMETRIC:
1293 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1294 break;
1295 case ALGORITHM_RIGHT_SYMMETRIC:
1296 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1297 break;
1298 case ALGORITHM_PARITY_0:
1299 re->before.layout = ALGORITHM_PARITY_0_6;
1300 break;
1301 case ALGORITHM_PARITY_N:
1302 re->before.layout = ALGORITHM_PARITY_N_6;
1303 break;
1304 default:
1305 return "Cannot convert an array with this layout";
1306 }
1307 break;
1308 case 1:
1309 if (info->array.raid_disks != 2)
1310 return "Can only convert a 2-device array to RAID1";
1311 if (info->delta_disks != UnSet &&
1312 info->delta_disks != 0)
1313 return "Cannot set raid_disk when converting RAID5->RAID1";
1314 re->level = 1;
1315 info->new_chunk = 0;
1316 return NULL;
1317 default:
1318 return "Impossible level change requested";
1319 }
1320 break;
1321 case 6:
1322 switch (info->new_level) {
1323 case 4:
1324 case 5:
1325 delta_parity = -1;
1326 case 6:
1327 re->level = 6;
1328 re->before.data_disks = info->array.raid_disks - 2;
1329 re->before.layout = info->array.layout;
1330 break;
1331 default:
1332 return "Impossible level change requested";
1333 }
1334 break;
1335 }
1336
1337 /* If we reached here then it looks like a re-stripe is
1338 * happening. We have determined the intermediate level
1339 * and initial raid_disks/layout and stored these in 're'.
1340 *
1341 * We need to deduce the final layout that can be atomically
1342 * converted to the end state.
1343 */
1344 switch (info->new_level) {
1345 case 0:
1346 /* We can only get to RAID0 from RAID4 or RAID5
1347 * with appropriate layout and one extra device
1348 */
1349 if (re->level != 4 && re->level != 5)
1350 return "Cannot covert to RAID0 from this level";
1351
1352 switch (re->level) {
1353 case 4:
1354 re->before.layout = 0;
1355 re->after.layout = 0;
1356 break;
1357 case 5:
1358 re->after.layout = ALGORITHM_PARITY_N;
1359 break;
1360 }
1361 break;
1362
1363 case 4:
1364 /* We can only get to RAID4 from RAID5 */
1365 if (re->level != 4 && re->level != 5)
1366 return "Cannot convert to RAID4 from this level";
1367
1368 switch (re->level) {
1369 case 4:
1370 re->after.layout = 0;
1371 break;
1372 case 5:
1373 re->after.layout = ALGORITHM_PARITY_N;
1374 break;
1375 }
1376 break;
1377
1378 case 5:
1379 /* We get to RAID5 from RAID5 or RAID6 */
1380 if (re->level != 5 && re->level != 6)
1381 return "Cannot convert to RAID5 from this level";
1382
1383 switch (re->level) {
1384 case 5:
1385 if (info->new_layout == UnSet)
1386 re->after.layout = re->before.layout;
1387 else
1388 re->after.layout = info->new_layout;
1389 break;
1390 case 6:
1391 if (info->new_layout == UnSet)
1392 info->new_layout = re->before.layout;
1393
1394 /* after.layout needs to be raid6 version of new_layout */
1395 if (info->new_layout == ALGORITHM_PARITY_N)
1396 re->after.layout = ALGORITHM_PARITY_N;
1397 else {
1398 char layout[40];
1399 char *ls = map_num(r5layout, info->new_layout);
1400 int l;
1401 if (ls) {
1402 /* Current RAID6 layout has a RAID5
1403 * equivalent - good
1404 */
1405 strcat(strcpy(layout, ls), "-6");
1406 l = map_name(r6layout, layout);
1407 if (l == UnSet)
1408 return "Cannot find RAID6 layout to convert to";
1409 } else {
1410 /* Current RAID6 has no equivalent.
1411 * If it is already a '-6' layout we
1412 * can leave it unchanged, else we must
1413 * fail
1414 */
1415 ls = map_num(r6layout, info->new_layout);
1416 if (!ls ||
1417 strcmp(ls+strlen(ls)-2, "-6") != 0)
1418 return "Please specify new layout";
1419 l = info->new_layout;
1420 }
1421 re->after.layout = l;
1422 }
1423 }
1424 break;
1425
1426 case 6:
1427 /* We must already be at level 6 */
1428 if (re->level != 6)
1429 return "Impossible level change";
1430 if (info->new_layout == UnSet)
1431 re->after.layout = info->array.layout;
1432 else
1433 re->after.layout = info->new_layout;
1434 break;
1435 default:
1436 return "Impossible level change requested";
1437 }
1438 if (info->delta_disks == UnSet)
1439 info->delta_disks = delta_parity;
1440
1441 re->after.data_disks = (re->before.data_disks
1442 + info->delta_disks
1443 - delta_parity);
1444 switch (re->level) {
1445 case 6: re->parity = 2;
1446 break;
1447 case 4:
1448 case 5: re->parity = 1;
1449 break;
1450 default: re->parity = 0;
1451 break;
1452 }
1453 /* So we have a restripe operation, we need to calculate the number
1454 * of blocks per reshape operation.
1455 */
1456 re->new_size = info->component_size * re->before.data_disks;
1457 if (info->new_chunk == 0)
1458 info->new_chunk = info->array.chunk_size;
1459 if (re->after.data_disks == re->before.data_disks &&
1460 re->after.layout == re->before.layout &&
1461 info->new_chunk == info->array.chunk_size) {
1462 /* Nothing to change, can change level immediately. */
1463 re->level = info->new_level;
1464 re->backup_blocks = 0;
1465 return NULL;
1466 }
1467 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1468 /* chunk and layout changes make no difference */
1469 re->level = info->new_level;
1470 re->backup_blocks = 0;
1471 return NULL;
1472 }
1473
1474 if (re->after.data_disks == re->before.data_disks &&
1475 get_linux_version() < 2006032)
1476 return "in-place reshape is not safe before 2.6.32 - sorry.";
1477
1478 if (re->after.data_disks < re->before.data_disks &&
1479 get_linux_version() < 2006030)
1480 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1481
1482 re->backup_blocks = compute_backup_blocks(
1483 info->new_chunk, info->array.chunk_size,
1484 re->after.data_disks,
1485 re->before.data_disks);
1486 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1487
1488 re->new_size = info->component_size * re->after.data_disks;
1489 return NULL;
1490 }
1491
1492 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1493 char *text_version)
1494 {
1495 struct mdinfo *info;
1496 char *subarray;
1497 int ret_val = -1;
1498
1499 if ((st == NULL) || (sra == NULL))
1500 return ret_val;
1501
1502 if (text_version == NULL)
1503 text_version = sra->text_version;
1504 subarray = strchr(text_version+1, '/')+1;
1505 info = st->ss->container_content(st, subarray);
1506 if (info) {
1507 unsigned long long current_size = 0;
1508 unsigned long long new_size =
1509 info->custom_array_size/2;
1510
1511 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1512 new_size > current_size) {
1513 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1514 < 0)
1515 dprintf("Error: Cannot set array size");
1516 else {
1517 ret_val = 0;
1518 dprintf("Array size changed");
1519 }
1520 dprintf_cont(" from %llu to %llu.\n",
1521 current_size, new_size);
1522 }
1523 sysfs_free(info);
1524 } else
1525 dprintf("Error: set_array_size(): info pointer in NULL\n");
1526
1527 return ret_val;
1528 }
1529
1530 static int reshape_array(char *container, int fd, char *devname,
1531 struct supertype *st, struct mdinfo *info,
1532 int force, struct mddev_dev *devlist,
1533 unsigned long long data_offset,
1534 char *backup_file, int verbose, int forked,
1535 int restart, int freeze_reshape);
1536 static int reshape_container(char *container, char *devname,
1537 int mdfd,
1538 struct supertype *st,
1539 struct mdinfo *info,
1540 int force,
1541 char *backup_file, int verbose,
1542 int forked, int restart, int freeze_reshape);
1543
1544 int Grow_reshape(char *devname, int fd,
1545 struct mddev_dev *devlist,
1546 unsigned long long data_offset,
1547 struct context *c, struct shape *s)
1548 {
1549 /* Make some changes in the shape of an array.
1550 * The kernel must support the change.
1551 *
1552 * There are three different changes. Each can trigger
1553 * a resync or recovery so we freeze that until we have
1554 * requested everything (if kernel supports freezing - 2.6.30).
1555 * The steps are:
1556 * - change size (i.e. component_size)
1557 * - change level
1558 * - change layout/chunksize/ndisks
1559 *
1560 * The last can require a reshape. It is different on different
1561 * levels so we need to check the level before actioning it.
1562 * Sometimes the level change needs to be requested after the
1563 * reshape (e.g. raid6->raid5, raid5->raid0)
1564 *
1565 */
1566 struct mdu_array_info_s array;
1567 int rv = 0;
1568 struct supertype *st;
1569 char *subarray = NULL;
1570
1571 int frozen;
1572 int changed = 0;
1573 char *container = NULL;
1574 int cfd = -1;
1575
1576 struct mddev_dev *dv;
1577 int added_disks;
1578
1579 struct mdinfo info;
1580 struct mdinfo *sra;
1581
1582 if (ioctl(fd, GET_ARRAY_INFO, &array) < 0) {
1583 pr_err("%s is not an active md array - aborting\n",
1584 devname);
1585 return 1;
1586 }
1587 if (data_offset != INVALID_SECTORS && array.level != 10
1588 && (array.level < 4 || array.level > 6)) {
1589 pr_err("--grow --data-offset not yet supported\n");
1590 return 1;
1591 }
1592
1593 if (s->size > 0 &&
1594 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1595 pr_err("cannot change component size at the same time as other changes.\n"
1596 " Change size first, then check data is intact before making other changes.\n");
1597 return 1;
1598 }
1599
1600 if (s->raiddisks && s->raiddisks < array.raid_disks && array.level > 1 &&
1601 get_linux_version() < 2006032 &&
1602 !check_env("MDADM_FORCE_FEWER")) {
1603 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1604 " Please use a newer kernel\n");
1605 return 1;
1606 }
1607
1608 st = super_by_fd(fd, &subarray);
1609 if (!st) {
1610 pr_err("Unable to determine metadata format for %s\n", devname);
1611 return 1;
1612 }
1613 if (s->raiddisks > st->max_devs) {
1614 pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs);
1615 return 1;
1616 }
1617 if (s->level == 0 &&
1618 (array.state & (1<<MD_SB_BITMAP_PRESENT)) &&
1619 !(array.state & (1<<MD_SB_CLUSTERED))) {
1620 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
1621 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
1622 pr_err("failed to remove internal bitmap.\n");
1623 return 1;
1624 }
1625 }
1626
1627 /* in the external case we need to check that the requested reshape is
1628 * supported, and perform an initial check that the container holds the
1629 * pre-requisite spare devices (mdmon owns final validation)
1630 */
1631 if (st->ss->external) {
1632 int rv;
1633
1634 if (subarray) {
1635 container = st->container_devnm;
1636 cfd = open_dev_excl(st->container_devnm);
1637 } else {
1638 container = st->devnm;
1639 close(fd);
1640 cfd = open_dev_excl(st->devnm);
1641 fd = cfd;
1642 }
1643 if (cfd < 0) {
1644 pr_err("Unable to open container for %s\n",
1645 devname);
1646 free(subarray);
1647 return 1;
1648 }
1649
1650 rv = st->ss->load_container(st, cfd, NULL);
1651
1652 if (rv) {
1653 pr_err("Cannot read superblock for %s\n",
1654 devname);
1655 free(subarray);
1656 return 1;
1657 }
1658
1659 /* check if operation is supported for metadata handler */
1660 if (st->ss->container_content) {
1661 struct mdinfo *cc = NULL;
1662 struct mdinfo *content = NULL;
1663
1664 cc = st->ss->container_content(st, subarray);
1665 for (content = cc; content ; content = content->next) {
1666 int allow_reshape = 1;
1667
1668 /* check if reshape is allowed based on metadata
1669 * indications stored in content->array.state
1670 */
1671 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
1672 allow_reshape = 0;
1673 if (content->array.state
1674 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE))
1675 allow_reshape = 0;
1676 if (!allow_reshape) {
1677 pr_err("cannot reshape arrays in container with unsupported metadata: %s(%s)\n",
1678 devname, container);
1679 sysfs_free(cc);
1680 free(subarray);
1681 return 1;
1682 }
1683 }
1684 sysfs_free(cc);
1685 }
1686 if (mdmon_running(container))
1687 st->update_tail = &st->updates;
1688 }
1689
1690 added_disks = 0;
1691 for (dv = devlist; dv; dv = dv->next)
1692 added_disks++;
1693 if (s->raiddisks > array.raid_disks &&
1694 array.spare_disks +added_disks < (s->raiddisks - array.raid_disks) &&
1695 !c->force) {
1696 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
1697 " Use --force to over-ride this check.\n",
1698 s->raiddisks - array.raid_disks,
1699 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1700 array.spare_disks + added_disks);
1701 return 1;
1702 }
1703
1704 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS
1705 | GET_STATE | GET_VERSION);
1706 if (sra) {
1707 if (st->ss->external && subarray == NULL) {
1708 array.level = LEVEL_CONTAINER;
1709 sra->array.level = LEVEL_CONTAINER;
1710 }
1711 } else {
1712 pr_err("failed to read sysfs parameters for %s\n",
1713 devname);
1714 return 1;
1715 }
1716 frozen = freeze(st);
1717 if (frozen < -1) {
1718 /* freeze() already spewed the reason */
1719 sysfs_free(sra);
1720 return 1;
1721 } else if (frozen < 0) {
1722 pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname);
1723 sysfs_free(sra);
1724 return 1;
1725 }
1726
1727 /* ========= set size =============== */
1728 if (s->size > 0 && (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1729 unsigned long long orig_size = get_component_size(fd)/2;
1730 unsigned long long min_csize;
1731 struct mdinfo *mdi;
1732 int raid0_takeover = 0;
1733
1734 if (orig_size == 0)
1735 orig_size = (unsigned) array.size;
1736
1737 if (orig_size == 0) {
1738 pr_err("Cannot set device size in this type of array.\n");
1739 rv = 1;
1740 goto release;
1741 }
1742
1743 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1744 devname, APPLY_METADATA_CHANGES, c->verbose > 0)) {
1745 rv = 1;
1746 goto release;
1747 }
1748 sync_metadata(st);
1749 if (st->ss->external) {
1750 /* metadata can have a size limitation;
1751 * update the size value according to metadata information
1752 */
1753 struct mdinfo *sizeinfo =
1754 st->ss->container_content(st, subarray);
1755 if (sizeinfo) {
1756 unsigned long long new_size =
1757 sizeinfo->custom_array_size/2;
1758 int data_disks = get_data_disks(
1759 sizeinfo->array.level,
1760 sizeinfo->array.layout,
1761 sizeinfo->array.raid_disks);
1762 new_size /= data_disks;
1763 dprintf("Metadata size correction from %llu to %llu (%llu)\n", orig_size, new_size,
1764 new_size * data_disks);
1765 s->size = new_size;
1766 sysfs_free(sizeinfo);
1767 }
1768 }
1769
1770 /* Update the size of each member device in case
1771 * they have been resized. This will never reduce
1772 * below the current used-size. The "size" attribute
1773 * understands '0' to mean 'max'.
1774 */
1775 min_csize = 0;
1776 rv = 0;
1777 for (mdi = sra->devs; mdi; mdi = mdi->next) {
1778 if (sysfs_set_num(sra, mdi, "size",
1779 s->size == MAX_SIZE ? 0 : s->size) < 0) {
1780 /* Probably kernel refusing to let us
1781 * reduce the size - not an error.
1782 */
1783 break;
1784 }
1785 if (array.not_persistent == 0 &&
1786 array.major_version == 0 &&
1787 get_linux_version() < 3001000) {
1788 /* Dangerous to allow size to exceed 2TB */
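/* sysfs 'size' is in KiB, so the 2*1024^3 cap below corresponds to 2TiB per device. */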
1789 unsigned long long csize;
1790 if (sysfs_get_ll(sra, mdi, "size", &csize) == 0) {
1791 if (csize >= 2ULL*1024*1024*1024)
1792 csize = 2ULL*1024*1024*1024;
1793 if ((min_csize == 0 || (min_csize
1794 > csize)))
1795 min_csize = csize;
1796 }
1797 }
1798 }
1799 if (rv) {
1800 pr_err("Cannot set size on array members.\n");
1801 goto size_change_error;
1802 }
1803 if (min_csize && s->size > min_csize) {
1804 pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n");
1805 rv = 1;
1806 goto size_change_error;
1807 }
1808 if (min_csize && s->size == MAX_SIZE) {
1809 /* Don't let the kernel choose a size - it will get
1810 * it wrong
1811 */
1812 pr_err("Limited v0.90 array to 2TB per device\n");
1813 s->size = min_csize;
1814 }
1815 if (st->ss->external) {
1816 if (sra->array.level == 0) {
1817 rv = sysfs_set_str(sra, NULL, "level",
1818 "raid5");
1819 if (!rv) {
1820 raid0_takeover = 1;
1821 /* get array parameters after takeover
1822 * to change one parameter at time only
1823 */
1824 rv = ioctl(fd, GET_ARRAY_INFO, &array);
1825 }
1826 }
1827 /* make sure mdmon is
1828 * aware of the new level */
1829 if (!mdmon_running(st->container_devnm))
1830 start_mdmon(st->container_devnm);
1831 ping_monitor(container);
1832 if (mdmon_running(st->container_devnm) &&
1833 st->update_tail == NULL)
1834 st->update_tail = &st->updates;
1835 }
1836
1837 if (s->size == MAX_SIZE)
1838 s->size = 0;
1839 array.size = s->size;
1840 if (s->size & ~INT32_MAX) {
1841 /* got truncated to 32bit, write to
1842 * component_size instead
1843 */
1844 if (sra)
1845 rv = sysfs_set_num(sra, NULL,
1846 "component_size", s->size);
1847 else
1848 rv = -1;
1849 } else {
1850 rv = ioctl(fd, SET_ARRAY_INFO, &array);
1851
1852 /* manage array size when it is managed externally
1853 */
1854 if ((rv == 0) && st->ss->external)
1855 rv = set_array_size(st, sra, sra->text_version);
1856 }
1857
1858 if (raid0_takeover) {
1859 /* do not resync non-existent parity,
1860 * we will drop it anyway
1861 */
1862 sysfs_set_str(sra, NULL, "sync_action", "frozen");
1863 /* go back to raid0, drop parity disk
1864 */
1865 sysfs_set_str(sra, NULL, "level", "raid0");
1866 ioctl(fd, GET_ARRAY_INFO, &array);
1867 }
1868
1869 size_change_error:
1870 if (rv != 0) {
1871 int err = errno;
1872
1873 /* restore metadata */
1874 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
1875 UnSet, NULL, devname,
1876 ROLLBACK_METADATA_CHANGES,
1877 c->verbose) == 0)
1878 sync_metadata(st);
1879 pr_err("Cannot set device size for %s: %s\n",
1880 devname, strerror(err));
1881 if (err == EBUSY &&
1882 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
1883 cont_err("Bitmap must be removed before size can be changed\n");
1884 rv = 1;
1885 goto release;
1886 }
1887 if (s->assume_clean) {
1888 /* This will fail on kernels older than 3.0 unless
1889 * a backport has been arranged.
1890 */
1891 if (sra == NULL ||
1892 sysfs_set_str(sra, NULL, "resync_start", "none") < 0)
1893 pr_err("--assume-clean not supported with --grow on this kernel\n");
1894 }
1895 ioctl(fd, GET_ARRAY_INFO, &array);
1896 s->size = get_component_size(fd)/2;
1897 if (s->size == 0)
1898 s->size = array.size;
1899 if (c->verbose >= 0) {
1900 if (s->size == orig_size)
1901 pr_err("component size of %s unchanged at %lluK\n",
1902 devname, s->size);
1903 else
1904 pr_err("component size of %s has been set to %lluK\n",
1905 devname, s->size);
1906 }
1907 changed = 1;
1908 } else if (array.level != LEVEL_CONTAINER) {
1909 s->size = get_component_size(fd)/2;
1910 if (s->size == 0)
1911 s->size = array.size;
1912 }
1913
1914 /* See if there is anything else to do */
1915 if ((s->level == UnSet || s->level == array.level) &&
1916 (s->layout_str == NULL) &&
1917 (s->chunk == 0 || s->chunk == array.chunk_size) &&
1918 data_offset == INVALID_SECTORS &&
1919 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
1920 /* Nothing more to do */
1921 if (!changed && c->verbose >= 0)
1922 pr_err("%s: no change requested\n",
1923 devname);
1924 goto release;
1925 }
1926
1927 /* ========= check for RAID10/RAID1 -> RAID0 conversion ===============
1928 * the current implementation assumes that the following conditions are met:
1929 * - RAID10:
1930 * - far_copies == 1
1931 * - near_copies == 2
1932 */
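/* Layout ((1 << 8) + 2) == 0x102 is exactly far=1/near=2, and raid_disks must be even so that one device from each near pair can be dropped. */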
1933 if ((s->level == 0 && array.level == 10 && sra &&
1934 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
1935 (s->level == 0 && array.level == 1 && sra)) {
1936 int err;
1937 err = remove_disks_for_takeover(st, sra, array.layout);
1938 if (err) {
1939 dprintf("Array cannot be reshaped\n");
1940 if (cfd > -1)
1941 close(cfd);
1942 rv = 1;
1943 goto release;
1944 }
1945 /* Make sure mdmon has seen the device removal
1946 * and updated metadata before we continue with
1947 * level change
1948 */
1949 if (container)
1950 ping_monitor(container);
1951 }
1952
1953 memset(&info, 0, sizeof(info));
1954 info.array = array;
1955 sysfs_init(&info, fd, NULL);
1956 strcpy(info.text_version, sra->text_version);
1957 info.component_size = s->size*2;
1958 info.new_level = s->level;
1959 info.new_chunk = s->chunk * 1024;
1960 if (info.array.level == LEVEL_CONTAINER) {
1961 info.delta_disks = UnSet;
1962 info.array.raid_disks = s->raiddisks;
1963 } else if (s->raiddisks)
1964 info.delta_disks = s->raiddisks - info.array.raid_disks;
1965 else
1966 info.delta_disks = UnSet;
1967 if (s->layout_str == NULL) {
1968 info.new_layout = UnSet;
1969 if (info.array.level == 6 &&
1970 (info.new_level == 6 || info.new_level == UnSet) &&
1971 info.array.layout >= 16) {
1972 pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname);
1973 cont_err("during the reshape, please specify --layout=preserve\n");
1974 cont_err("If you want to change it, specify a layout or use --layout=normalise\n");
1975 rv = 1;
1976 goto release;
1977 }
1978 } else if (strcmp(s->layout_str, "normalise") == 0 ||
1979 strcmp(s->layout_str, "normalize") == 0) {
1980 /* If we have a -6 RAID6 layout, remove the '-6'. */
1981 info.new_layout = UnSet;
1982 if (info.array.level == 6 && info.new_level == UnSet) {
1983 char l[40], *h;
1984 strcpy(l, map_num(r6layout, info.array.layout));
1985 h = strrchr(l, '-');
1986 if (h && strcmp(h, "-6") == 0) {
1987 *h = 0;
1988 info.new_layout = map_name(r6layout, l);
1989 }
1990 } else {
1991 pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str);
1992 rv = 1;
1993 goto release;
1994 }
1995 } else if (strcmp(s->layout_str, "preserve") == 0) {
1996 /* This means that a non-standard RAID6 layout
1997 * is OK.
1998 * In particular:
1999 * - When reshape a RAID6 (e.g. adding a device)
2000 * which is in a non-standard layout, it is OK
2001 * to preserve that layout.
2002 * - When converting a RAID5 to RAID6, leave it in
2003 * the XXX-6 layout, don't re-layout.
2004 */
2005 if (info.array.level == 6 && info.new_level == UnSet)
2006 info.new_layout = info.array.layout;
2007 else if (info.array.level == 5 && info.new_level == 6) {
2008 char l[40];
2009 strcpy(l, map_num(r5layout, info.array.layout));
2010 strcat(l, "-6");
2011 info.new_layout = map_name(r6layout, l);
2012 } else {
2013 pr_err("%s is only meaningful when reshaping to RAID6\n", s->layout_str);
2014 rv = 1;
2015 goto release;
2016 }
2017 } else {
2018 int l = info.new_level;
2019 if (l == UnSet)
2020 l = info.array.level;
2021 switch (l) {
2022 case 5:
2023 info.new_layout = map_name(r5layout, s->layout_str);
2024 break;
2025 case 6:
2026 info.new_layout = map_name(r6layout, s->layout_str);
2027 break;
2028 case 10:
2029 info.new_layout = parse_layout_10(s->layout_str);
2030 break;
2031 case LEVEL_FAULTY:
2032 info.new_layout = parse_layout_faulty(s->layout_str);
2033 break;
2034 default:
2035 pr_err("layout not meaningful with this level\n");
2036 rv = 1;
2037 goto release;
2038 }
2039 if (info.new_layout == UnSet) {
2040 pr_err("layout %s not understood for this level\n",
2041 s->layout_str);
2042 rv = 1;
2043 goto release;
2044 }
2045 }
2046
2047 if (array.level == LEVEL_FAULTY) {
2048 if (s->level != UnSet && s->level != array.level) {
2049 pr_err("cannot change level of Faulty device\n");
2050 rv = 1;
2051 }
2052 if (s->chunk) {
2053 pr_err("cannot set chunksize of Faulty device\n");
2054 rv = 1;
2055 }
2056 if (s->raiddisks && s->raiddisks != 1) {
2057 pr_err("cannot set raid_disks of Faulty device\n");
2058 rv = 1;
2059 }
2060 if (s->layout_str) {
2061 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2062 dprintf("Cannot get array information.\n");
2063 goto release;
2064 }
2065 array.layout = info.new_layout;
2066 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2067 pr_err("failed to set new layout\n");
2068 rv = 1;
2069 } else if (c->verbose >= 0)
2070 printf("layout for %s set to %d\n",
2071 devname, array.layout);
2072 }
2073 } else if (array.level == LEVEL_CONTAINER) {
2074 /* This change is to be applied to every array in the
2075 * container. This is only needed when the metadata imposes
2076 * constraints on the various arrays in the container.
2077 * Currently we only know that IMSM requires all arrays
2078 * to have the same number of devices so changing the
2079 * number of devices (On-Line Capacity Expansion) must be
2080 * performed at the level of the container
2081 */
2082 if (fd > 0) {
2083 close(fd);
2084 fd = -1;
2085 }
2086 rv = reshape_container(container, devname, -1, st, &info,
2087 c->force, c->backup_file, c->verbose, 0, 0, 0);
2088 frozen = 0;
2089 } else {
2090 /* get spare devices from external metadata
2091 */
2092 if (st->ss->external) {
2093 struct mdinfo *info2;
2094
2095 info2 = st->ss->container_content(st, subarray);
2096 if (info2) {
2097 info.array.spare_disks =
2098 info2->array.spare_disks;
2099 sysfs_free(info2);
2100 }
2101 }
2102
2103 /* Impose these changes on a single array. First
2104 * check that the metadata is OK with the change. */
2105
2106 if (reshape_super(st, 0, info.new_level,
2107 info.new_layout, info.new_chunk,
2108 info.array.raid_disks, info.delta_disks,
2109 c->backup_file, devname, APPLY_METADATA_CHANGES,
2110 c->verbose)) {
2111 rv = 1;
2112 goto release;
2113 }
2114 sync_metadata(st);
2115 rv = reshape_array(container, fd, devname, st, &info, c->force,
2116 devlist, data_offset, c->backup_file, c->verbose,
2117 0, 0, 0);
2118 frozen = 0;
2119 }
2120 release:
2121 sysfs_free(sra);
2122 if (frozen > 0)
2123 unfreeze(st);
2124 return rv;
2125 }
2126
2127 /* verify_reshape_position()
2128 * Checks that the reshape position recorded in the metadata is not
2129 * farther along than the position reported by md.
2130 * Return value:
2131 * 0 : no valid sysfs entry
2132 * this can happen when the reshape has not been started yet
2133 * (it is started by reshape_array()) or for a raid0 array before takeover
2134 * -1 : error, reshape position is obviously wrong
2135 * 1 : success, reshape progress is correct or has been updated
2136 */
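/* Example with hypothetical numbers: if sync_max reads 10000 (sectors per
 * device) and there are 4 data disks, the md position is 40000 sectors of
 * array space; a metadata reshape_progress of 35000 would be corrected up
 * to 40000, while anything above 40000 is reported as a fatal error.
 */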
2137 static int verify_reshape_position(struct mdinfo *info, int level)
2138 {
2139 int ret_val = 0;
2140 char buf[40];
2141 int rv;
2142
2143 /* read sync_max, failure can mean raid0 array */
2144 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2145
2146 if (rv > 0) {
2147 char *ep;
2148 unsigned long long position = strtoull(buf, &ep, 0);
2149
2150 dprintf("Read sync_max sysfs entry is: %s\n", buf);
2151 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2152 position *= get_data_disks(level,
2153 info->new_layout,
2154 info->array.raid_disks);
2155 if (info->reshape_progress < position) {
2156 dprintf("Corrected reshape progress (%llu) to md position (%llu)\n",
2157 info->reshape_progress, position);
2158 info->reshape_progress = position;
2159 ret_val = 1;
2160 } else if (info->reshape_progress > position) {
2161 pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu).\n",
2162 position, info->reshape_progress);
2163 ret_val = -1;
2164 } else {
2165 dprintf("Reshape position in md and metadata are the same\n");
2166 ret_val = 1;
2167 }
2168 }
2169 } else if (rv == 0) {
2170 /* for valid sysfs entry, 0-length content
2171 * should be indicated as error
2172 */
2173 ret_val = -1;
2174 }
2175
2176 return ret_val;
2177 }
2178
2179 static unsigned long long choose_offset(unsigned long long lo,
2180 unsigned long long hi,
2181 unsigned long long min,
2182 unsigned long long max)
2183 {
2184 /* Choose a new offset between hi and lo.
2185 * It must be between min and max, but
2186 * we would prefer something near the middle of hi/lo, and also
2187 * prefer to be aligned to a big power of 2.
2188 *
2189 * So we start with the middle, then for each bit,
2190 * starting at '1' and increasing, if it is set, we either
2191 * add it or subtract it if possible, preferring the option
2192 * which is furthest from the boundary.
2193 *
2194 * We stop once we get a 1MB alignment. As units are in sectors,
2195 * 1MB = 2*1024 sectors.
2196 */
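/* Example with hypothetical numbers: lo=1000, hi=5000, min=1200, max=4800.
 * The midpoint 3000 is adjusted one set bit at a time and converges to
 * 2048 sectors, which is 1 MiB aligned and still within [min, max].
 */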
2197 unsigned long long choice = (lo + hi) / 2;
2198 unsigned long long bit = 1;
2199
2200 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2201 unsigned long long bigger, smaller;
2202 if (! (bit & choice))
2203 continue;
2204 bigger = choice + bit;
2205 smaller = choice - bit;
2206 if (bigger > max && smaller < min)
2207 break;
2208 if (bigger > max)
2209 choice = smaller;
2210 else if (smaller < min)
2211 choice = bigger;
2212 else if (hi - bigger > smaller - lo)
2213 choice = bigger;
2214 else
2215 choice = smaller;
2216 }
2217 return choice;
2218 }
2219
2220 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2221 char *devname, int delta_disks,
2222 unsigned long long data_offset,
2223 unsigned long long min,
2224 int can_fallback)
2225 {
2226 struct mdinfo *sd;
2227 int dir = 0;
2228 int err = 0;
2229 unsigned long long before, after;
2230
2231 /* Need to find min space before and after so same is used
2232 * on all devices
2233 */
2234 before = UINT64_MAX;
2235 after = UINT64_MAX;
2236 for (sd = sra->devs; sd; sd = sd->next) {
2237 char *dn;
2238 int dfd;
2239 int rv;
2240 struct supertype *st2;
2241 struct mdinfo info2;
2242
2243 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2244 continue;
2245 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2246 dfd = dev_open(dn, O_RDONLY);
2247 if (dfd < 0) {
2248 pr_err("%s: cannot open component %s\n",
2249 devname, dn ? dn : "-unknown-");
2250 goto release;
2251 }
2252 st2 = dup_super(st);
2253 rv = st2->ss->load_super(st2, dfd, NULL);
2254 close(dfd);
2255 if (rv) {
2256 free(st2);
2257 pr_err("%s: cannot get superblock from %s\n",
2258 devname, dn);
2259 goto release;
2260 }
2261 st2->ss->getinfo_super(st2, &info2, NULL);
2262 st2->ss->free_super(st2);
2263 free(st2);
2264 if (info2.space_before == 0 &&
2265 info2.space_after == 0) {
2266 /* Metadata doesn't support data_offset changes */
2267 if (!can_fallback)
2268 pr_err("%s: Metadata version doesn't support data_offset changes\n",
2269 devname);
2270 goto fallback;
2271 }
2272 if (before > info2.space_before)
2273 before = info2.space_before;
2274 if (after > info2.space_after)
2275 after = info2.space_after;
2276
2277 if (data_offset != INVALID_SECTORS) {
2278 if (dir == 0) {
2279 if (info2.data_offset == data_offset) {
2280 pr_err("%s: already has that data_offset\n",
2281 dn);
2282 goto release;
2283 }
2284 if (data_offset < info2.data_offset)
2285 dir = -1;
2286 else
2287 dir = 1;
2288 } else if ((data_offset <= info2.data_offset && dir == 1) ||
2289 (data_offset >= info2.data_offset && dir == -1)) {
2290 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2291 dn);
2292 goto release;
2293 }
2294 }
2295 }
2296 if (before == UINT64_MAX)
2297 /* impossible really, there must be no devices */
2298 return 1;
2299
2300 for (sd = sra->devs; sd; sd = sd->next) {
2301 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2302 unsigned long long new_data_offset;
2303
2304 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2305 continue;
2306 if (delta_disks < 0) {
2307 /* Don't need any space as array is shrinking
2308 * just move data_offset up by min
2309 */
2310 if (data_offset == INVALID_SECTORS)
2311 new_data_offset = sd->data_offset + min;
2312 else {
2313 if (data_offset < sd->data_offset + min) {
2314 pr_err("--data-offset too small for %s\n",
2315 dn);
2316 goto release;
2317 }
2318 new_data_offset = data_offset;
2319 }
2320 } else if (delta_disks > 0) {
2321 /* need space before */
2322 if (before < min) {
2323 if (can_fallback)
2324 goto fallback;
2325 pr_err("Insufficient head-space for reshape on %s\n",
2326 dn);
2327 goto release;
2328 }
2329 if (data_offset == INVALID_SECTORS)
2330 new_data_offset = sd->data_offset - min;
2331 else {
2332 if (data_offset > sd->data_offset - min) {
2333 pr_err("--data-offset too large for %s\n",
2334 dn);
2335 goto release;
2336 }
2337 new_data_offset = data_offset;
2338 }
2339 } else {
2340 if (dir == 0) {
2341 /* can move up or down. If 'data_offset'
2342 * was set we would have already decided,
2343 * so just choose the direction with the most space.
2344 */
2345 if (before > after)
2346 dir = -1;
2347 else
2348 dir = 1;
2349 }
2350 sysfs_set_str(sra, NULL, "reshape_direction",
2351 dir == 1 ? "backwards" : "forwards");
2352 if (dir > 0) {
2353 /* Increase data offset */
2354 if (after < min) {
2355 if (can_fallback)
2356 goto fallback;
2357 pr_err("Insufficient tail-space for reshape on %s\n",
2358 dn);
2359 goto release;
2360 }
2361 if (data_offset != INVALID_SECTORS &&
2362 data_offset < sd->data_offset + min) {
2363 pr_err("--data-offset too small on %s\n",
2364 dn);
2365 goto release;
2366 }
2367 if (data_offset != INVALID_SECTORS)
2368 new_data_offset = data_offset;
2369 else
2370 new_data_offset = choose_offset(sd->data_offset,
2371 sd->data_offset + after,
2372 sd->data_offset + min,
2373 sd->data_offset + after);
2374 } else {
2375 /* Decrease data offset */
2376 if (before < min) {
2377 if (can_fallback)
2378 goto fallback;
2379 pr_err("insufficient head-room on %s\n",
2380 dn);
2381 goto release;
2382 }
2383 if (data_offset != INVALID_SECTORS &&
2384 data_offset < sd->data_offset - min) {
2385 pr_err("--data-offset too small on %s\n",
2386 dn);
2387 goto release;
2388 }
2389 if (data_offset != INVALID_SECTORS)
2390 new_data_offset = data_offset;
2391 else
2392 new_data_offset = choose_offset(sd->data_offset - before,
2393 sd->data_offset,
2394 sd->data_offset - before,
2395 sd->data_offset - min);
2396 }
2397 }
2398 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2399 if (err < 0 && errno == E2BIG) {
2400 /* try again after increasing data size to max */
2401 err = sysfs_set_num(sra, sd, "size", 0);
2402 if (err < 0 && errno == EINVAL &&
2403 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2404 /* some kernels have a bug where you cannot
2405 * use '0' on spare devices. */
2406 sysfs_set_num(sra, sd, "size",
2407 (sra->component_size + after)/2);
2408 }
2409 err = sysfs_set_num(sra, sd, "new_offset",
2410 new_data_offset);
2411 }
2412 if (err < 0) {
2413 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2414 pr_err("data-offset is too big for %s\n",
2415 dn);
2416 goto release;
2417 }
2418 if (sd == sra->devs &&
2419 (errno == ENOENT || errno == E2BIG))
2420 /* Early kernel, no 'new_offset' file,
2421 * or kernel doesn't like us.
2422 * For RAID5/6 this is not fatal
2423 */
2424 return 1;
2425 pr_err("Cannot set new_offset for %s\n",
2426 dn);
2427 break;
2428 }
2429 }
2430 return err;
2431 release:
2432 return -1;
2433 fallback:
2434 /* Just use a backup file */
2435 return 1;
2436 }
2437
2438 static int raid10_reshape(char *container, int fd, char *devname,
2439 struct supertype *st, struct mdinfo *info,
2440 struct reshape *reshape,
2441 unsigned long long data_offset,
2442 int force, int verbose)
2443 {
2444 /* Changing raid_disks, layout, chunksize or possibly
2445 * just data_offset for a RAID10.
2446 * We must always change data_offset. We change by at least
2447 * ->min_offset_change which is the largest of the old and new
2448 * chunk sizes.
2449 * If raid_disks is increasing, then data_offset must decrease
2450 * by at least this copy size.
2451 * If raid_disks is unchanged, data_offset must increase or
2452 * decrease by at least min_offset_change but preferably by much more.
2453 * We choose half of the available space.
2454 * If raid_disks is decreasing, data_offset must increase by
2455 * at least min_offset_change. To allow for this, component_size
2456 * must be decreased by the same amount.
2457 *
2458 * So we calculate the required minimum and direction, possibly
2459 * reduce the component_size, then iterate through the devices
2460 * and set the new_data_offset.
2461 * If that all works, we set chunk_size, layout, raid_disks, and start
2462 * 'reshape'
2463 */
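/* Example with hypothetical numbers: with 512 KiB chunks (old and new),
 * min_offset_change is 1024 sectors. Adding devices moves each member's
 * new_data_offset down by at least 1024 sectors (using head-space);
 * removing devices moves it up by at least 1024 sectors and shrinks
 * component_size by the same amount.
 */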
2464 struct mdinfo *sra;
2465 unsigned long long min;
2466 int err = 0;
2467
2468 sra = sysfs_read(fd, NULL,
2469 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2470 );
2471 if (!sra) {
2472 pr_err("%s: Cannot get array details from sysfs\n",
2473 devname);
2474 goto release;
2475 }
2476 min = reshape->min_offset_change;
2477
2478 if (info->delta_disks)
2479 sysfs_set_str(sra, NULL, "reshape_direction",
2480 info->delta_disks < 0 ? "backwards" : "forwards");
2481 if (info->delta_disks < 0 &&
2482 info->space_after < min) {
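/* Note: sra->component_size is in sectors while the sysfs
 * component_size attribute takes KiB, hence the division by 2 below. */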
2483 int rv = sysfs_set_num(sra, NULL, "component_size",
2484 (sra->component_size -
2485 min)/2);
2486 if (rv) {
2487 pr_err("cannot reduce component size\n");
2488 goto release;
2489 }
2490 }
2491 err = set_new_data_offset(sra, st, devname, info->delta_disks, data_offset,
2492 min, 0);
2493 if (err == 1) {
2494 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2495 cont_err("supported on this kernel\n");
2496 err = -1;
2497 }
2498 if (err < 0)
2499 goto release;
2500
2501 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2502 err = errno;
2503 if (!err && sysfs_set_num(sra, NULL, "layout", reshape->after.layout) < 0)
2504 err = errno;
2505 if (!err && sysfs_set_num(sra, NULL, "raid_disks",
2506 info->array.raid_disks + info->delta_disks) < 0)
2507 err = errno;
2508 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2509 err = errno;
2510 if (err) {
2511 pr_err("Cannot set array shape for %s\n",
2512 devname);
2513 if (err == EBUSY &&
2514 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2515 cont_err(" Bitmap must be removed before shape can be changed\n");
2516 goto release;
2517 }
2518 sysfs_free(sra);
2519 return 0;
2520 release:
2521 sysfs_free(sra);
2522 return 1;
2523 }
2524
2525 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2526 {
2527 struct mdinfo *sra, *sd;
2528 /* Initialisation to silence compiler warning */
2529 unsigned long long min_space_before = 0, min_space_after = 0;
2530 int first = 1;
2531
2532 sra = sysfs_read(fd, NULL, GET_DEVS);
2533 if (!sra)
2534 return;
2535 for (sd = sra->devs; sd; sd = sd->next) {
2536 char *dn;
2537 int dfd;
2538 struct supertype *st2;
2539 struct mdinfo info2;
2540
2541 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2542 continue;
2543 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2544 dfd = dev_open(dn, O_RDONLY);
2545 if (dfd < 0)
2546 break;
2547 st2 = dup_super(st);
2548 if (st2->ss->load_super(st2, dfd, NULL)) {
2549 close(dfd);
2550 free(st2);
2551 break;
2552 }
2553 close(dfd);
2554 st2->ss->getinfo_super(st2, &info2, NULL);
2555 st2->ss->free_super(st2);
2556 free(st2);
2557 if (first ||
2558 min_space_before > info2.space_before)
2559 min_space_before = info2.space_before;
2560 if (first ||
2561 min_space_after > info2.space_after)
2562 min_space_after = info2.space_after;
2563 first = 0;
2564 }
2565 if (sd == NULL && !first) {
2566 info->space_after = min_space_after;
2567 info->space_before = min_space_before;
2568 }
2569 sysfs_free(sra);
2570 }
2571
2572 static void update_cache_size(char *container, struct mdinfo *sra,
2573 struct mdinfo *info,
2574 int disks, unsigned long long blocks)
2575 {
2576 /* Check that the internal stripe cache is
2577 * large enough, or it won't work.
2578 * It must hold at least 4 stripes of the larger
2579 * chunk size
2580 */
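/* Example with hypothetical numbers: 512 KiB chunks give
 * cache = 4 * 524288 / 512 = 4096 sectors; with blocks = 4096 and
 * 4 disks the 16 + blocks/disks floor (1040) is already met, and
 * 4096 sectors / 8 = 512 pages, so stripe_cache_size becomes 513
 * if it was smaller.
 */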
2581 unsigned long cache;
2582 cache = max(info->array.chunk_size, info->new_chunk);
2583 cache *= 4; /* 4 stripes minimum */
2584 cache /= 512; /* convert to sectors */
2585 /* make sure there is room for 'blocks' with a bit to spare */
2586 if (cache < 16 + blocks / disks)
2587 cache = 16 + blocks / disks;
2588 cache /= (4096/512); /* Convert from sectors to pages */
2589
2590 if (sra->cache_size < cache)
2591 subarray_set_num(container, sra, "stripe_cache_size",
2592 cache+1);
2593 }
2594
2595 static int impose_reshape(struct mdinfo *sra,
2596 struct mdinfo *info,
2597 struct supertype *st,
2598 int fd,
2599 int restart,
2600 char *devname, char *container,
2601 struct reshape *reshape)
2602 {
2603 struct mdu_array_info_s array;
2604
2605 sra->new_chunk = info->new_chunk;
2606
2607 if (restart) {
2608 /* For external metadata the checkpoint saved by mdmon can be lost
2609 * or missed (e.g. due to a crash). Check whether md has progressed
2610 * farther through the restart than the metadata indicates.
2611 * If so, the metadata information is obsolete.
2612 */
2613 if (st->ss->external)
2614 verify_reshape_position(info, reshape->level);
2615 sra->reshape_progress = info->reshape_progress;
2616 } else {
2617 sra->reshape_progress = 0;
2618 if (reshape->after.data_disks < reshape->before.data_disks)
2619 /* start from the end of the new array */
2620 sra->reshape_progress = (sra->component_size
2621 * reshape->after.data_disks);
2622 }
2623
2624 ioctl(fd, GET_ARRAY_INFO, &array);
2625 if (info->array.chunk_size == info->new_chunk &&
2626 reshape->before.layout == reshape->after.layout &&
2627 st->ss->external == 0) {
2628 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2629 array.raid_disks = reshape->after.data_disks + reshape->parity;
2630 if (!restart &&
2631 ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2632 int err = errno;
2633
2634 pr_err("Cannot set device shape for %s: %s\n",
2635 devname, strerror(errno));
2636
2637 if (err == EBUSY &&
2638 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2639 cont_err("Bitmap must be removed before shape can be changed\n");
2640
2641 goto release;
2642 }
2643 } else if (!restart) {
2644 /* set them all just in case some old 'new_*' value
2645 * persists from some earlier problem.
2646 */
2647 int err = 0;
2648 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2649 err = errno;
2650 if (!err && sysfs_set_num(sra, NULL, "layout",
2651 reshape->after.layout) < 0)
2652 err = errno;
2653 if (!err && subarray_set_num(container, sra, "raid_disks",
2654 reshape->after.data_disks +
2655 reshape->parity) < 0)
2656 err = errno;
2657 if (err) {
2658 pr_err("Cannot set device shape for %s\n",
2659 devname);
2660
2661 if (err == EBUSY &&
2662 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2663 cont_err("Bitmap must be removed before shape can be changed\n");
2664 goto release;
2665 }
2666 }
2667 return 0;
2668 release:
2669 return -1;
2670 }
2671
2672 static int impose_level(int fd, int level, char *devname, int verbose)
2673 {
2674 char *c;
2675 struct mdu_array_info_s array;
2676 struct mdinfo info;
2677 sysfs_init(&info, fd, NULL);
2678
2679 ioctl(fd, GET_ARRAY_INFO, &array);
2680 if (level == 0 &&
2681 (array.level >= 4 && array.level <= 6)) {
2682 /* To convert to RAID0 we need to fail and
2683 * remove any non-data devices. */
2684 int found = 0;
2685 int d;
2686 int data_disks = array.raid_disks - 1;
2687 if (array.level == 6)
2688 data_disks -= 1;
2689 if (array.level == 5 &&
2690 array.layout != ALGORITHM_PARITY_N)
2691 return -1;
2692 if (array.level == 6 &&
2693 array.layout != ALGORITHM_PARITY_N_6)
2694 return -1;
2695 sysfs_set_str(&info, NULL, "sync_action", "idle");
2696 /* First remove any spares so no recovery starts */
2697 for (d = 0, found = 0;
2698 d < MAX_DISKS && found < array.nr_disks;
2699 d++) {
2700 mdu_disk_info_t disk;
2701 disk.number = d;
2702 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2703 continue;
2704 if (disk.major == 0 && disk.minor == 0)
2705 continue;
2706 found++;
2707 if ((disk.state & (1 << MD_DISK_ACTIVE))
2708 && disk.raid_disk < data_disks)
2709 /* keep this */
2710 continue;
2711 ioctl(fd, HOT_REMOVE_DISK,
2712 makedev(disk.major, disk.minor));
2713 }
2714 /* Now fail anything left */
2715 ioctl(fd, GET_ARRAY_INFO, &array);
2716 for (d = 0, found = 0;
2717 d < MAX_DISKS && found < array.nr_disks;
2718 d++) {
2719 int cnt;
2720 mdu_disk_info_t disk;
2721 disk.number = d;
2722 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2723 continue;
2724 if (disk.major == 0 && disk.minor == 0)
2725 continue;
2726 found++;
2727 if ((disk.state & (1 << MD_DISK_ACTIVE))
2728 && disk.raid_disk < data_disks)
2729 /* keep this */
2730 continue;
2731 ioctl(fd, SET_DISK_FAULTY,
2732 makedev(disk.major, disk.minor));
2733 cnt = 5;
2734 while (ioctl(fd, HOT_REMOVE_DISK,
2735 makedev(disk.major, disk.minor)) < 0
2736 && errno == EBUSY
2737 && cnt--) {
2738 usleep(10000);
2739 }
2740 }
2741 }
2742 c = map_num(pers, level);
2743 if (c) {
2744 int err = sysfs_set_str(&info, NULL, "level", c);
2745 if (err) {
2746 err = errno;
2747 pr_err("%s: could not set level to %s\n",
2748 devname, c);
2749 if (err == EBUSY &&
2750 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2751 cont_err("Bitmap must be removed before level can be changed\n");
2752 return err;
2753 }
2754 if (verbose >= 0)
2755 pr_err("level of %s changed to %s\n",
2756 devname, c);
2757 }
2758 return 0;
2759 }
2760
2761 int sigterm = 0;
2762 static void catch_term(int sig)
2763 {
2764 sigterm = 1;
2765 }
2766
2767 static int continue_via_systemd(char *devnm)
2768 {
2769 int skipped, i, pid, status;
2770 char pathbuf[1024];
2771 /* In a systemd/udev world, it is best to get systemd to
2772 * run "mdadm --grow --continue" rather than running in the
2773 * background.
2774 */
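/* For an array known to the kernel as e.g. md127 (hypothetical name)
 * this asks systemd to start mdadm-grow-continue@md127.service; if
 * that fails, the caller falls back to monitoring the reshape itself.
 */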
2775 switch(fork()) {
2776 case 0:
2777 /* FIXME yuk. CLOSE_EXEC?? */
2778 skipped = 0;
2779 for (i = 3; skipped < 20; i++)
2780 if (close(i) < 0)
2781 skipped++;
2782 else
2783 skipped = 0;
2784
2785 /* Don't want to see error messages from
2786 * systemctl. If the service doesn't exist,
2787 * we fork ourselves.
2788 */
2789 close(2);
2790 open("/dev/null", O_WRONLY);
2791 snprintf(pathbuf, sizeof(pathbuf), "mdadm-grow-continue@%s.service",
2792 devnm);
2793 status = execl("/usr/bin/systemctl", "systemctl",
2794 "start",
2795 pathbuf, NULL);
2796 status = execl("/bin/systemctl", "systemctl", "start",
2797 pathbuf, NULL);
2798 exit(1);
2799 case -1: /* Just do it ourselves. */
2800 break;
2801 default: /* parent - good */
2802 pid = wait(&status);
2803 if (pid >= 0 && status == 0)
2804 return 1;
2805 }
2806 return 0;
2807 }
2808
2809 static int reshape_array(char *container, int fd, char *devname,
2810 struct supertype *st, struct mdinfo *info,
2811 int force, struct mddev_dev *devlist,
2812 unsigned long long data_offset,
2813 char *backup_file, int verbose, int forked,
2814 int restart, int freeze_reshape)
2815 {
2816 struct reshape reshape;
2817 int spares_needed;
2818 char *msg;
2819 int orig_level = UnSet;
2820 int odisks;
2821 int delayed;
2822
2823 struct mdu_array_info_s array;
2824 char *c;
2825
2826 struct mddev_dev *dv;
2827 int added_disks;
2828
2829 int *fdlist = NULL;
2830 unsigned long long *offsets = NULL;
2831 int d;
2832 int nrdisks;
2833 int err;
2834 unsigned long blocks;
2835 unsigned long long array_size;
2836 int done;
2837 struct mdinfo *sra = NULL;
2838 char buf[20];
2839
2840 /* when reshaping a RAID0, the component_size might be zero.
2841 * So try to fix that up.
2842 */
2843 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2844 dprintf("Cannot get array information.\n");
2845 goto release;
2846 }
2847 if (array.level == 0 && info->component_size == 0) {
2848 get_dev_size(fd, NULL, &array_size);
2849 info->component_size = array_size / array.raid_disks;
2850 }
2851
2852 if (array.level == 10)
2853 /* Need space_after info */
2854 get_space_after(fd, st, info);
2855
2856 if (info->reshape_active) {
2857 int new_level = info->new_level;
2858 info->new_level = UnSet;
2859 if (info->delta_disks > 0)
2860 info->array.raid_disks -= info->delta_disks;
2861 msg = analyse_change(devname, info, &reshape);
2862 info->new_level = new_level;
2863 if (info->delta_disks > 0)
2864 info->array.raid_disks += info->delta_disks;
2865 if (!restart)
2866 /* Make sure the array isn't read-only */
2867 ioctl(fd, RESTART_ARRAY_RW, 0);
2868 } else
2869 msg = analyse_change(devname, info, &reshape);
2870 if (msg) {
2871 /* if msg == "", error has already been printed */
2872 if (msg[0])
2873 pr_err("%s\n", msg);
2874 goto release;
2875 }
2876 if (restart &&
2877 (reshape.level != info->array.level ||
2878 reshape.before.layout != info->array.layout ||
2879 reshape.before.data_disks + reshape.parity
2880 != info->array.raid_disks - max(0, info->delta_disks))) {
2881 pr_err("reshape info is not in native format - cannot continue.\n");
2882 goto release;
2883 }
2884
2885 if (st->ss->external && restart && (info->reshape_progress == 0) &&
2886 !((sysfs_get_str(info, NULL, "sync_action", buf, sizeof(buf)) > 0) &&
2887 (strncmp(buf, "reshape", 7) == 0))) {
2888 /* When a reshape is restarted from '0' (the very beginning of the
2889 * array), it is possible that for external metadata the reshape and
2890 * array configuration were never applied.
2891 * Check whether md agrees that the reshape restarts from 0. If so,
2892 * this is a regular reshape start after the reshape in the metadata
2893 * was switched to the next array only.
2894 */
2895 if ((verify_reshape_position(info, reshape.level) >= 0) &&
2896 (info->reshape_progress == 0))
2897 restart = 0;
2898 }
2899 if (restart) {
2900 /* reshape already started. just skip to monitoring the reshape */
2901 if (reshape.backup_blocks == 0)
2902 return 0;
2903 if (restart & RESHAPE_NO_BACKUP)
2904 return 0;
2905
2906 /* Need 'sra' down at 'started:' */
2907 sra = sysfs_read(fd, NULL,
2908 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
2909 GET_CACHE);
2910 if (!sra) {
2911 pr_err("%s: Cannot get array details from sysfs\n",
2912 devname);
2913 goto release;
2914 }
2915
2916 if (!backup_file)
2917 backup_file = locate_backup(sra->sys_name);
2918
2919 goto started;
2920 }
2921 /* The container is frozen but the array may not be.
2922 * So freeze the array so spares don't get put to the wrong use
2923 * FIXME there should probably be a cleaner separation between
2924 * freeze_array and freeze_container.
2925 */
2926 sysfs_freeze_array(info);
2927 /* Check we have enough spares to not be degraded */
2928 added_disks = 0;
2929 for (dv = devlist; dv ; dv=dv->next)
2930 added_disks++;
2931 spares_needed = max(reshape.before.data_disks,
2932 reshape.after.data_disks)
2933 + reshape.parity - array.raid_disks;
2934
2935 if (!force &&
2936 info->new_level > 1 && info->array.level > 1 &&
2937 spares_needed > info->array.spare_disks + added_disks) {
2938 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
2939 " Use --force to over-ride this check.\n",
2940 spares_needed,
2941 spares_needed == 1 ? "" : "s",
2942 info->array.spare_disks + added_disks);
2943 goto release;
2944 }
2945 /* Check we have enough spares to not fail */
2946 spares_needed = max(reshape.before.data_disks,
2947 reshape.after.data_disks)
2948 - array.raid_disks;
2949 if ((info->new_level > 1 || info->new_level == 0) &&
2950 spares_needed > info->array.spare_disks +added_disks) {
2951 pr_err("Need %d spare%s to create working array, and only have %d.\n",
2952 spares_needed,
2953 spares_needed == 1 ? "" : "s",
2954 info->array.spare_disks + added_disks);
2955 goto release;
2956 }
2957
2958 if (reshape.level != array.level) {
2959 int err = impose_level(fd, reshape.level, devname, verbose);
2960 if (err)
2961 goto release;
2962 info->new_layout = UnSet; /* after level change,
2963 * layout is meaningless */
2964 orig_level = array.level;
2965 sysfs_freeze_array(info);
2966
2967 if (reshape.level > 0 && st->ss->external) {
2968 /* make sure mdmon is aware of the new level */
2969 if (mdmon_running(container))
2970 flush_mdmon(container);
2971
2972 if (!mdmon_running(container))
2973 start_mdmon(container);
2974 ping_monitor(container);
2975 if (mdmon_running(container) &&
2976 st->update_tail == NULL)
2977 st->update_tail = &st->updates;
2978 }
2979 }
2980 /* ->reshape_super might have chosen some spares from the
2981 * container that it wants to be part of the new array.
2982 * We can collect them with ->container_content and give
2983 * them to the kernel.
2984 */
2985 if (st->ss->reshape_super && st->ss->container_content) {
2986 char *subarray = strchr(info->text_version+1, '/')+1;
2987 struct mdinfo *info2 =
2988 st->ss->container_content(st, subarray);
2989 struct mdinfo *d;
2990
2991 if (info2) {
2992 sysfs_init(info2, fd, st->devnm);
2993 /* When increasing number of devices, we need to set
2994 * new raid_disks before adding these, or they might
2995 * be rejected.
2996 */
2997 if (reshape.backup_blocks &&
2998 reshape.after.data_disks > reshape.before.data_disks)
2999 subarray_set_num(container, info2, "raid_disks",
3000 reshape.after.data_disks +
3001 reshape.parity);
3002 for (d = info2->devs; d; d = d->next) {
3003 if (d->disk.state == 0 &&
3004 d->disk.raid_disk >= 0) {
3005 /* This is a spare that wants to
3006 * be part of the array.
3007 */
3008 add_disk(fd, st, info2, d);
3009 }
3010 }
3011 sysfs_free(info2);
3012 }
3013 }
3014 /* We might have been given some devices to add to the
3015 * array. Now that the array has been changed to the right
3016 * level and frozen, we can safely add them.
3017 */
3018 if (devlist) {
3019 if (Manage_subdevs(devname, fd, devlist, verbose,
3020 0, NULL, 0))
3021 goto release;
3022 }
3023
3024 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
3025 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
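/* e.g. (hypothetical) 4 data disks with a 512 KiB chunk give
 * 4 * 1024 = 4096 sectors (2 MiB) per backup unit. */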
3026 if (reshape.backup_blocks == 0) {
3027 /* No restriping needed, but we might need to impose
3028 * some more changes: layout, raid_disks, chunk_size
3029 */
3030 /* read current array info */
3031 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
3032 dprintf("Cannot get array information.\n");
3033 goto release;
3034 }
3035 /* compare current array info with the new values and,
3036 * if they differ, update to the new values */
3037 if (info->new_layout != UnSet &&
3038 info->new_layout != array.layout) {
3039 array.layout = info->new_layout;
3040 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
3041 pr_err("failed to set new layout\n");
3042 goto release;
3043 } else if (verbose >= 0)
3044 printf("layout for %s set to %d\n",
3045 devname, array.layout);
3046 }
3047 if (info->delta_disks != UnSet &&
3048 info->delta_disks != 0 &&
3049 array.raid_disks != (info->array.raid_disks + info->delta_disks)) {
3050 array.raid_disks += info->delta_disks;
3051 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
3052 pr_err("failed to set raid disks\n");
3053 goto release;
3054 } else if (verbose >= 0) {
3055 printf("raid_disks for %s set to %d\n",
3056 devname, array.raid_disks);
3057 }
3058 }
3059 if (info->new_chunk != 0 &&
3060 info->new_chunk != array.chunk_size) {
3061 if (sysfs_set_num(info, NULL,
3062 "chunk_size", info->new_chunk) != 0) {
3063 pr_err("failed to set chunk size\n");
3064 goto release;
3065 } else if (verbose >= 0)
3066 printf("chunk size for %s set to %d\n",
3067 devname, array.chunk_size);
3068 }
3069 unfreeze(st);
3070 return 0;
3071 }
3072
3073 /*
3074 * There are three possibilities.
3075 * 1/ The array will shrink.
3076 * We need to ensure the reshape will pause before reaching
3077 * the 'critical section'. We also need to fork and wait for
3078 * that to happen. When it does we
3079 * suspend/backup/complete/unfreeze
3080 *
3081 * 2/ The array will not change size.
3082 * This requires that we keep a backup of a sliding window
3083 * so that we can restore data after a crash. So we need
3084 * to fork and monitor progress.
3085 * In future we will allow the data_offset to change, so
3086 * a sliding backup becomes unnecessary.
3087 *
3088 * 3/ The array will grow. This is relatively easy.
3089 * However the kernel's restripe routines will cheerfully
3090 * overwrite some early data before it is safe. So we
3091 * need to make a backup of the early parts of the array
3092 * and be ready to restore it if rebuild aborts very early.
3093 * For externally managed metadata, we still need a forked
3094 * child to monitor the reshape and suspend IO over the region
3095 * that is being reshaped.
3096 *
3097 * We back up data by writing it to one spare, or to a
3098 * file which was given on the command line.
3099 *
3100 * In each case, we first make sure that storage is available
3101 * for the required backup.
3102 * Then we:
3103 * - request the shape change.
3104 * - fork to handle backup etc.
3105 */
3106 /* Check that we can hold all the data */
3107 get_dev_size(fd, NULL, &array_size);
3108 if (reshape.new_size < (array_size/512)) {
3109 pr_err("this change will reduce the size of the array.\n"
3110 " use --grow --array-size first to truncate array.\n"
3111 " e.g. mdadm --grow %s --array-size %llu\n",
3112 devname, reshape.new_size/2);
3113 goto release;
3114 }
3115
3116 if (array.level == 10) {
3117 /* Reshaping RAID10 does not require any data backup by
3118 * user-space. Instead it requires that the data_offset
3119 * is changed to avoid the need for backup.
3120 * So this is handled very separately
3121 */
3122 if (restart)
3123 /* Nothing to do. */
3124 return 0;
3125 return raid10_reshape(container, fd, devname, st, info,
3126 &reshape, data_offset,
3127 force, verbose);
3128 }
3129 sra = sysfs_read(fd, NULL,
3130 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3131 GET_CACHE);
3132 if (!sra) {
3133 pr_err("%s: Cannot get array details from sysfs\n",
3134 devname);
3135 goto release;
3136 }
3137
3138 if (!backup_file)
3139 switch(set_new_data_offset(sra, st, devname,
3140 reshape.after.data_disks - reshape.before.data_disks,
3141 data_offset,
3142 reshape.min_offset_change, 1)) {
3143 case -1:
3144 goto release;
3145 case 0:
3146 /* Updated data_offset, so it's easy now */
3147 update_cache_size(container, sra, info,
3148 min(reshape.before.data_disks,
3149 reshape.after.data_disks),
3150 reshape.backup_blocks);
3151
3152 /* Right, everything seems fine. Let's kick things off.
3153 */
3154 sync_metadata(st);
3155
3156 if (impose_reshape(sra, info, st, fd, restart,
3157 devname, container, &reshape) < 0)
3158 goto release;
3159 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3160 struct mdinfo *sd;
3161 if (errno != EINVAL) {
3162 pr_err("Failed to initiate reshape!\n");
3163 goto release;
3164 }
3165 /* revert data_offset and try the old way */
3166 for (sd = sra->devs; sd; sd = sd->next) {
3167 sysfs_set_num(sra, sd, "new_offset",
3168 sd->data_offset);
3169 sysfs_set_str(sra, NULL, "reshape_direction",
3170 "forwards");
3171 }
3172 break;
3173 }
3174 if (info->new_level == reshape.level)
3175 return 0;
3176 /* need to adjust level when reshape completes */
3177 switch(fork()) {
3178 case -1: /* ignore error, but don't wait */
3179 return 0;
3180 default: /* parent */
3181 return 0;
3182 case 0:
3183 map_fork();
3184 break;
3185 }
3186 close(fd);
3187 wait_reshape(sra);
3188 fd = open_dev(sra->sys_name);
3189 if (fd >= 0)
3190 impose_level(fd, info->new_level, devname, verbose);
3191 return 0;
3192 case 1: /* Couldn't set data_offset, try the old way */
3193 if (data_offset != INVALID_SECTORS) {
3194 pr_err("Cannot update data_offset on this array\n");
3195 goto release;
3196 }
3197 break;
3198 }
3199
3200 started:
3201 /* Decide how many blocks (sectors) for a reshape
3202 * unit. The number we have so far is just a minimum
3203 */
3204 blocks = reshape.backup_blocks;
3205 if (reshape.before.data_disks ==
3206 reshape.after.data_disks) {
3207 /* Make 'blocks' bigger for better throughput, but
3208 * not so big that we reject it below.
3209 * Try for 16 megabytes
3210 */
3211 while (blocks * 32 < sra->component_size &&
3212 blocks < 16*1024*2)
3213 blocks *= 2;
3214 } else
3215 pr_err("Need to back up %luK of critical section...\n", blocks/2);
3216
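/* Example with hypothetical numbers: starting from backup_blocks = 4096
 * sectors on a large array, 'blocks' doubles to 8192, 16384 and finally
 * 32768 sectors (16 MiB), where the loop above stops. */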
3217 if (blocks >= sra->component_size/2) {
3218 pr_err("%s: Something wrong - reshape aborted\n",
3219 devname);
3220 goto release;
3221 }
3222
3223 /* Now we need to open all these devices so we can read/write.
3224 */
3225 nrdisks = max(reshape.before.data_disks,
3226 reshape.after.data_disks) + reshape.parity
3227 + sra->array.spare_disks;
3228 fdlist = xcalloc((1+nrdisks), sizeof(int));
3229 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3230
3231 odisks = reshape.before.data_disks + reshape.parity;
3232 d = reshape_prepare_fdlist(devname, sra, odisks,
3233 nrdisks, blocks, backup_file,
3234 fdlist, offsets);
3235 if (d < odisks) {
3236 goto release;
3237 }
3238 if ((st->ss->manage_reshape == NULL) ||
3239 (st->ss->recover_backup == NULL)) {
3240 if (backup_file == NULL) {
3241 if (reshape.after.data_disks <=
3242 reshape.before.data_disks) {
3243 pr_err("%s: Cannot grow - need backup-file\n",
3244 devname);
3245 pr_err(" Please provide one with \"--backup=...\"\n");
3246 goto release;
3247 } else if (d == odisks) {
3248 pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname);
3249 goto release;
3250 }
3251 } else {
3252 if (!reshape_open_backup_file(backup_file, fd, devname,
3253 (signed)blocks,
3254 fdlist+d, offsets+d,
3255 sra->sys_name,
3256 restart)) {
3257 goto release;
3258 }
3259 d++;
3260 }
3261 }
3262
3263 update_cache_size(container, sra, info,
3264 min(reshape.before.data_disks, reshape.after.data_disks),
3265 blocks);
3266
3267 /* Right, everything seems fine. Let's kick things off.
3268 * If only changing raid_disks, use ioctl, else use
3269 * sysfs.
3270 */
3271 sync_metadata(st);
3272
3273 if (impose_reshape(sra, info, st, fd, restart,
3274 devname, container, &reshape) < 0)
3275 goto release;
3276
3277 err = start_reshape(sra, restart, reshape.before.data_disks,
3278 reshape.after.data_disks);
3279 if (err) {
3280 pr_err("Cannot %s reshape for %s\n",
3281 restart ? "continue" : "start",
3282 devname);
3283 goto release;
3284 }
3285 if (restart)
3286 sysfs_set_str(sra, NULL, "array_state", "active");
3287 if (freeze_reshape) {
3288 free(fdlist);
3289 free(offsets);
3290 sysfs_free(sra);
3291 pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n",
3292 sra->reshape_progress);
3293 return 1;
3294 }
3295
3296 if (!forked && !check_env("MDADM_NO_SYSTEMCTL"))
3297 if (continue_via_systemd(container ?: sra->sys_name)) {
3298 free(fdlist);
3299 free(offsets);
3300 sysfs_free(sra);
3301 return 0;
3302 }
3303
3304 /* Now we just need to kick off the reshape and watch, while
3305 * handling backups of the data...
3306 * This is all done by a forked background process.
3307 */
3308 switch(forked ? 0 : fork()) {
3309 case -1:
3310 pr_err("Cannot run child to monitor reshape: %s\n",
3311 strerror(errno));
3312 abort_reshape(sra);
3313 goto release;
3314 default:
3315 free(fdlist);
3316 free(offsets);
3317 sysfs_free(sra);
3318 return 0;
3319 case 0:
3320 map_fork();
3321 break;
3322 }
3323
3324 /* If another array on the same devices is busy, the
3325 * reshape will wait for them. This would mean that
3326 * the first section that we suspend will stay suspended
3327 * for a long time. So check on that possibility
3328 * by looking for "DELAYED" in /proc/mdstat, and if found,
3329 * wait a while
3330 */
3331 do {
3332 struct mdstat_ent *mds, *m;
3333 delayed = 0;
3334 mds = mdstat_read(1, 0);
3335 for (m = mds; m; m = m->next)
3336 if (strcmp(m->devnm, sra->sys_name) == 0) {
3337 if (m->resync &&
3338 m->percent == RESYNC_DELAYED)
3339 delayed = 1;
3340 if (m->resync == 0)
3341 /* Haven't started the reshape thread
3342 * yet, wait a bit
3343 */
3344 delayed = 2;
3345 break;
3346 }
3347 free_mdstat(mds);
3348 if (delayed == 1 && get_linux_version() < 3007000) {
3349 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3350 " You might experience problems until other reshapes complete.\n");
3351 delayed = 0;
3352 }
3353 if (delayed)
3354 mdstat_wait(30 - (delayed-1) * 25);
3355 } while (delayed);
3356 mdstat_close();
3357 close(fd);
3358 if (check_env("MDADM_GROW_VERIFY"))
3359 fd = open(devname, O_RDONLY | O_DIRECT);
3360 else
3361 fd = -1;
3362 mlockall(MCL_FUTURE);
3363
3364 signal(SIGTERM, catch_term);
3365
3366 if (st->ss->external) {
3367 /* metadata handler takes it from here */
3368 done = st->ss->manage_reshape(
3369 fd, sra, &reshape, st, blocks,
3370 fdlist, offsets,
3371 d - odisks, fdlist+odisks,
3372 offsets+odisks);
3373 } else
3374 done = child_monitor(
3375 fd, sra, &reshape, st, blocks,
3376 fdlist, offsets,
3377 d - odisks, fdlist+odisks,
3378 offsets+odisks);
3379
3380 free(fdlist);
3381 free(offsets);
3382
3383 if (backup_file && done) {
3384 char *bul;
3385 bul = make_backup(sra->sys_name);
3386 if (bul) {
3387 char buf[1024];
3388 int l = readlink(bul, buf, sizeof(buf) - 1);
3389 if (l > 0) {
3390 buf[l]=0;
3391 unlink(buf);
3392 }
3393 unlink(bul);
3394 free(bul);
3395 }
3396 unlink(backup_file);
3397 }
3398 if (!done) {
3399 abort_reshape(sra);
3400 goto out;
3401 }
3402
3403 if (!st->ss->external &&
3404 !(reshape.before.data_disks != reshape.after.data_disks
3405 && info->custom_array_size) &&
3406 info->new_level == reshape.level &&
3407 !forked) {
3408 /* no need to wait for the reshape to finish as
3409 * there is nothing more to do.
3410 */
3411 sysfs_free(sra);
3412 exit(0);
3413 }
3414 wait_reshape(sra);
3415
3416 if (st->ss->external) {
3417 /* Re-load the metadata as much could have changed */
3418 int cfd = open_dev(st->container_devnm);
3419 if (cfd >= 0) {
3420 flush_mdmon(container);
3421 st->ss->free_super(st);
3422 st->ss->load_container(st, cfd, container);
3423 close(cfd);
3424 }
3425 }
3426
3427 /* Set the new array size if required; custom_array_size is used
3428 * by this metadata.
3429 */
3430 if (reshape.before.data_disks !=
3431 reshape.after.data_disks &&
3432 info->custom_array_size)
3433 set_array_size(st, info, info->text_version);
3434
3435 if (info->new_level != reshape.level) {
3436 if (fd < 0)
3437 fd = open(devname, O_RDONLY);
3438 impose_level(fd, info->new_level, devname, verbose);
3439 close(fd);
3440 if (info->new_level == 0)
3441 st->update_tail = NULL;
3442 }
3443 out:
3444 sysfs_free(sra);
3445 if (forked)
3446 return 0;
3447 unfreeze(st);
3448 exit(0);
3449
3450 release:
3451 free(fdlist);
3452 free(offsets);
3453 if (orig_level != UnSet && sra) {
3454 c = map_num(pers, orig_level);
3455 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3456 pr_err("aborting level change\n");
3457 }
3458 sysfs_free(sra);
3459 if (!forked)
3460 unfreeze(st);
3461 return 1;
3462 }
3463
3464 /* mdfd handle is passed to be closed in child process (after fork).
3465 */
3466 int reshape_container(char *container, char *devname,
3467 int mdfd,
3468 struct supertype *st,
3469 struct mdinfo *info,
3470 int force,
3471 char *backup_file, int verbose,
3472 int forked, int restart, int freeze_reshape)
3473 {
3474 struct mdinfo *cc = NULL;
3475 int rv = restart;
3476 char last_devnm[32] = "";
3477
3478 /* component_size is not meaningful for a container,
3479 * so pass '0' meaning 'no change'
3480 */
3481 if (!restart &&
3482 reshape_super(st, 0, info->new_level,
3483 info->new_layout, info->new_chunk,
3484 info->array.raid_disks, info->delta_disks,
3485 backup_file, devname, APPLY_METADATA_CHANGES,
3486 verbose)) {
3487 unfreeze(st);
3488 return 1;
3489 }
3490
3491 sync_metadata(st);
3492
3493 /* ping monitor to be sure that update is on disk
3494 */
3495 ping_monitor(container);
3496
3497 if (!forked && !freeze_reshape && !check_env("MDADM_NO_SYSTEMCTL"))
3498 if (continue_via_systemd(container))
3499 return 0;
3500
3501 switch (forked ? 0 : fork()) {
3502 case -1: /* error */
3503 perror("Cannot fork to complete reshape\n");
3504 unfreeze(st);
3505 return 1;
3506 default: /* parent */
3507 if (!freeze_reshape)
3508 printf("%s: multi-array reshape continues in background\n", Name);
3509 return 0;
3510 case 0: /* child */
3511 map_fork();
3512 break;
3513 }
3514
3515 /* close unused handle in child process
3516 */
3517 if (mdfd > -1)
3518 close(mdfd);
3519
3520 while(1) {
3521 /* For each member array with reshape_active,
3522 * we need to perform the reshape.
3523 * We pick the first array that needs reshaping and
3524 * reshape it. reshape_array() will re-read the metadata
3525 * so the next time through a different array should be
3526 * ready for reshape.
3527 * It is possible that the 'different' array will not
3528 * be assembled yet. In that case we simply exit.
3529 * When it is assembled, the mdadm which assembles it
3530 * will take over the reshape.
3531 */
3532 struct mdinfo *content;
3533 int fd;
3534 struct mdstat_ent *mdstat;
3535 char *adev;
3536 dev_t devid;
3537
3538 sysfs_free(cc);
3539
3540 cc = st->ss->container_content(st, NULL);
3541
3542 for (content = cc; content ; content = content->next) {
3543 char *subarray;
3544 if (!content->reshape_active)
3545 continue;
3546
3547 subarray = strchr(content->text_version+1, '/')+1;
3548 mdstat = mdstat_by_subdev(subarray, container);
3549 if (!mdstat)
3550 continue;
3551 if (mdstat->active == 0) {
3552 pr_err("Skipping inactive array %s.\n",
3553 mdstat->devnm);
3554 free_mdstat(mdstat);
3555 mdstat = NULL;
3556 continue;
3557 }
3558 break;
3559 }
3560 if (!content)
3561 break;
3562
3563 devid = devnm2devid(mdstat->devnm);
3564 adev = map_dev(major(devid), minor(devid), 0);
3565 if (!adev)
3566 adev = content->text_version;
3567
3568 fd = open_dev(mdstat->devnm);
3569 if (fd < 0) {
3570 pr_err("Device %s cannot be opened for reshape.\n", adev);
3571 break;
3572 }
3573
3574 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3575 /* Do not allow for multiple reshape_array() calls for
3576 * the same array.
3577 * It can happen when reshape_array() returns without
3578 * error although the reshape has not finished (wrong reshape
3579 * starting/continuation conditions). Mdmon then doesn't
3580 * switch to the next array in the container, and re-entry
3581 * conditions for the same array occur.
3582 * This is possibly an interim measure until the behaviour of
3583 * reshape_array() is resolved.
3584 */
3585 printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev);
3586 close(fd);
3587 break;
3588 }
3589 strcpy(last_devnm, mdstat->devnm);
3590
3591 sysfs_init(content, fd, mdstat->devnm);
3592
3593 if (mdmon_running(container))
3594 flush_mdmon(container);
3595
3596 rv = reshape_array(container, fd, adev, st,
3597 content, force, NULL, INVALID_SECTORS,
3598 backup_file, verbose, 1, restart,
3599 freeze_reshape);
3600 close(fd);
3601
3602 if (freeze_reshape) {
3603 sysfs_free(cc);
3604 exit(0);
3605 }
3606
3607 restart = 0;
3608 if (rv)
3609 break;
3610
3611 if (mdmon_running(container))
3612 flush_mdmon(container);
3613 }
3614 if (!rv)
3615 unfreeze(st);
3616 sysfs_free(cc);
3617 exit(0);
3618 }
3619
3620 /*
3621 * We run a child process in the background which performs the following
3622 * steps:
3623 * - wait for resync to reach a certain point
3624 * - suspend io to the following section
3625 * - backup that section
3626 * - allow resync to proceed further
3627 * - resume io
3628 * - discard the backup.
3629 *
3630 * These are combined in slightly different ways in the three cases.
3631 * Grow:
3632 * - suspend/backup/allow/wait/resume/discard
3633 * Shrink:
3634 * - allow/wait/suspend/backup/allow/wait/resume/discard
3635 * same-size:
3636 * - wait/resume/discard/suspend/backup/allow
3637 *
3638 * suspend/backup/allow always come together
3639 * wait/resume/discard do too.
3640 * For the same-size case we have two backups to improve flow.
3641 *
3642 */
3643
3644 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3645 unsigned long long backup_point,
3646 unsigned long long wait_point,
3647 unsigned long long *suspend_point,
3648 unsigned long long *reshape_completed, int *frozen)
3649 {
3650 /* This function is called repeatedly by the reshape manager.
3651 * It determines how much progress can safely be made and allows
3652 * that progress.
3653 * - 'info' identifies the array and particularly records in
3654 * ->reshape_progress the metadata's knowledge of progress
3655 * This is a sector offset from the start of the array
3656 * of the next array block to be relocated. This number
3657 * may increase from 0 or decrease from array_size, depending
3658 * on the type of reshape that is happening.
3659 * Note that in contrast, 'sync_completed' is a block count of the
3660 * reshape so far. It gives the distance between the start point
3661 * (head or tail of device) and the next place that data will be
3662 * written. It always increases.
3663 * - 'reshape' is the structure created by analyse_change
3664 * - 'backup_point' shows how much the metadata manager has backed-up
3665 * data. For reshapes with increasing progress, it is the next address
3666 * to be backed up; earlier addresses have already been backed up. For
3667 * decreasing progress, it is the earliest address that has been
3668 * backed up - later addresses are also backed up.
3669 * So addresses between reshape_progress and backup_point are
3670 * backed up, provided they are in the 'correct' order.
3671 * - 'wait_point' is an array address. When reshape_completed
3672 * passes this point, progress_reshape should return. It might
3673 * return earlier if it determines that ->reshape_progress needs
3674 * to be updated or further backup is needed.
3675 * - suspend_point is maintained by progress_reshape and the caller
3676 * should not touch it except to initialise to zero.
3677 * It is an array address and it only increases in 2.6.37 and earlier.
3678 * This makes it difficult to handle reducing reshapes with
3679 * external metadata.
3680 * However: it is similar to backup_point in that it records the
3681 * other end of a suspended region from reshape_progress.
3682 * It is moved to extend the region that is safe to back up and/or
3683 * reshape
3684 * - reshape_completed is read from sysfs and returned. The caller
3685 * should copy this into ->reshape_progress when it has reason to
3686 * believe that the metadata knows this, and any backup outside this
3687 * has been erased.
3688 *
3689 * Return value is:
3690 * 1 if more data from backup_point - but only as far as suspend_point,
3691 * should be backed up
3692 * 0 if things are progressing smoothly
3693 * -1 if the reshape is finished because it is all done,
3694 * -2 if the reshape is finished due to an error.
3695 */
3696
3697 int advancing = (reshape->after.data_disks
3698 >= reshape->before.data_disks);
3699 unsigned long long need_backup; /* All data between start of array and
3700 * here will at some point need to
3701 * be backed up.
3702 */
3703 unsigned long long read_offset, write_offset;
3704 unsigned long long write_range;
3705 unsigned long long max_progress, target, completed;
3706 unsigned long long array_size = (info->component_size
3707 * reshape->before.data_disks);
3708 int fd;
3709 char buf[20];
3710
3711 /* First, we unsuspend any region that is now known to be safe.
3712 * If suspend_point is on the 'wrong' side of reshape_progress, then
3713 * we don't have or need suspension at the moment. This is true for
3714 * native metadata when we don't need to back-up.
3715 */
3716 if (advancing) {
3717 if (info->reshape_progress <= *suspend_point)
3718 sysfs_set_num(info, NULL, "suspend_lo",
3719 info->reshape_progress);
3720 } else {
3721 /* Note: this won't work in 2.6.37 and before.
3722 * Something somewhere should make sure we don't need it!
3723 */
3724 if (info->reshape_progress >= *suspend_point)
3725 sysfs_set_num(info, NULL, "suspend_hi",
3726 info->reshape_progress);
3727 }
3728
3729 /* Now work out how far it is safe to progress.
3730 * If the read_offset for ->reshape_progress is less than
3731 * 'blocks' beyond the write_offset, we can only progress as far
3732 * as a backup.
3733 * Otherwise we can progress until the write_offset for the new location
3734 * reaches (within 'blocks' of) the read_offset at the current location.
3735 * However that region must be suspended unless we are using native
3736 * metadata.
3737 * If we need to suspend more, we limit it to 128M per device, which is
3738 * rather arbitrary and should be some time-based calculation.
3739 */
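/* Example with hypothetical numbers: growing 4 -> 5 data disks with a
 * 512 KiB chunk and reshape_progress = 1000000 sectors: read_offset is
 * 250000, write_offset is 200000; since read_offset exceeds
 * write_offset + 1024, progress may run to 250000 * 5 = 1250000 sectors.
 */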
3740 read_offset = info->reshape_progress / reshape->before.data_disks;
3741 write_offset = info->reshape_progress / reshape->after.data_disks;
3742 write_range = info->new_chunk/512;
3743 if (reshape->before.data_disks == reshape->after.data_disks)
3744 need_backup = array_size;
3745 else
3746 need_backup = reshape->backup_blocks;
3747 if (advancing) {
3748 if (read_offset < write_offset + write_range)
3749 max_progress = backup_point;
3750 else
3751 max_progress =
3752 read_offset *
3753 reshape->after.data_disks;
3754 } else {
3755 if (read_offset > write_offset - write_range)
3756 /* Can only progress as far as has been backed up,
3757 * which must be suspended */
3758 max_progress = backup_point;
3759 else if (info->reshape_progress <= need_backup)
3760 max_progress = backup_point;
3761 else {
3762 if (info->array.major_version >= 0)
3763 /* Can progress until backup is needed */
3764 max_progress = need_backup;
3765 else {
3766 /* Can progress until metadata update is required */
3767 max_progress =
3768 read_offset *
3769 reshape->after.data_disks;
3770 /* but data must be suspended */
3771 if (max_progress < *suspend_point)
3772 max_progress = *suspend_point;
3773 }
3774 }
3775 }
3776
3777 /* We know it is safe to progress to 'max_progress' providing
3778 * it is suspended or we are using native metadata.
3779 * Consider extending suspend_point 128M per device if it
3780 * is less than 64M per device beyond reshape_progress.
3781 * But always do a multiple of 'blocks'
3782 * FIXME this is too big - it takes too long to complete
3783 * this much.
3784 */
3785 target = 64*1024*2 * min(reshape->before.data_disks,
3786 reshape->after.data_disks);
3787 target /= reshape->backup_blocks;
3788 if (target < 2)
3789 target = 2;
3790 target *= reshape->backup_blocks;
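/* i.e. 64MiB (64*1024*2 sectors) for each data disk, rounded down to a
 * multiple of backup_blocks, with a floor of two backup_blocks. */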
3791
3792 /* For externally managed metadata we always need to suspend IO to
3793 * the area being reshaped so we regularly push suspend_point forward.
3794 * For native metadata we only need the suspend if we are going to do
3795 * a backup.
3796 */
3797 if (advancing) {
3798 if ((need_backup > info->reshape_progress
3799 || info->array.major_version < 0) &&
3800 *suspend_point < info->reshape_progress + target) {
3801 if (need_backup < *suspend_point + 2 * target)
3802 *suspend_point = need_backup;
3803 else if (*suspend_point + 2 * target < array_size)
3804 *suspend_point += 2 * target;
3805 else
3806 *suspend_point = array_size;
3807 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
3808 if (max_progress > *suspend_point)
3809 max_progress = *suspend_point;
3810 }
3811 } else {
3812 if (info->array.major_version >= 0) {
3813 /* Only need to suspend when about to backup */
3814 if (info->reshape_progress < need_backup * 2 &&
3815 *suspend_point > 0) {
3816 *suspend_point = 0;
3817 sysfs_set_num(info, NULL, "suspend_lo", 0);
3818 sysfs_set_num(info, NULL, "suspend_hi", need_backup);
3819 }
3820 } else {
3821 /* Need to suspend continually */
3822 if (info->reshape_progress < *suspend_point)
3823 *suspend_point = info->reshape_progress;
3824 if (*suspend_point + target < info->reshape_progress)
3825 /* No need to move suspend region yet */;
3826 else {
3827 if (*suspend_point >= 2 * target)
3828 *suspend_point -= 2 * target;
3829 else
3830 *suspend_point = 0;
3831 sysfs_set_num(info, NULL, "suspend_lo",
3832 *suspend_point);
3833 }
3834 if (max_progress < *suspend_point)
3835 max_progress = *suspend_point;
3836 }
3837 }
3838
3839 /* now set sync_max to allow that progress. sync_max, like
3840 * sync_completed, is a count of sectors written per device, so
3841 * we find the difference between max_progress and the start point,
3842 * and divide that by after.data_disks to get a sync_max
3843 * number.
3844 * At the same time we convert wait_point to a similar number
3845 * for comparing against sync_completed.
3846 */
3847 /* scale down max_progress to per_disk */
3848 max_progress /= reshape->after.data_disks;
3849 /* Round to chunk size as some kernels give an erroneously high number */
3850 max_progress /= info->new_chunk/512;
3851 max_progress *= info->new_chunk/512;
3852 /* And round to old chunk size as the kernel wants that */
3853 max_progress /= info->array.chunk_size/512;
3854 max_progress *= info->array.chunk_size/512;
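/* e.g. (hypothetical sizes) with a 256K new chunk and a 512K old chunk,
 * max_progress is rounded down first to a multiple of 512 sectors and
 * then to a multiple of 1024 sectors. */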
3855 /* Limit progress to the whole device */
3856 if (max_progress > info->component_size)
3857 max_progress = info->component_size;
3858 wait_point /= reshape->after.data_disks;
3859 if (!advancing) {
3860 /* switch from 'device offset' to 'processed block count' */
3861 max_progress = info->component_size - max_progress;
3862 wait_point = info->component_size - wait_point;
3863 }
3864
3865 if (!*frozen)
3866 sysfs_set_num(info, NULL, "sync_max", max_progress);
3867
3868 /* Now wait. If we have already reached the point that we were
3869 * asked to wait to, don't wait at all, else wait for any change.
3870 * We need to select on 'sync_completed' as that is where
3871 * notifications happen, but we are really interested in
3872 * 'reshape_position'.
3873 */
3874 fd = sysfs_get_fd(info, NULL, "sync_completed");
3875 if (fd < 0)
3876 goto check_progress;
3877
3878 if (sysfs_fd_get_ll(fd, &completed) < 0)
3879 goto check_progress;
3880
3881 while (completed < max_progress && completed < wait_point) {
3882 /* Check that sync_action is still 'reshape' to avoid
3883 * waiting forever on a dead array
3884 */
3885 char action[20];
3886 if (sysfs_get_str(info, NULL, "sync_action",
3887 action, 20) <= 0 ||
3888 strncmp(action, "reshape", 7) != 0)
3889 break;
3890 /* Some kernels reset 'sync_completed' to zero
3891 * before setting 'sync_action' to 'idle'.
3892 * So we need these extra tests.
3893 */
3894 if (completed == 0 && advancing
3895 && strncmp(action, "idle", 4) == 0
3896 && info->reshape_progress > 0)
3897 break;
3898 if (completed == 0 && !advancing
3899 && strncmp(action, "idle", 4) == 0
3900 && info->reshape_progress < (info->component_size
3901 * reshape->after.data_disks))
3902 break;
3903 sysfs_wait(fd, NULL);
3904 if (sysfs_fd_get_ll(fd, &completed) < 0)
3905 goto check_progress;
3906 }
3907 /* Some kernels reset 'sync_completed' to zero, but we still
3908 * need the real position within md.
3909 * So in that case, read 'reshape_position' from sysfs.
3910 */
3911 if (completed == 0) {
3912 unsigned long long reshapep;
3913 char action[20];
3914 if (sysfs_get_str(info, NULL, "sync_action",
3915 action, 20) > 0 &&
3916 strncmp(action, "idle", 4) == 0 &&
3917 sysfs_get_ll(info, NULL,
3918 "reshape_position", &reshapep) == 0)
3919 *reshape_completed = reshapep;
3920 } else {
3921 /* some kernels can give an incorrectly high
3922 * 'completed' number, so round down */
3923 completed /= (info->new_chunk/512);
3924 completed *= (info->new_chunk/512);
3925 /* Convert 'completed' back into a 'progress' number */
3926 completed *= reshape->after.data_disks;
3927 if (!advancing)
3928 completed = (info->component_size
3929 * reshape->after.data_disks
3930 - completed);
3931 *reshape_completed = completed;
3932 }
3933
3934 close(fd);
3935
3936 /* We return the need_backup flag. Caller will decide
3937 * how much - a multiple of ->backup_blocks up to *suspend_point
3938 */
3939 if (advancing)
3940 return need_backup > info->reshape_progress;
3941 else
3942 return need_backup >= info->reshape_progress;
3943
3944 check_progress:
3945 /* if we couldn't read a number from sync_completed, then
3946 * either the reshape did complete, or it aborted.
3947 * We can tell which by checking for 'none' in reshape_position.
3948 * If it did abort, then it might immediately restart if it
3949 * was just a device failure that leaves us degraded but
3950 * functioning.
3951 */
3952 if (sysfs_get_str(info, NULL, "reshape_position", buf, sizeof(buf)) < 0
3953 || strncmp(buf, "none", 4) != 0) {
3954 /* The abort might only be temporary. Wait up to 10
3955 * seconds for fd to contain a valid number again.
3956 */
3957 int wait = 10000;
3958 int rv = -2;
3959 unsigned long long new_sync_max;
3960 while (fd >= 0 && rv < 0 && wait > 0) {
3961 if (sysfs_wait(fd, &wait) != 1)
3962 break;
3963 switch (sysfs_fd_get_ll(fd, &completed)) {
3964 case 0:
3965 /* all good again */
3966 rv = 1;
3967 /* If "sync_max" is no longer max_progress
3968 * we need to freeze things
3969 */
3970 sysfs_get_ll(info, NULL, "sync_max", &new_sync_max);
3971 *frozen = (new_sync_max != max_progress);
3972 break;
3973 case -2: /* read error - abort */
3974 wait = 0;
3975 break;
3976 }
3977 }
3978 if (fd >= 0)
3979 close(fd);
3980 return rv; /* abort */
3981 } else {
3982 /* Maybe racing with array shutdown - check state */
3983 if (fd >= 0)
3984 close(fd);
3985 if (sysfs_get_str(info, NULL, "array_state", buf, sizeof(buf)) < 0
3986 || strncmp(buf, "inactive", 8) == 0
3987 || strncmp(buf, "clear",5) == 0)
3988 return -2; /* abort */
3989 return -1; /* complete */
3990 }
3991 }
3992
3993 /* FIXME return status is never checked */
3994 static int grow_backup(struct mdinfo *sra,
3995 unsigned long long offset, /* per device */
3996 unsigned long stripes, /* per device, in old chunks */
3997 int *sources, unsigned long long *offsets,
3998 int disks, int chunk, int level, int layout,
3999 int dests, int *destfd, unsigned long long *destoffsets,
4000 int part, int *degraded,
4001 char *buf)
4002 {
4003 /* Backup 'blocks' sectors at 'offset' on each device of the array,
4004 * to storage 'destfd' (offset 'destoffsets'), after first
4005 * suspending IO. Then allow resync to continue
4006 * over the suspended section.
4007 * Use part 'part' of the backup-super-block.
4008 */
4009 int odata = disks;
4010 int rv = 0;
4011 int i;
4012 unsigned long long ll;
4013 int new_degraded;
4014 //printf("offset %llu\n", offset);
4015 if (level >= 4)
4016 odata--;
4017 if (level == 6)
4018 odata--;
4019
4020 /* Check that the array hasn't become degraded, else we might back up the wrong data */
4021 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
4022 return -1; /* FIXME this error is ignored */
4023 new_degraded = (int)ll;
4024 if (new_degraded != *degraded) {
4025 /* check each device to ensure it is still working */
4026 struct mdinfo *sd;
4027 for (sd = sra->devs ; sd ; sd = sd->next) {
4028 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4029 continue;
4030 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
4031 char sbuf[20];
4032 if (sysfs_get_str(sra, sd, "state", sbuf, 20) < 0 ||
4033 strstr(sbuf, "faulty") ||
4034 strstr(sbuf, "in_sync") == NULL) {
4035 /* this device is dead */
4036 sd->disk.state = (1<<MD_DISK_FAULTY);
4037 if (sd->disk.raid_disk >= 0 &&
4038 sources[sd->disk.raid_disk] >= 0) {
4039 close(sources[sd->disk.raid_disk]);
4040 sources[sd->disk.raid_disk] = -1;
4041 }
4042 }
4043 }
4044 }
4045 *degraded = new_degraded;
4046 }
4047 if (part) {
4048 bsb.arraystart2 = __cpu_to_le64(offset * odata);
4049 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
4050 } else {
4051 bsb.arraystart = __cpu_to_le64(offset * odata);
4052 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
4053 }
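/* The backup super-block has two independent slots
 * (arraystart/length and arraystart2/length2); 'part' selects which slot
 * this backup fills, so the other slot can keep describing the previous
 * backup until forget_backup() clears it. */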
4054 if (part)
4055 bsb.magic[15] = '2';
4056 for (i = 0; i < dests; i++)
4057 if (part)
4058 lseek64(destfd[i], destoffsets[i] + __le64_to_cpu(bsb.devstart2)*512, 0);
4059 else
4060 lseek64(destfd[i], destoffsets[i], 0);
4061
4062 rv = save_stripes(sources, offsets,
4063 disks, chunk, level, layout,
4064 dests, destfd,
4065 offset*512*odata, stripes * chunk * odata,
4066 buf);
4067
4068 if (rv)
4069 return rv;
4070 bsb.mtime = __cpu_to_le64(time(0));
4071 for (i = 0; i < dests; i++) {
4072 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4073
4074 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4075 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4076 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4077 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4078
4079 rv = -1;
4080 if ((unsigned long long)lseek64(destfd[i], destoffsets[i] - 4096, 0)
4081 != destoffsets[i] - 4096)
4082 break;
4083 if (write(destfd[i], &bsb, 512) != 512)
4084 break;
4085 if (destoffsets[i] > 4096) {
4086 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
4087 destoffsets[i]+stripes*chunk*odata)
4088 break;
4089 if (write(destfd[i], &bsb, 512) != 512)
4090 break;
4091 }
4092 fsync(destfd[i]);
4093 rv = 0;
4094 }
4095
4096 return rv;
4097 }
4098
4099 /* in 2.6.30, the value reported by sync_completed can be
4100 * less than it should be by one stripe.
4101 * This only happens when reshape hits sync_max and pauses.
4102 * So allow wait_backup to either extend sync_max further
4103 * than strictly necessary, or return before the
4104 * sync has got quite as far as we would really like.
4105 * This is what 'blocks2' is for.
4106 * The various callers give appropriate values so that
4107 * everything works.
4108 */
4109 /* FIXME return value is often ignored */
4110 static int forget_backup(int dests, int *destfd,
4111 unsigned long long *destoffsets,
4112 int part)
4113 {
4114 /*
4115 * Erase backup 'part' (which is 0 or 1)
4116 */
4117 int i;
4118 int rv;
4119
4120 if (part) {
4121 bsb.arraystart2 = __cpu_to_le64(0);
4122 bsb.length2 = __cpu_to_le64(0);
4123 } else {
4124 bsb.arraystart = __cpu_to_le64(0);
4125 bsb.length = __cpu_to_le64(0);
4126 }
4127 bsb.mtime = __cpu_to_le64(time(0));
4128 rv = 0;
4129 for (i = 0; i < dests; i++) {
4130 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4131 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4132 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4133 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4134 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4135 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4136 destoffsets[i]-4096)
4137 rv = -1;
4138 if (rv == 0 &&
4139 write(destfd[i], &bsb, 512) != 512)
4140 rv = -1;
4141 fsync(destfd[i]);
4142 }
4143 return rv;
4144 }
4145
4146 static void fail(char *msg)
4147 {
4148 int rv;
4149 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4150 rv |= (write(2, "\n", 1) != 1);
4151 exit(rv ? 1 : 2);
4152 }
4153
4154 static char *abuf, *bbuf;
4155 static unsigned long long abuflen;
4156 static void validate(int afd, int bfd, unsigned long long offset)
4157 {
4158 /* check the data in the backup against the array.
4159 * This is only used for regression testing and should not
4160 * be used while the array is active
4161 */
4162 if (afd < 0)
4163 return;
4164 lseek64(bfd, offset - 4096, 0);
4165 if (read(bfd, &bsb2, 512) != 512)
4166 fail("cannot read bsb");
4167 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4168 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4169 fail("first csum bad");
4170 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4171 fail("magic is bad");
4172 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4173 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4174 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4175 fail("second csum bad");
4176
4177 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4178 fail("devstart is wrong");
4179
4180 if (bsb2.length) {
4181 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4182
4183 if (abuflen < len) {
4184 free(abuf);
4185 free(bbuf);
4186 abuflen = len;
4187 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4188 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4189 abuflen = 0;
4190 /* just stop validating on mem-alloc failure */
4191 return;
4192 }
4193 }
4194
4195 lseek64(bfd, offset, 0);
4196 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4197 //printf("len %llu\n", len);
4198 fail("read first backup failed");
4199 }
4200 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4201 if ((unsigned long long)read(afd, abuf, len) != len)
4202 fail("read first from array failed");
4203 if (memcmp(bbuf, abuf, len) != 0) {
4204 #if 0
4205 int i;
4206 printf("offset=%llu len=%llu\n",
4207 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4208 for (i=0; i<len; i++)
4209 if (bbuf[i] != abuf[i]) {
4210 printf("first diff byte %d\n", i);
4211 break;
4212 }
4213 #endif
4214 fail("data1 compare failed");
4215 }
4216 }
4217 if (bsb2.length2) {
4218 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4219
4220 if (abuflen < len) {
4221 free(abuf);
4222 free(bbuf);
4223 abuflen = len;
4224 abuf = xmalloc(abuflen);
4225 bbuf = xmalloc(abuflen);
4226 }
4227
4228 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4229 if ((unsigned long long)read(bfd, bbuf, len) != len)
4230 fail("read second backup failed");
4231 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4232 if ((unsigned long long)read(afd, abuf, len) != len)
4233 fail("read second from array failed");
4234 if (memcmp(bbuf, abuf, len) != 0)
4235 fail("data2 compare failed");
4236 }
4237 }
4238
4239 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4240 struct supertype *st, unsigned long blocks,
4241 int *fds, unsigned long long *offsets,
4242 int dests, int *destfd, unsigned long long *destoffsets)
4243 {
4244 /* Monitor a reshape where backup is being performed using
4245 * 'native' mechanism - either to a backup file, or
4246 * to some space in a spare.
4247 */
4248 char *buf;
4249 int degraded = -1;
4250 unsigned long long speed;
4251 unsigned long long suspend_point, array_size;
4252 unsigned long long backup_point, wait_point;
4253 unsigned long long reshape_completed;
4254 int done = 0;
4255 int increasing = reshape->after.data_disks >= reshape->before.data_disks;
4256 int part = 0; /* The next part of the backup area to fill. It may already
4257 * be full, so we need to check */
4258 int level = reshape->level;
4259 int layout = reshape->before.layout;
4260 int data = reshape->before.data_disks;
4261 int disks = reshape->before.data_disks + reshape->parity;
4262 int chunk = sra->array.chunk_size;
4263 struct mdinfo *sd;
4264 unsigned long stripes;
4265 int uuid[4];
4266 int frozen = 0;
4267
4268 /* set up the backup-super-block. This requires the
4269 * uuid from the array.
4270 */
4271 /* Find a superblock */
4272 for (sd = sra->devs; sd; sd = sd->next) {
4273 char *dn;
4274 int devfd;
4275 int ok;
4276 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4277 continue;
4278 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4279 devfd = dev_open(dn, O_RDONLY);
4280 if (devfd < 0)
4281 continue;
4282 ok = st->ss->load_super(st, devfd, NULL);
4283 close(devfd);
4284 if (ok == 0)
4285 break;
4286 }
4287 if (!sd) {
4288 pr_err("Cannot find a superblock\n");
4289 return 0;
4290 }
4291
4292 memset(&bsb, 0, 512);
4293 memcpy(bsb.magic, "md_backup_data-1", 16);
4294 st->ss->uuid_from_super(st, uuid);
4295 memcpy(bsb.set_uuid, uuid, 16);
4296 bsb.mtime = __cpu_to_le64(time(0));
4297 bsb.devstart2 = blocks;
4298
4299 stripes = blocks / (sra->array.chunk_size/512) /
4300 reshape->before.data_disks;
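/* i.e. the number of old-size chunks per device that one backup unit of
 * 'blocks' data sectors covers. */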
4301
4302 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4303 /* Don't start the 'reshape' */
4304 return 0;
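/* buf holds one full old-geometry stripe: one chunk from each of the
 * 'disks' members, page-aligned by posix_memalign. */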
4305 if (reshape->before.data_disks == reshape->after.data_disks) {
4306 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4307 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4308 }
4309
4310 if (increasing) {
4311 array_size = sra->component_size * reshape->after.data_disks;
4312 backup_point = sra->reshape_progress;
4313 suspend_point = 0;
4314 } else {
4315 array_size = sra->component_size * reshape->before.data_disks;
4316 backup_point = reshape->backup_blocks;
4317 suspend_point = array_size;
4318 }
4319
4320 while (!done) {
4321 int rv;
4322
4323 /* Want to return as soon as the oldest backup slot can
4324 * be released as that allows us to start backing up
4325 * some more, providing suspend_point has been
4326 * advanced, which it should have.
4327 */
4328 if (increasing) {
4329 wait_point = array_size;
4330 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4331 wait_point = (__le64_to_cpu(bsb.arraystart) +
4332 __le64_to_cpu(bsb.length));
4333 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4334 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4335 __le64_to_cpu(bsb.length2));
4336 } else {
4337 wait_point = 0;
4338 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4339 wait_point = __le64_to_cpu(bsb.arraystart);
4340 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4341 wait_point = __le64_to_cpu(bsb.arraystart2);
4342 }
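/* wait_point is the reshape position at which the backup described by
 * slot 'part' is no longer needed, so progress_reshape() can return and
 * let that slot be reused. */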
4343
4344 reshape_completed = sra->reshape_progress;
4345 rv = progress_reshape(sra, reshape,
4346 backup_point, wait_point,
4347 &suspend_point, &reshape_completed,
4348 &frozen);
4349 /* external metadata would need to ping_monitor here */
4350 sra->reshape_progress = reshape_completed;
4351
4352 /* Clear any backup region that is before 'here' */
4353 if (increasing) {
4354 if (__le64_to_cpu(bsb.length) > 0 &&
4355 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4356 __le64_to_cpu(bsb.length)))
4357 forget_backup(dests, destfd,
4358 destoffsets, 0);
4359 if (__le64_to_cpu(bsb.length2) > 0 &&
4360 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4361 __le64_to_cpu(bsb.length2)))
4362 forget_backup(dests, destfd,
4363 destoffsets, 1);
4364 } else {
4365 if (__le64_to_cpu(bsb.length) > 0 &&
4366 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4367 forget_backup(dests, destfd,
4368 destoffsets, 0);
4369 if (__le64_to_cpu(bsb.length2) > 0 &&
4370 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4371 forget_backup(dests, destfd,
4372 destoffsets, 1);
4373 }
4374 if (sigterm)
4375 rv = -2;
4376 if (rv < 0) {
4377 if (rv == -1)
4378 done = 1;
4379 break;
4380 }
4381 if (rv == 0 && increasing && !st->ss->external) {
4382 /* No longer need to monitor this reshape */
4383 sysfs_set_str(sra, NULL, "sync_max", "max");
4384 done = 1;
4385 break;
4386 }
4387
4388 while (rv) {
4389 unsigned long long offset;
4390 unsigned long actual_stripes;
4391 /* Need to backup some data.
4392 * If 'part' is not used and the desired
4393 * backup size is suspended, do a backup,
4394 * then consider the next part.
4395 */
4396 /* Check that 'part' is unused */
4397 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4398 break;
4399 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4400 break;
4401
4402 offset = backup_point / data;
4403 actual_stripes = stripes;
4404 if (increasing) {
4405 if (offset + actual_stripes * (chunk/512) >
4406 sra->component_size)
4407 actual_stripes = ((sra->component_size - offset)
4408 / (chunk/512));
4409 if (offset + actual_stripes * (chunk/512) >
4410 suspend_point/data)
4411 break;
4412 } else {
4413 if (offset < actual_stripes * (chunk/512))
4414 actual_stripes = offset / (chunk/512);
4415 offset -= actual_stripes * (chunk/512);
4416 if (offset < suspend_point/data)
4417 break;
4418 }
4419 if (actual_stripes == 0)
4420 break;
4421 grow_backup(sra, offset, actual_stripes,
4422 fds, offsets,
4423 disks, chunk, level, layout,
4424 dests, destfd, destoffsets,
4425 part, &degraded, buf);
4426 validate(afd, destfd[0], destoffsets[0]);
4427 /* record where 'part' is up to */
4428 part = !part;
4429 if (increasing)
4430 backup_point += actual_stripes * (chunk/512) * data;
4431 else
4432 backup_point -= actual_stripes * (chunk/512) * data;
4433 }
4434 }
4435
4436 /* FIXME maybe call progress_reshape one more time instead */
4437 /* remove any remaining suspension */
4438 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4439 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4440 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4441 sysfs_set_num(sra, NULL, "sync_min", 0);
4442
4443 if (reshape->before.data_disks == reshape->after.data_disks)
4444 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4445 free(buf);
4446 return done;
4447 }
4448
4449 /*
4450 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4451 * write that data into the array and update the super blocks with
4452 * the new reshape_progress
4453 */
4454 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, int cnt,
4455 char *backup_file, int verbose)
4456 {
4457 int i, j;
4458 int old_disks;
4459 unsigned long long *offsets;
4460 unsigned long long nstripe, ostripe;
4461 int ndata, odata;
4462
4463 odata = info->array.raid_disks - info->delta_disks - 1;
4464 if (info->array.level == 6) odata--; /* number of data disks */
4465 ndata = info->array.raid_disks - 1;
4466 if (info->new_level == 6) ndata--;
4467
4468 old_disks = info->array.raid_disks - info->delta_disks;
4469
4470 if (info->delta_disks <= 0)
4471 /* Didn't grow, so the backup file must have
4472 * been used
4473 */
4474 old_disks = cnt;
4475 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4476 struct mdinfo dinfo;
4477 int fd;
4478 int bsbsize;
4479 char *devname, namebuf[20];
4480 unsigned long long lo, hi;
4481
4482 /* This was a spare and may have some saved data on it.
4483 * Load the superblock, find and load the
4484 * backup_super_block.
4485 * If either fail, go on to next device.
4486 * If the backup contains no new info, just return
4487 * else restore data and update all superblocks
4488 */
4489 if (i == old_disks-1) {
4490 fd = open(backup_file, O_RDONLY);
4491 if (fd<0) {
4492 pr_err("backup file %s inaccessible: %s\n",
4493 backup_file, strerror(errno));
4494 continue;
4495 }
4496 devname = backup_file;
4497 } else {
4498 fd = fdlist[i];
4499 if (fd < 0)
4500 continue;
4501 if (st->ss->load_super(st, fd, NULL))
4502 continue;
4503
4504 st->ss->getinfo_super(st, &dinfo, NULL);
4505 st->ss->free_super(st);
4506
4507 if (lseek64(fd,
4508 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4509 0) < 0) {
4510 pr_err("Cannot seek on device %d\n", i);
4511 continue; /* Cannot seek */
4512 }
4513 sprintf(namebuf, "device-%d", i);
4514 devname = namebuf;
4515 }
4516 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4517 if (verbose)
4518 pr_err("Cannot read from %s\n", devname);
4519 continue; /* Cannot read */
4520 }
4521 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4522 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4523 if (verbose)
4524 pr_err("No backup metadata on %s\n", devname);
4525 continue;
4526 }
4527 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4528 if (verbose)
4529 pr_err("Bad backup-metadata checksum on %s\n", devname);
4530 continue; /* bad checksum */
4531 }
4532 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4533 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4534 if (verbose)
4535 pr_err("Bad backup-metadata checksum2 on %s\n", devname);
4536 continue; /* Bad second checksum */
4537 }
4538 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4539 if (verbose)
4540 pr_err("Wrong uuid on backup-metadata on %s\n", devname);
4541 continue; /* Wrong uuid */
4542 }
4543
4544 /* array utime and backup-mtime should be updated at much the same time, but it seems that
4545 * sometimes they aren't... So allow considerable flexibility in matching, and allow
4546 * this test to be overridden by an environment variable.
4547 */
4548 if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
4549 time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
4550 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4551 pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
4552 (unsigned long)__le64_to_cpu(bsb.mtime),
4553 (unsigned long)info->array.utime);
4554 } else {
4555 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4556 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4557 continue; /* time stamp is too bad */
4558 }
4559 }
4560
4561 if (bsb.magic[15] == '1') {
4562 if (bsb.length == 0)
4563 continue;
4564 if (info->delta_disks >= 0) {
4565 /* reshape_progress is increasing */
4566 if (__le64_to_cpu(bsb.arraystart)
4567 + __le64_to_cpu(bsb.length)
4568 < info->reshape_progress) {
4569 nonew:
4570 if (verbose)
4571 pr_err("backup-metadata found on %s but is not needed\n", devname);
4572 continue; /* No new data here */
4573 }
4574 } else {
4575 /* reshape_progress is decreasing */
4576 if (__le64_to_cpu(bsb.arraystart) >=
4577 info->reshape_progress)
4578 goto nonew; /* No new data here */
4579 }
4580 } else {
4581 if (bsb.length == 0 && bsb.length2 == 0)
4582 continue;
4583 if (info->delta_disks >= 0) {
4584 /* reshape_progress is increasing */
4585 if ((__le64_to_cpu(bsb.arraystart)
4586 + __le64_to_cpu(bsb.length)
4587 < info->reshape_progress)
4588 &&
4589 (__le64_to_cpu(bsb.arraystart2)
4590 + __le64_to_cpu(bsb.length2)
4591 < info->reshape_progress))
4592 goto nonew; /* No new data here */
4593 } else {
4594 /* reshape_progress is decreasing */
4595 if (__le64_to_cpu(bsb.arraystart) >=
4596 info->reshape_progress &&
4597 __le64_to_cpu(bsb.arraystart2) >=
4598 info->reshape_progress)
4599 goto nonew; /* No new data here */
4600 }
4601 }
4602 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4603 second_fail:
4604 if (verbose)
4605 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4606 devname);
4607 continue; /* Cannot seek */
4608 }
4609 /* There should be a duplicate backup superblock 4k before here */
4610 if (lseek64(fd, -4096, 1) < 0 ||
4611 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4612 goto second_fail; /* Cannot find leading superblock */
4613 if (bsb.magic[15] == '1')
4614 bsbsize = offsetof(struct mdp_backup_super, pad1);
4615 else
4616 bsbsize = offsetof(struct mdp_backup_super, pad);
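/* compare only the fields defined for this version of the backup
 * super-block: version 1 ends at pad1, version 2 at pad. */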
4617 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4618 goto second_fail; /* Cannot find leading superblock */
4619
4620 /* Now need the data offsets for all devices. */
4621 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4622 for(j=0; j<info->array.raid_disks; j++) {
4623 if (fdlist[j] < 0)
4624 continue;
4625 if (st->ss->load_super(st, fdlist[j], NULL))
4626 /* FIXME should this be an error? */
4627 continue;
4628 st->ss->getinfo_super(st, &dinfo, NULL);
4629 st->ss->free_super(st);
4630 offsets[j] = dinfo.data_offset * 512;
4631 }
4632 printf("%s: restoring critical section\n", Name);
4633
4634 if (restore_stripes(fdlist, offsets,
4635 info->array.raid_disks,
4636 info->new_chunk,
4637 info->new_level,
4638 info->new_layout,
4639 fd, __le64_to_cpu(bsb.devstart)*512,
4640 __le64_to_cpu(bsb.arraystart)*512,
4641 __le64_to_cpu(bsb.length)*512, NULL)) {
4642 /* didn't succeed, so give up */
4643 if (verbose)
4644 pr_err("Error restoring backup from %s\n",
4645 devname);
4646 free(offsets);
4647 return 1;
4648 }
4649
4650 if (bsb.magic[15] == '2' &&
4651 restore_stripes(fdlist, offsets,
4652 info->array.raid_disks,
4653 info->new_chunk,
4654 info->new_level,
4655 info->new_layout,
4656 fd, __le64_to_cpu(bsb.devstart)*512 +
4657 __le64_to_cpu(bsb.devstart2)*512,
4658 __le64_to_cpu(bsb.arraystart2)*512,
4659 __le64_to_cpu(bsb.length2)*512, NULL)) {
4660 /* didn't succeed, so give up */
4661 if (verbose)
4662 pr_err("Error restoring second backup from %s\n",
4663 devname);
4664 free(offsets);
4665 return 1;
4666 }
4667
4668 free(offsets);
4669
4670 /* Ok, so the data is restored. Let's update those superblocks. */
4671
4672 lo = hi = 0;
4673 if (bsb.length) {
4674 lo = __le64_to_cpu(bsb.arraystart);
4675 hi = lo + __le64_to_cpu(bsb.length);
4676 }
4677 if (bsb.magic[15] == '2' && bsb.length2) {
4678 unsigned long long lo1, hi1;
4679 lo1 = __le64_to_cpu(bsb.arraystart2);
4680 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4681 if (lo == hi) {
4682 lo = lo1;
4683 hi = hi1;
4684 } else if (lo < lo1)
4685 hi = hi1;
4686 else
4687 lo = lo1;
4688 }
4689 if (lo < hi &&
4690 (info->reshape_progress < lo ||
4691 info->reshape_progress > hi))
4692 /* backup does not affect reshape_progress */ ;
4693 else if (info->delta_disks >= 0) {
4694 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4695 __le64_to_cpu(bsb.length);
4696 if (bsb.magic[15] == '2') {
4697 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2) +
4698 __le64_to_cpu(bsb.length2);
4699 if (p2 > info->reshape_progress)
4700 info->reshape_progress = p2;
4701 }
4702 } else {
4703 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4704 if (bsb.magic[15] == '2') {
4705 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2);
4706 if (p2 < info->reshape_progress)
4707 info->reshape_progress = p2;
4708 }
4709 }
4710 for (j=0; j<info->array.raid_disks; j++) {
4711 if (fdlist[j] < 0)
4712 continue;
4713 if (st->ss->load_super(st, fdlist[j], NULL))
4714 continue;
4715 st->ss->getinfo_super(st, &dinfo, NULL);
4716 dinfo.reshape_progress = info->reshape_progress;
4717 st->ss->update_super(st, &dinfo,
4718 "_reshape_progress",
4719 NULL,0, 0, NULL);
4720 st->ss->store_super(st, fdlist[j]);
4721 st->ss->free_super(st);
4722 }
4723 return 0;
4724 }
4725 /* Didn't find any backup data, try to see if any
4726 * was needed.
4727 */
4728 if (info->delta_disks < 0) {
4729 /* When shrinking, the critical section is at the end.
4730 * So see if we are before the critical section.
4731 */
4732 unsigned long long first_block;
4733 nstripe = ostripe = 0;
4734 first_block = 0;
4735 while (ostripe >= nstripe) {
4736 ostripe += info->array.chunk_size / 512;
4737 first_block = ostripe * odata;
4738 nstripe = first_block / ndata / (info->new_chunk/512) *
4739 (info->new_chunk/512);
4740 }
4741
4742 if (info->reshape_progress >= first_block)
4743 return 0;
4744 }
4745 if (info->delta_disks > 0) {
4746 /* See if we are beyond the critical section. */
4747 unsigned long long last_block;
4748 nstripe = ostripe = 0;
4749 last_block = 0;
4750 while (nstripe >= ostripe) {
4751 nstripe += info->new_chunk / 512;
4752 last_block = nstripe * ndata;
4753 ostripe = last_block / odata / (info->array.chunk_size/512) *
4754 (info->array.chunk_size/512);
4755 }
4756
4757 if (info->reshape_progress >= last_block)
4758 return 0;
4759 }
4760 /* needed to recover critical section! */
4761 if (verbose)
4762 pr_err("Failed to find backup of critical section\n");
4763 return 1;
4764 }
4765
4766 int Grow_continue_command(char *devname, int fd,
4767 char *backup_file, int verbose)
4768 {
4769 int ret_val = 0;
4770 struct supertype *st = NULL;
4771 struct mdinfo *content = NULL;
4772 struct mdinfo array;
4773 char *subarray = NULL;
4774 struct mdinfo *cc = NULL;
4775 struct mdstat_ent *mdstat = NULL;
4776 int cfd = -1;
4777 int fd2;
4778
4779 dprintf("Grow continue from command line called for %s\n",
4780 devname);
4781
4782 st = super_by_fd(fd, &subarray);
4783 if (!st || !st->ss) {
4784 pr_err("Unable to determine metadata format for %s\n",
4785 devname);
4786 return 1;
4787 }
4788 dprintf("Grow continue is run for ");
4789 if (st->ss->external == 0) {
4790 int d;
4791 dprintf_cont("native array (%s)\n", devname);
4792 if (ioctl(fd, GET_ARRAY_INFO, &array.array) < 0) {
4793 pr_err("%s is not an active md array - aborting\n", devname);
4794 ret_val = 1;
4795 goto Grow_continue_command_exit;
4796 }
4797 content = &array;
4798 /* Need to load a superblock.
4799 * FIXME we should really get what we need from
4800 * sysfs
4801 */
4802 for (d = 0; d < MAX_DISKS; d++) {
4803 mdu_disk_info_t disk;
4804 char *dv;
4805 int err;
4806 disk.number = d;
4807 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
4808 continue;
4809 if (disk.major == 0 && disk.minor == 0)
4810 continue;
4811 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
4812 continue;
4813 dv = map_dev(disk.major, disk.minor, 1);
4814 if (!dv)
4815 continue;
4816 fd2 = dev_open(dv, O_RDONLY);
4817 if (fd2 < 0)
4818 continue;
4819 err = st->ss->load_super(st, fd2, NULL);
4820 close(fd2);
4821 if (err)
4822 continue;
4823 break;
4824 }
4825 if (d == MAX_DISKS) {
4826 pr_err("Unable to load metadata for %s\n",
4827 devname);
4828 ret_val = 1;
4829 goto Grow_continue_command_exit;
4830 }
4831 st->ss->getinfo_super(st, content, NULL);
4832 } else {
4833 char *container;
4834
4835 if (subarray) {
4836 dprintf_cont("subarray (%s)\n", subarray);
4837 container = st->container_devnm;
4838 cfd = open_dev_excl(st->container_devnm);
4839 } else {
4840 container = st->devnm;
4841 close(fd);
4842 cfd = open_dev_excl(st->devnm);
4843 dprintf_cont("container (%s)\n", container);
4844 fd = cfd;
4845 }
4846 if (cfd < 0) {
4847 pr_err("Unable to open container for %s\n", devname);
4848 ret_val = 1;
4849 goto Grow_continue_command_exit;
4850 }
4851
4852 /* find the array under reshape in the container
4853 */
4854 ret_val = st->ss->load_container(st, cfd, NULL);
4855 if (ret_val) {
4856 pr_err("Cannot read superblock for %s\n",
4857 devname);
4858 ret_val = 1;
4859 goto Grow_continue_command_exit;
4860 }
4861
4862 cc = st->ss->container_content(st, subarray);
4863 for (content = cc; content ; content = content->next) {
4864 char *array;
4865 int allow_reshape = 1;
4866
4867 if (content->reshape_active == 0)
4868 continue;
4869 /* The decision about array or container wide
4870 * reshape is taken in Grow_continue based on the
4871 * content->reshape_active state, therefore we
4872 * need to check_reshape based on
4873 * reshape_active and the subarray name
4874 */
4875 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
4876 allow_reshape = 0;
4877 if (content->reshape_active == CONTAINER_RESHAPE &&
4878 (content->array.state
4879 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
4880 allow_reshape = 0;
4881
4882 if (!allow_reshape) {
4883 pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
4884 devname, container);
4885 ret_val = 1;
4886 goto Grow_continue_command_exit;
4887 }
4888
4889 array = strchr(content->text_version+1, '/')+1;
4890 mdstat = mdstat_by_subdev(array, container);
4891 if (!mdstat)
4892 continue;
4893 if (mdstat->active == 0) {
4894 pr_err("Skipping inactive array %s.\n",
4895 mdstat->devnm);
4896 free_mdstat(mdstat);
4897 mdstat = NULL;
4898 continue;
4899 }
4900 break;
4901 }
4902 if (!content) {
4903 pr_err("Unable to determine reshaped array for %s\n", devname);
4904 ret_val = 1;
4905 goto Grow_continue_command_exit;
4906 }
4907 fd2 = open_dev(mdstat->devnm);
4908 if (fd2 < 0) {
4909 pr_err("cannot open (%s)\n", mdstat->devnm);
4910 ret_val = 1;
4911 goto Grow_continue_command_exit;
4912 }
4913
4914 sysfs_init(content, fd2, mdstat->devnm);
4915
4916 close(fd2);
4917
4918 /* start mdmon in case it is not running
4919 */
4920 if (!mdmon_running(container))
4921 start_mdmon(container);
4922 ping_monitor(container);
4923
4924 if (mdmon_running(container))
4925 st->update_tail = &st->updates;
4926 else {
4927 pr_err("No mdmon found. Grow cannot continue.\n");
4928 ret_val = 1;
4929 goto Grow_continue_command_exit;
4930 }
4931 }
4932
4933 /* verify that the array under reshape is started from
4934 * the correct position
4935 */
4936 if (verify_reshape_position(content, content->array.level) < 0) {
4937 ret_val = 1;
4938 goto Grow_continue_command_exit;
4939 }
4940
4941 /* continue reshape
4942 */
4943 ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);
4944
4945 Grow_continue_command_exit:
4946 if (cfd > -1)
4947 close(cfd);
4948 st->ss->free_super(st);
4949 free_mdstat(mdstat);
4950 sysfs_free(cc);
4951 free(subarray);
4952
4953 return ret_val;
4954 }
4955
4956 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
4957 char *backup_file, int forked, int freeze_reshape)
4958 {
4959 int ret_val = 2;
4960
4961 if (!info->reshape_active)
4962 return ret_val;
4963
4964 if (st->ss->external) {
4965 int cfd = open_dev(st->container_devnm);
4966
4967 if (cfd < 0)
4968 return 1;
4969
4970 st->ss->load_container(st, cfd, st->container_devnm);
4971 close(cfd);
4972 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
4973 st, info, 0, backup_file,
4974 0, forked,
4975 1 | info->reshape_active,
4976 freeze_reshape);
4977 } else
4978 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
4979 NULL, INVALID_SECTORS,
4980 backup_file, 0, forked,
4981 1 | info->reshape_active,
4982 freeze_reshape);
4983
4984 return ret_val;
4985 }
4986
4987 char *make_backup(char *name)
4988 {
4989 char *base = "backup_file-";
4990 int len;
4991 char *fname;
4992
4993 len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1;
4994 fname = xmalloc(len);
4995 sprintf(fname, "%s/%s%s", MAP_DIR, base, name);
4996 return fname;
4997 }
4998
4999 char *locate_backup(char *name)
5000 {
5001 char *fl = make_backup(name);
5002 struct stat stb;
5003
5004 if (stat(fl, &stb) == 0 &&
5005 S_ISREG(stb.st_mode))
5006 return fl;
5007
5008 free(fl);
5009 return NULL;
5010 }