1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stddef.h>
28 #include <stdint.h>
29 #include <signal.h>
30 #include <sys/wait.h>
31
32 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
33 #error no endian defined
34 #endif
35 #include "md_u.h"
36 #include "md_p.h"
37
38 int restore_backup(struct supertype *st,
39 struct mdinfo *content,
40 int working_disks,
41 int next_spare,
42 char **backup_filep,
43 int verbose)
44 {
45 int i;
46 int *fdlist;
47 struct mdinfo *dev;
48 int err;
49 int disk_count = next_spare + working_disks;
50 char *backup_file = *backup_filep;
51
52 dprintf("Called restore_backup()\n");
53 fdlist = xmalloc(sizeof(int) * disk_count);
54
55 enable_fds(next_spare);
56 for (i = 0; i < next_spare; i++)
57 fdlist[i] = -1;
58 for (dev = content->devs; dev; dev = dev->next) {
59 char buf[22];
60 int fd;
61 sprintf(buf, "%d:%d",
62 dev->disk.major,
63 dev->disk.minor);
64 fd = dev_open(buf, O_RDWR);
65
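/* active members go at their raid_disk slot in fdlist; spares are appended starting at next_spare */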
66 if (dev->disk.raid_disk >= 0)
67 fdlist[dev->disk.raid_disk] = fd;
68 else
69 fdlist[next_spare++] = fd;
70 }
71
72 if (!backup_file) {
73 backup_file = locate_backup(content->sys_name);
74 *backup_filep = backup_file;
75 }
76
77 if (st->ss->external && st->ss->recover_backup)
78 err = st->ss->recover_backup(st, content);
79 else
80 err = Grow_restart(st, content, fdlist, next_spare,
81 backup_file, verbose > 0);
82
83 while (next_spare > 0) {
84 next_spare--;
85 if (fdlist[next_spare] >= 0)
86 close(fdlist[next_spare]);
87 }
88 free(fdlist);
89 if (err) {
90 pr_err("Failed to restore critical section for reshape - sorry.\n");
91 if (!backup_file)
92 pr_err("Possibly you need to specify a --backup-file\n");
93 return 1;
94 }
95
96 dprintf("restore_backup() returns status OK.\n");
97 return 0;
98 }
99
100 int Grow_Add_device(char *devname, int fd, char *newdev)
101 {
102 /* Add a device to an active array.
103 * Currently, just extend a linear array.
104 * This requires writing a new superblock on the
105 * new device, calling the kernel to add the device,
106 * and if that succeeds, update the superblock on
107 * all other devices.
108 * This means that we need to *find* all other devices.
109 */
110 struct mdinfo info;
111
112 struct stat stb;
113 int nfd, fd2;
114 int d, nd;
115 struct supertype *st = NULL;
116 char *subarray = NULL;
117
118 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
119 pr_err("cannot get array info for %s\n", devname);
120 return 1;
121 }
122
123 if (info.array.level != -1) {
124 pr_err("can only add devices to linear arrays\n");
125 return 1;
126 }
127
128 st = super_by_fd(fd, &subarray);
129 if (!st) {
130 pr_err("cannot handle arrays with superblock version %d\n",
131 info.array.major_version);
132 return 1;
133 }
134
135 if (subarray) {
136 pr_err("Cannot grow linear sub-arrays yet\n");
137 free(subarray);
138 free(st);
139 return 1;
140 }
141
142 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
143 if (nfd < 0) {
144 pr_err("cannot open %s\n", newdev);
145 free(st);
146 return 1;
147 }
148 fstat(nfd, &stb);
149 if ((stb.st_mode & S_IFMT) != S_IFBLK) {
150 pr_err("%s is not a block device!\n", newdev);
151 close(nfd);
152 free(st);
153 return 1;
154 }
155 /* now check out all the devices and make sure we can read the
156 * superblock */
157 for (d=0 ; d < info.array.raid_disks ; d++) {
158 mdu_disk_info_t disk;
159 char *dv;
160
161 st->ss->free_super(st);
162
163 disk.number = d;
164 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
165 pr_err("cannot get device detail for device %d\n",
166 d);
167 close(nfd);
168 free(st);
169 return 1;
170 }
171 dv = map_dev(disk.major, disk.minor, 1);
172 if (!dv) {
173 pr_err("cannot find device file for device %d\n",
174 d);
175 close(nfd);
176 free(st);
177 return 1;
178 }
179 fd2 = dev_open(dv, O_RDWR);
180 if (fd2 < 0) {
181 pr_err("cannot open device file %s\n", dv);
182 close(nfd);
183 free(st);
184 return 1;
185 }
186
187 if (st->ss->load_super(st, fd2, NULL)) {
188 pr_err("cannot find super block on %s\n", dv);
189 close(nfd);
190 close(fd2);
191 free(st);
192 return 1;
193 }
194 close(fd2);
195 }
196 /* Ok, looks good. Let's update the superblock and write it out to
197 * newdev.
198 */
199
200 info.disk.number = d;
201 info.disk.major = major(stb.st_rdev);
202 info.disk.minor = minor(stb.st_rdev);
203 info.disk.raid_disk = d;
204 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
205 st->ss->update_super(st, &info, "linear-grow-new", newdev,
206 0, 0, NULL);
207
208 if (st->ss->store_super(st, nfd)) {
209 pr_err("Cannot store new superblock on %s\n",
210 newdev);
211 close(nfd);
212 return 1;
213 }
214 close(nfd);
215
216 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
217 pr_err("Cannot add new disk to this array\n");
218 return 1;
219 }
220 /* Well, that seems to have worked.
221 * Now go through and update all superblocks
222 */
223
224 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
225 pr_err("cannot get array info for %s\n", devname);
226 return 1;
227 }
228
229 nd = d;
230 for (d=0 ; d < info.array.raid_disks ; d++) {
231 mdu_disk_info_t disk;
232 char *dv;
233
234 disk.number = d;
235 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
236 pr_err("cannot get device detail for device %d\n",
237 d);
238 return 1;
239 }
240 dv = map_dev(disk.major, disk.minor, 1);
241 if (!dv) {
242 pr_err("cannot find device file for device %d\n",
243 d);
244 return 1;
245 }
246 fd2 = dev_open(dv, O_RDWR);
247 if (fd2 < 0) {
248 pr_err("cannot open device file %s\n", dv);
249 return 1;
250 }
251 if (st->ss->load_super(st, fd2, NULL)) {
252 pr_err("cannot find super block on %s\n", dv);
253 close(fd2);
254 return 1;
255 }
256 info.array.raid_disks = nd+1;
257 info.array.nr_disks = nd+1;
258 info.array.active_disks = nd+1;
259 info.array.working_disks = nd+1;
260
261 st->ss->update_super(st, &info, "linear-grow-update", dv,
262 0, 0, NULL);
263
264 if (st->ss->store_super(st, fd2)) {
265 pr_err("Cannot store new superblock on %s\n", dv);
266 close(fd2);
267 return 1;
268 }
269 close(fd2);
270 }
271
272 return 0;
273 }
274
275 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
276 {
277 /*
278 * First check that array doesn't have a bitmap
279 * Then create the bitmap
280 * Then add it
281 *
282 * For internal bitmaps, we need to check the version,
283 * find all the active devices, and write the bitmap block
284 * to all devices
285 */
286 mdu_bitmap_file_t bmf;
287 mdu_array_info_t array;
288 struct supertype *st;
289 char *subarray = NULL;
290 int major = BITMAP_MAJOR_HI;
291 int vers = md_get_version(fd);
292 unsigned long long bitmapsize, array_size;
293
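/* md driver versions before 0.90.3 (md_get_version() < 9003) only support host-endian bitmaps */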
294 if (vers < 9003) {
295 major = BITMAP_MAJOR_HOSTENDIAN;
296 pr_err("Warning - bitmaps created on this kernel are not portable\n"
297 " between different architectures. Consider upgrading the Linux kernel.\n");
298 }
299
300 /*
301 * We only ever get called if s->bitmap_file is != NULL, so this check
302 * is just here to quiet down static code checkers.
303 */
304 if (!s->bitmap_file)
305 return 1;
306
307 if (strcmp(s->bitmap_file, "clustered") == 0)
308 major = BITMAP_MAJOR_CLUSTERED;
309
310 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
311 if (errno == ENOMEM)
312 pr_err("Memory allocation failure.\n");
313 else
314 pr_err("bitmaps not supported by this kernel.\n");
315 return 1;
316 }
317 if (bmf.pathname[0]) {
318 if (strcmp(s->bitmap_file,"none")==0) {
319 if (ioctl(fd, SET_BITMAP_FILE, -1)!= 0) {
320 pr_err("failed to remove bitmap %s\n",
321 bmf.pathname);
322 return 1;
323 }
324 return 0;
325 }
326 pr_err("%s already has a bitmap (%s)\n",
327 devname, bmf.pathname);
328 return 1;
329 }
330 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
331 pr_err("cannot get array status for %s\n", devname);
332 return 1;
333 }
334 if (array.state & (1<<MD_SB_BITMAP_PRESENT)) {
335 if (strcmp(s->bitmap_file, "none")==0) {
336 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
337 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
338 if (array.state & (1<<MD_SB_CLUSTERED))
339 pr_err("failed to remove clustered bitmap.\n");
340 else
341 pr_err("failed to remove internal bitmap.\n");
342 return 1;
343 }
344 return 0;
345 }
346 pr_err("bitmap already present on %s\n", devname);
347 return 1;
348 }
349
350 if (strcmp(s->bitmap_file, "none") == 0) {
351 pr_err("no bitmap found on %s\n", devname);
352 return 1;
353 }
354 if (array.level <= 0) {
355 pr_err("Bitmaps not meaningful with level %s\n",
356 map_num(pers, array.level)?:"of this array");
357 return 1;
358 }
359 bitmapsize = array.size;
360 bitmapsize <<= 1;
361 if (get_dev_size(fd, NULL, &array_size) &&
362 array_size > (0x7fffffffULL<<9)) {
363 /* Array is big enough that we cannot trust array.size
364 * try other approaches
365 */
366 bitmapsize = get_component_size(fd);
367 }
368 if (bitmapsize == 0) {
369 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
370 return 1;
371 }
372
373 if (array.level == 10) {
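/* RAID10 layout: near copies in bits 0-7, far copies in bits 8-15; total copies is their product */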
374 int ncopies = (array.layout&255)*((array.layout>>8)&255);
375 bitmapsize = bitmapsize * array.raid_disks / ncopies;
376 }
377
378 st = super_by_fd(fd, &subarray);
379 if (!st) {
380 pr_err("Cannot understand version %d.%d\n",
381 array.major_version, array.minor_version);
382 return 1;
383 }
384 if (subarray) {
385 pr_err("Cannot add bitmaps to sub-arrays yet\n");
386 free(subarray);
387 free(st);
388 return 1;
389 }
390 if (strcmp(s->bitmap_file, "internal") == 0 ||
391 strcmp(s->bitmap_file, "clustered") == 0) {
392 int rv;
393 int d;
394 int offset_setable = 0;
395 struct mdinfo *mdi;
396 if (st->ss->add_internal_bitmap == NULL) {
397 pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name);
398 return 1;
399 }
400 st->nodes = c->nodes;
401 st->cluster_name = c->homecluster;
402 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
403 if (mdi)
404 offset_setable = 1;
405 for (d=0; d< st->max_devs; d++) {
406 mdu_disk_info_t disk;
407 char *dv;
408 disk.number = d;
409 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
410 continue;
411 if (disk.major == 0 &&
412 disk.minor == 0)
413 continue;
414 if ((disk.state & (1<<MD_DISK_SYNC))==0)
415 continue;
416 dv = map_dev(disk.major, disk.minor, 1);
417 if (dv) {
418 int fd2 = dev_open(dv, O_RDWR);
419 if (fd2 < 0)
420 continue;
421 if (st->ss->load_super(st, fd2, NULL)==0) {
422 if (st->ss->add_internal_bitmap(
423 st,
424 &s->bitmap_chunk, c->delay, s->write_behind,
425 bitmapsize, offset_setable,
426 major)
427 )
428 st->ss->write_bitmap(st, fd2, NoUpdate);
429 else {
430 pr_err("failed to create internal bitmap - chunksize problem.\n");
431 close(fd2);
432 return 1;
433 }
434 }
435 close(fd2);
436 }
437 }
438 if (offset_setable) {
439 st->ss->getinfo_super(st, mdi, NULL);
440 sysfs_init(mdi, fd, NULL);
441 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
442 mdi->bitmap_offset);
443 } else {
444 if (strcmp(s->bitmap_file, "clustered") == 0)
445 array.state |= (1<<MD_SB_CLUSTERED);
446 array.state |= (1<<MD_SB_BITMAP_PRESENT);
447 rv = ioctl(fd, SET_ARRAY_INFO, &array);
448 }
449 if (rv < 0) {
450 if (errno == EBUSY)
451 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
452 pr_err("failed to set internal bitmap.\n");
453 return 1;
454 }
455 } else {
456 int uuid[4];
457 int bitmap_fd;
458 int d;
459 int max_devs = st->max_devs;
460
461 /* try to load a superblock */
462 for (d = 0; d < max_devs; d++) {
463 mdu_disk_info_t disk;
464 char *dv;
465 int fd2;
466 disk.number = d;
467 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
468 continue;
469 if ((disk.major==0 && disk.minor==0) ||
470 (disk.state & (1<<MD_DISK_REMOVED)))
471 continue;
472 dv = map_dev(disk.major, disk.minor, 1);
473 if (!dv)
474 continue;
475 fd2 = dev_open(dv, O_RDONLY);
476 if (fd2 >= 0) {
477 if (st->ss->load_super(st, fd2, NULL) == 0) {
478 close(fd2);
479 st->ss->uuid_from_super(st, uuid);
480 break;
481 }
482 close(fd2);
483 }
484 }
485 if (d == max_devs) {
486 pr_err("cannot find UUID for array!\n");
487 return 1;
488 }
489 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid, s->bitmap_chunk,
490 c->delay, s->write_behind, bitmapsize, major)) {
491 return 1;
492 }
493 bitmap_fd = open(s->bitmap_file, O_RDWR);
494 if (bitmap_fd < 0) {
495 pr_err("weird: %s cannot be opened\n",
496 s->bitmap_file);
497 return 1;
498 }
499 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
500 int err = errno;
501 if (errno == EBUSY)
502 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
503 pr_err("Cannot set bitmap file for %s: %s\n",
504 devname, strerror(err));
505 return 1;
506 }
507 }
508
509 return 0;
510 }
511
512 /*
513 * When reshaping an array we might need to backup some data.
514 * This is written to all spares with a 'super_block' describing it.
515 * The superblock goes 4K from the end of the used space on the
516 * device.
517 * It is written after the backup is complete.
518 * It has the following structure.
519 */
520
521 static struct mdp_backup_super {
522 char magic[16]; /* md_backup_data-1 or -2 */
523 __u8 set_uuid[16];
524 __u64 mtime;
525 /* start/sizes in 512byte sectors */
526 __u64 devstart; /* address on backup device/file of data */
527 __u64 arraystart;
528 __u64 length;
529 __u32 sb_csum; /* csum of preceding bytes. */
530 __u32 pad1;
531 __u64 devstart2; /* offset in to data of second section */
532 __u64 arraystart2;
533 __u64 length2;
534 __u32 sb_csum2; /* csum of preceding bytes. */
535 __u8 pad[512-68-32];
536 } __attribute__((aligned(512))) bsb, bsb2;
537
538 static __u32 bsb_csum(char *buf, int len)
539 {
540 int i;
541 int csum = 0;
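/* NB: this accumulates buf[0] on every pass rather than buf[i]; presumably kept this way so checksums stay compatible with existing backup data */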
542 for (i = 0; i < len; i++)
543 csum = (csum<<3) + buf[0];
544 return __cpu_to_le32(csum);
545 }
546
547 static int check_idle(struct supertype *st)
548 {
549 /* Check that all member arrays for this container, or the
550 * container of this array, are idle
551 */
552 char *container = (st->container_devnm[0]
553 ? st->container_devnm : st->devnm);
554 struct mdstat_ent *ent, *e;
555 int is_idle = 1;
556
557 ent = mdstat_read(0, 0);
558 for (e = ent ; e; e = e->next) {
559 if (!is_container_member(e, container))
560 continue;
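/* a non-negative percent means a resync/recovery/reshape is in progress */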
561 if (e->percent >= 0) {
562 is_idle = 0;
563 break;
564 }
565 }
566 free_mdstat(ent);
567 return is_idle;
568 }
569
570 static int freeze_container(struct supertype *st)
571 {
572 char *container = (st->container_devnm[0]
573 ? st->container_devnm : st->devnm);
574
575 if (!check_idle(st))
576 return -1;
577
578 if (block_monitor(container, 1)) {
579 pr_err("failed to freeze container\n");
580 return -2;
581 }
582
583 return 1;
584 }
585
586 static void unfreeze_container(struct supertype *st)
587 {
588 char *container = (st->container_devnm[0]
589 ? st->container_devnm : st->devnm);
590
591 unblock_monitor(container, 1);
592 }
593
594 static int freeze(struct supertype *st)
595 {
596 /* Try to freeze resync/rebuild on this array/container.
597 * Return -1 if the array is busy,
598 * return -2 if the container cannot be frozen,
599 * return 0 if this kernel doesn't support 'frozen'
600 * return 1 if it worked.
601 */
602 if (st->ss->external)
603 return freeze_container(st);
604 else {
605 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
606 int err;
607 char buf[20];
608
609 if (!sra)
610 return -1;
611 /* Need to clear any 'read-auto' status */
612 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
613 strncmp(buf, "read-auto", 9) == 0)
614 sysfs_set_str(sra, NULL, "array_state", "clean");
615
616 err = sysfs_freeze_array(sra);
617 sysfs_free(sra);
618 return err;
619 }
620 }
621
622 static void unfreeze(struct supertype *st)
623 {
624 if (st->ss->external)
625 return unfreeze_container(st);
626 else {
627 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
628 char buf[20];
629
630 if (sra &&
631 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0
632 && strcmp(buf, "frozen\n") == 0)
633 sysfs_set_str(sra, NULL, "sync_action", "idle");
634 sysfs_free(sra);
635 }
636 }
637
638 static void wait_reshape(struct mdinfo *sra)
639 {
640 int fd = sysfs_get_fd(sra, NULL, "sync_action");
641 char action[20];
642
643 if (fd < 0)
644 return;
645
646 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
647 strncmp(action, "reshape", 7) == 0)
648 sysfs_wait(fd, NULL);
649 close(fd);
650 }
651
652 static int reshape_super(struct supertype *st, unsigned long long size,
653 int level, int layout, int chunksize, int raid_disks,
654 int delta_disks, char *backup_file, char *dev,
655 int direction, int verbose)
656 {
657 /* nothing extra to check in the native case */
658 if (!st->ss->external)
659 return 0;
660 if (!st->ss->reshape_super ||
661 !st->ss->manage_reshape) {
662 pr_err("%s metadata does not support reshape\n",
663 st->ss->name);
664 return 1;
665 }
666
667 return st->ss->reshape_super(st, size, level, layout, chunksize,
668 raid_disks, delta_disks, backup_file, dev,
669 direction, verbose);
670 }
671
672 static void sync_metadata(struct supertype *st)
673 {
674 if (st->ss->external) {
675 if (st->update_tail) {
676 flush_metadata_updates(st);
677 st->update_tail = &st->updates;
678 } else
679 st->ss->sync_metadata(st);
680 }
681 }
682
683 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
684 {
685 /* when dealing with external metadata subarrays we need to be
686 * prepared to handle EAGAIN. The kernel may need to wait for
687 * mdmon to mark the array active so the kernel can handle
688 * allocations/writeback when preparing the reshape action
689 * (md_allow_write()). We temporarily disable safe_mode_delay
690 * to close a race with the array_state going clean before the
691 * next write to raid_disks / stripe_cache_size
692 */
693 char safe[50];
694 int rc;
695
696 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
697 if (!container ||
698 (strcmp(name, "raid_disks") != 0 &&
699 strcmp(name, "stripe_cache_size") != 0))
700 return sysfs_set_num(sra, NULL, name, n);
701
702 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
703 if (rc <= 0)
704 return -1;
705 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
706 rc = sysfs_set_num(sra, NULL, name, n);
707 if (rc < 0 && errno == EAGAIN) {
708 ping_monitor(container);
709 /* if we get EAGAIN here then the monitor is not active
710 * so stop trying
711 */
712 rc = sysfs_set_num(sra, NULL, name, n);
713 }
714 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
715 return rc;
716 }
717
718 int start_reshape(struct mdinfo *sra, int already_running,
719 int before_data_disks, int data_disks)
720 {
721 int err;
722 unsigned long long sync_max_to_set;
723
724 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
725 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
726 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
727 sra->reshape_progress);
728 if (before_data_disks <= data_disks)
729 sync_max_to_set = sra->reshape_progress / data_disks;
730 else
731 sync_max_to_set = (sra->component_size * data_disks
732 - sra->reshape_progress) / data_disks;
733 if (!already_running)
734 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
735 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
736 if (!already_running && err == 0) {
737 int cnt = 5;
738 do {
739 err = sysfs_set_str(sra, NULL, "sync_action", "reshape");
740 if (err)
741 sleep(1);
742 } while (err && errno == EBUSY && cnt-- > 0);
743 }
744 return err;
745 }
746
747 void abort_reshape(struct mdinfo *sra)
748 {
749 sysfs_set_str(sra, NULL, "sync_action", "idle");
750 /*
751 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
752 * suspend_hi to decrease as well as increase.")
753 * you could only increase suspend_{lo,hi} unless the region they
754 * covered was empty. So to reset to 0, you need to push suspend_lo
755 * up past suspend_hi first. So to maximize the chance of mdadm
756 * working on all kernels, we want to keep doing that.
757 */
758 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
759 sysfs_set_num(sra, NULL, "suspend_hi", 0);
760 sysfs_set_num(sra, NULL, "suspend_lo", 0);
761 sysfs_set_num(sra, NULL, "sync_min", 0);
762 // It isn't safe to reset sync_max as we aren't monitoring.
763 // Array really should be stopped at this point.
764 }
765
766 int remove_disks_for_takeover(struct supertype *st,
767 struct mdinfo *sra,
768 int layout)
769 {
770 int nr_of_copies;
771 struct mdinfo *remaining;
772 int slot;
773
774 if (sra->array.level == 10)
775 nr_of_copies = layout & 0xff;
776 else if (sra->array.level == 1)
777 nr_of_copies = sra->array.raid_disks;
778 else
779 return 1;
780
781 remaining = sra->devs;
782 sra->devs = NULL;
783 /* for each 'copy', select one device and remove from the list. */
784 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
785 struct mdinfo **diskp;
786 int found = 0;
787
788 /* Find a working device to keep */
789 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
790 struct mdinfo *disk = *diskp;
791
792 if (disk->disk.raid_disk < slot)
793 continue;
794 if (disk->disk.raid_disk >= slot + nr_of_copies)
795 continue;
796 if (disk->disk.state & (1<<MD_DISK_REMOVED))
797 continue;
798 if (disk->disk.state & (1<<MD_DISK_FAULTY))
799 continue;
800 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
801 continue;
802
803 /* We have found a good disk to use! */
804 *diskp = disk->next;
805 disk->next = sra->devs;
806 sra->devs = disk;
807 found = 1;
808 break;
809 }
810 if (!found)
811 break;
812 }
813
814 if (slot < sra->array.raid_disks) {
815 /* didn't find all slots */
816 struct mdinfo **e;
817 e = &remaining;
818 while (*e)
819 e = &(*e)->next;
820 *e = sra->devs;
821 sra->devs = remaining;
822 return 1;
823 }
824
825 /* Remove all 'remaining' devices from the array */
826 while (remaining) {
827 struct mdinfo *sd = remaining;
828 remaining = sd->next;
829
830 sysfs_set_str(sra, sd, "state", "faulty");
831 sysfs_set_str(sra, sd, "slot", "none");
832 /* for external metadata, disks should be removed by mdmon */
833 if (!st->ss->external)
834 sysfs_set_str(sra, sd, "state", "remove");
835 sd->disk.state |= (1<<MD_DISK_REMOVED);
836 sd->disk.state &= ~(1<<MD_DISK_SYNC);
837 sd->next = sra->devs;
838 sra->devs = sd;
839 }
840 return 0;
841 }
842
843 void reshape_free_fdlist(int *fdlist,
844 unsigned long long *offsets,
845 int size)
846 {
847 int i;
848
849 for (i = 0; i < size; i++)
850 if (fdlist[i] >= 0)
851 close(fdlist[i]);
852
853 free(fdlist);
854 free(offsets);
855 }
856
857 int reshape_prepare_fdlist(char *devname,
858 struct mdinfo *sra,
859 int raid_disks,
860 int nrdisks,
861 unsigned long blocks,
862 char *backup_file,
863 int *fdlist,
864 unsigned long long *offsets)
865 {
866 int d = 0;
867 struct mdinfo *sd;
868
869 enable_fds(nrdisks);
870 for (d = 0; d <= nrdisks; d++)
871 fdlist[d] = -1;
872 d = raid_disks;
873 for (sd = sra->devs; sd; sd = sd->next) {
874 if (sd->disk.state & (1<<MD_DISK_FAULTY))
875 continue;
876 if (sd->disk.state & (1<<MD_DISK_SYNC) &&
877 sd->disk.raid_disk < raid_disks) {
878 char *dn = map_dev(sd->disk.major,
879 sd->disk.minor, 1);
880 fdlist[sd->disk.raid_disk]
881 = dev_open(dn, O_RDONLY);
882 offsets[sd->disk.raid_disk] = sd->data_offset*512;
883 if (fdlist[sd->disk.raid_disk] < 0) {
884 pr_err("%s: cannot open component %s\n",
885 devname, dn ? dn : "-unknown-");
886 d = -1;
887 goto release;
888 }
889 } else if (backup_file == NULL) {
890 /* spare */
891 char *dn = map_dev(sd->disk.major,
892 sd->disk.minor, 1);
893 fdlist[d] = dev_open(dn, O_RDWR);
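/* place the backup at the end of the spare's data area: 'blocks' sectors of data followed by 8 sectors (4K) for the backup superblock described above */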
894 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
895 if (fdlist[d] < 0) {
896 pr_err("%s: cannot open component %s\n",
897 devname, dn ? dn : "-unknown-");
898 d = -1;
899 goto release;
900 }
901 d++;
902 }
903 }
904 release:
905 return d;
906 }
907
908 int reshape_open_backup_file(char *backup_file,
909 int fd,
910 char *devname,
911 long blocks,
912 int *fdlist,
913 unsigned long long *offsets,
914 char *sys_name,
915 int restart)
916 {
917 /* Return 1 on success, 0 on any form of failure */
918 /* need to check backup file is large enough */
919 char buf[512];
920 struct stat stb;
921 unsigned int dev;
922 int i;
923
924 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
925 S_IRUSR | S_IWUSR);
926 *offsets = 8 * 512;
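/* backup data starts 8 sectors (4KiB) into the file; the zero-fill loop below covers that leading space as well (blocks + 8 sectors) */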
927 if (*fdlist < 0) {
928 pr_err("%s: cannot create backup file %s: %s\n",
929 devname, backup_file, strerror(errno));
930 return 0;
931 }
932 /* Guard against backup file being on array device.
933 * If array is partitioned or if LVM etc is in the
934 * way this will not notice, but it is better than
935 * nothing.
936 */
937 fstat(*fdlist, &stb);
938 dev = stb.st_dev;
939 fstat(fd, &stb);
940 if (stb.st_rdev == dev) {
941 pr_err("backup file must NOT be on the array being reshaped.\n");
942 close(*fdlist);
943 return 0;
944 }
945
946 memset(buf, 0, 512);
947 for (i=0; i < blocks + 8 ; i++) {
948 if (write(*fdlist, buf, 512) != 512) {
949 pr_err("%s: cannot create backup file %s: %s\n",
950 devname, backup_file, strerror(errno));
951 return 0;
952 }
953 }
954 if (fsync(*fdlist) != 0) {
955 pr_err("%s: cannot create backup file %s: %s\n",
956 devname, backup_file, strerror(errno));
957 return 0;
958 }
959
960 if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) {
961 char *bu = make_backup(sys_name);
962 if (symlink(backup_file, bu))
963 pr_err("Recording backup file in " MAP_DIR " failed: %s\n",
964 strerror(errno));
965 free(bu);
966 }
967
968 return 1;
969 }
970
971 unsigned long compute_backup_blocks(int nchunk, int ochunk,
972 unsigned int ndata, unsigned int odata)
973 {
974 unsigned long a, b, blocks;
975 /* So how much do we need to back up?
976 * We need an amount of data which is both a whole number of
977 * old stripes and a whole number of new stripes.
978 * So we need the LCM of (chunksize*datadisks) for the old and new layouts.
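* e.g. (illustrative): growing from 3 to 4 data disks with a 512K chunk gives a = 3072, b = 4096, GCD = 1024, so blocks = 12288 sectors (6MiB).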
979 */
980 a = (ochunk/512) * odata;
981 b = (nchunk/512) * ndata;
982 /* Find GCD */
983 a = GCD(a, b);
984 /* LCM == product / GCD */
985 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
986
987 return blocks;
988 }
989
990 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
991 {
992 /* Based on the current array state in info->array and
993 * the changes in info->new_* etc, determine:
994 * - whether the change is possible
995 * - Intermediate level/raid_disks/layout
996 * - whether a restriping reshape is needed
997 * - number of sectors in minimum change unit. This
998 * will cover a whole number of stripes in 'before' and
999 * 'after'.
1000 *
1001 * Return message if the change should be rejected
1002 * NULL if the change can be achieved
1003 *
1004 * This can be called as part of starting a reshape, or
1005 * when assembling an array that is undergoing reshape.
1006 */
1007 int near, far, offset, copies;
1008 int new_disks;
1009 int old_chunk, new_chunk;
1010 /* delta_parity records change in number of devices
1011 * caused by level change
1012 */
1013 int delta_parity = 0;
1014
1015 memset(re, 0, sizeof(*re));
1016
1017 /* If a new level is not explicitly given, we assume no change */
1018 if (info->new_level == UnSet)
1019 info->new_level = info->array.level;
1020
1021 if (info->new_chunk)
1022 switch (info->new_level) {
1023 case 0:
1024 case 4:
1025 case 5:
1026 case 6:
1027 case 10:
1028 /* chunk size is meaningful, must divide component_size
1029 * evenly
1030 */
1031 if (info->component_size % (info->new_chunk/512)) {
1032 unsigned long long shrink = info->component_size;
1033 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1034 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1035 info->new_chunk/1024, info->component_size/2);
1036 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1037 devname, shrink/2);
1038 pr_err("will shrink the array so the given chunk size would work.\n");
1039 return "";
1040 }
1041 break;
1042 default:
1043 return "chunk size not meaningful for this level";
1044 }
1045 else
1046 info->new_chunk = info->array.chunk_size;
1047
1048 switch (info->array.level) {
1049 default:
1050 return "No reshape is possibly for this RAID level";
1051 case LEVEL_LINEAR:
1052 if (info->delta_disks != UnSet)
1053 return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
1054 else
1055 return "Only --add is supported for LINEAR, other --grow options are not meaningful";
1056 case 1:
1057 /* RAID1 can convert to RAID1 with different disks, or
1058 * raid5 with 2 disks, or
1059 * raid0 with 1 disk
1060 */
1061 if (info->new_level > 1 &&
1062 (info->component_size & 7))
1063 return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
1064 if (info->new_level == 0) {
1065 if (info->delta_disks != UnSet &&
1066 info->delta_disks != 0)
1067 return "Cannot change number of disks with RAID1->RAID0 conversion";
1068 re->level = 0;
1069 re->before.data_disks = 1;
1070 re->after.data_disks = 1;
1071 return NULL;
1072 }
1073 if (info->new_level == 1) {
1074 if (info->delta_disks == UnSet)
1075 /* Don't know what to do */
1076 return "no change requested for Growing RAID1";
1077 re->level = 1;
1078 return NULL;
1079 }
1080 if (info->array.raid_disks != 2 &&
1081 info->new_level == 5)
1082 return "Can only convert a 2-device array to RAID5";
1083 if (info->array.raid_disks == 2 &&
1084 info->new_level == 5) {
1085
1086 re->level = 5;
1087 re->before.data_disks = 1;
1088 if (info->delta_disks != UnSet &&
1089 info->delta_disks != 0)
1090 re->after.data_disks = 1 + info->delta_disks;
1091 else
1092 re->after.data_disks = 1;
1093 if (re->after.data_disks < 1)
1094 return "Number of disks too small for RAID5";
1095
1096 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
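/* RAID1 has no chunk size; assume 64K as the chunk for the RAID5 conversion */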
1097 info->array.chunk_size = 65536;
1098 break;
1099 }
1100 /* Could do some multi-stage conversions, but leave that to
1101 * later.
1102 */
1103 return "Impossibly level change request for RAID1";
1104
1105 case 10:
1106 /* RAID10 can be converted from near mode to
1107 * RAID0 by removing some devices.
1108 * It can also be reshaped if the kernel supports
1109 * new_data_offset.
1110 */
1111 switch (info->new_level) {
1112 case 0:
1113 if ((info->array.layout & ~0xff) != 0x100)
1114 return "Cannot Grow RAID10 with far/offset layout";
1115 /* number of devices must be multiple of number of copies */
1116 if (info->array.raid_disks % (info->array.layout & 0xff))
1117 return "RAID10 layout too complex for Grow operation";
1118
1119 new_disks = (info->array.raid_disks
1120 / (info->array.layout & 0xff));
1121 if (info->delta_disks == UnSet)
1122 info->delta_disks = (new_disks
1123 - info->array.raid_disks);
1124
1125 if (info->delta_disks != new_disks - info->array.raid_disks)
1126 return "New number of raid-devices impossible for RAID10";
1127 if (info->new_chunk &&
1128 info->new_chunk != info->array.chunk_size)
1129 return "Cannot change chunk-size with RAID10 Grow";
1130
1131 /* looks good */
1132 re->level = 0;
1133 re->before.data_disks = new_disks;
1134 re->after.data_disks = re->before.data_disks;
1135 return NULL;
1136
1137 case 10:
1138 near = info->array.layout & 0xff;
1139 far = (info->array.layout >> 8) & 0xff;
1140 offset = info->array.layout & 0x10000;
1141 if (far > 1 && !offset)
1142 return "Cannot reshape RAID10 in far-mode";
1143 copies = near * far;
1144
1145 old_chunk = info->array.chunk_size * far;
1146
1147 if (info->new_layout == UnSet)
1148 info->new_layout = info->array.layout;
1149 else {
1150 near = info->new_layout & 0xff;
1151 far = (info->new_layout >> 8) & 0xff;
1152 offset = info->new_layout & 0x10000;
1153 if (far > 1 && !offset)
1154 return "Cannot reshape RAID10 to far-mode";
1155 if (near * far != copies)
1156 return "Cannot change number of copies when reshaping RAID10";
1157 }
1158 if (info->delta_disks == UnSet)
1159 info->delta_disks = 0;
1160 new_disks = (info->array.raid_disks +
1161 info->delta_disks);
1162
1163 new_chunk = info->new_chunk * far;
1164
1165 re->level = 10;
1166 re->before.layout = info->array.layout;
1167 re->before.data_disks = info->array.raid_disks;
1168 re->after.layout = info->new_layout;
1169 re->after.data_disks = new_disks;
1170 /* For RAID10 we don't do backup but do allow reshape,
1171 * so set backup_blocks to INVALID_SECTORS rather than
1172 * zero.
1173 * And there is no need to synchronise stripes on both
1174 * 'old' and 'new'. So the important
1175 * number is the minimum data_offset difference
1176 * which is the larger of the old and new (far copies * chunk).
1177 */
1178 re->backup_blocks = INVALID_SECTORS;
1179 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1180 if (new_disks < re->before.data_disks &&
1181 info->space_after < re->min_offset_change)
1182 /* Reduce component size by one chunk */
1183 re->new_size = (info->component_size -
1184 re->min_offset_change);
1185 else
1186 re->new_size = info->component_size;
1187 re->new_size = re->new_size * new_disks / copies;
1188 return NULL;
1189
1190 default:
1191 return "RAID10 can only be changed to RAID0";
1192 }
1193 case 0:
1194 /* RAID0 can be converted to RAID10, or to RAID456 */
1195 if (info->new_level == 10) {
1196 if (info->new_layout == UnSet && info->delta_disks == UnSet) {
1197 /* Assume near=2 layout */
1198 info->new_layout = 0x102;
1199 info->delta_disks = info->array.raid_disks;
1200 }
1201 if (info->new_layout == UnSet) {
1202 int copies = 1 + (info->delta_disks
1203 / info->array.raid_disks);
1204 if (info->array.raid_disks * (copies-1)
1205 != info->delta_disks)
1206 return "Impossible number of devices for RAID0->RAID10";
1207 info->new_layout = 0x100 + copies;
1208 }
1209 if (info->delta_disks == UnSet) {
1210 int copies = info->new_layout & 0xff;
1211 if (info->new_layout != 0x100 + copies)
1212 return "New layout impossible for RAID0->RAID10";;
1213 info->delta_disks = (copies - 1) *
1214 info->array.raid_disks;
1215 }
1216 if (info->new_chunk &&
1217 info->new_chunk != info->array.chunk_size)
1218 return "Cannot change chunk-size with RAID0->RAID10";
1219 /* looks good */
1220 re->level = 10;
1221 re->before.data_disks = (info->array.raid_disks +
1222 info->delta_disks);
1223 re->after.data_disks = re->before.data_disks;
1224 re->before.layout = info->new_layout;
1225 return NULL;
1226 }
1227
1228 /* RAID0 can also convert to RAID0/4/5/6 by first converting to
1229 * a raid4 style layout of the final level.
1230 */
1231 switch (info->new_level) {
1232 case 4:
1233 delta_parity = 1;
1234 case 0:
1235 re->level = 4;
1236 re->before.layout = 0;
1237 break;
1238 case 5:
1239 delta_parity = 1;
1240 re->level = 5;
1241 re->before.layout = ALGORITHM_PARITY_N;
1242 if (info->new_layout == UnSet)
1243 info->new_layout = map_name(r5layout, "default");
1244 break;
1245 case 6:
1246 delta_parity = 2;
1247 re->level = 6;
1248 re->before.layout = ALGORITHM_PARITY_N;
1249 if (info->new_layout == UnSet)
1250 info->new_layout = map_name(r6layout, "default");
1251 break;
1252 default:
1253 return "Impossible level change requested";
1254 }
1255 re->before.data_disks = info->array.raid_disks;
1256 /* determining 'after' layout happens outside this 'switch' */
1257 break;
1258
1259 case 4:
1260 info->array.layout = ALGORITHM_PARITY_N;
1261 case 5:
1262 switch (info->new_level) {
1263 case 0:
1264 delta_parity = -1;
1265 case 4:
1266 re->level = info->array.level;
1267 re->before.data_disks = info->array.raid_disks - 1;
1268 re->before.layout = info->array.layout;
1269 break;
1270 case 5:
1271 re->level = 5;
1272 re->before.data_disks = info->array.raid_disks - 1;
1273 re->before.layout = info->array.layout;
1274 break;
1275 case 6:
1276 delta_parity = 1;
1277 re->level = 6;
1278 re->before.data_disks = info->array.raid_disks - 1;
1279 switch (info->array.layout) {
1280 case ALGORITHM_LEFT_ASYMMETRIC:
1281 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1282 break;
1283 case ALGORITHM_RIGHT_ASYMMETRIC:
1284 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1285 break;
1286 case ALGORITHM_LEFT_SYMMETRIC:
1287 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1288 break;
1289 case ALGORITHM_RIGHT_SYMMETRIC:
1290 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1291 break;
1292 case ALGORITHM_PARITY_0:
1293 re->before.layout = ALGORITHM_PARITY_0_6;
1294 break;
1295 case ALGORITHM_PARITY_N:
1296 re->before.layout = ALGORITHM_PARITY_N_6;
1297 break;
1298 default:
1299 return "Cannot convert an array with this layout";
1300 }
1301 break;
1302 case 1:
1303 if (info->array.raid_disks != 2)
1304 return "Can only convert a 2-device array to RAID1";
1305 if (info->delta_disks != UnSet &&
1306 info->delta_disks != 0)
1307 return "Cannot set raid_disk when converting RAID5->RAID1";
1308 re->level = 1;
1309 info->new_chunk = 0;
1310 return NULL;
1311 default:
1312 return "Impossible level change requested";
1313 }
1314 break;
1315 case 6:
1316 switch (info->new_level) {
1317 case 4:
1318 case 5:
1319 delta_parity = -1;
1320 case 6:
1321 re->level = 6;
1322 re->before.data_disks = info->array.raid_disks - 2;
1323 re->before.layout = info->array.layout;
1324 break;
1325 default:
1326 return "Impossible level change requested";
1327 }
1328 break;
1329 }
1330
1331 /* If we reached here then it looks like a re-stripe is
1332 * happening. We have determined the intermediate level
1333 * and initial raid_disks/layout and stored these in 're'.
1334 *
1335 * We need to deduce the final layout that can be atomically
1336 * converted to the end state.
1337 */
1338 switch (info->new_level) {
1339 case 0:
1340 /* We can only get to RAID0 from RAID4 or RAID5
1341 * with appropriate layout and one extra device
1342 */
1343 if (re->level != 4 && re->level != 5)
1344 return "Cannot covert to RAID0 from this level";
1345
1346 switch (re->level) {
1347 case 4:
1348 re->before.layout = 0;
1349 re->after.layout = 0;
1350 break;
1351 case 5:
1352 re->after.layout = ALGORITHM_PARITY_N;
1353 break;
1354 }
1355 break;
1356
1357 case 4:
1358 /* We can only get to RAID4 from RAID5 */
1359 if (re->level != 4 && re->level != 5)
1360 return "Cannot convert to RAID4 from this level";
1361
1362 switch (re->level) {
1363 case 4:
1364 re->after.layout = 0;
1365 break;
1366 case 5:
1367 re->after.layout = ALGORITHM_PARITY_N;
1368 break;
1369 }
1370 break;
1371
1372 case 5:
1373 /* We get to RAID5 from RAID5 or RAID6 */
1374 if (re->level != 5 && re->level != 6)
1375 return "Cannot convert to RAID5 from this level";
1376
1377 switch (re->level) {
1378 case 5:
1379 if (info->new_layout == UnSet)
1380 re->after.layout = re->before.layout;
1381 else
1382 re->after.layout = info->new_layout;
1383 break;
1384 case 6:
1385 if (info->new_layout == UnSet)
1386 info->new_layout = re->before.layout;
1387
1388 /* after.layout needs to be raid6 version of new_layout */
1389 if (info->new_layout == ALGORITHM_PARITY_N)
1390 re->after.layout = ALGORITHM_PARITY_N;
1391 else {
1392 char layout[40];
1393 char *ls = map_num(r5layout, info->new_layout);
1394 int l;
1395 if (ls) {
1396 /* Current RAID6 layout has a RAID5
1397 * equivalent - good
1398 */
1399 strcat(strcpy(layout, ls), "-6");
1400 l = map_name(r6layout, layout);
1401 if (l == UnSet)
1402 return "Cannot find RAID6 layout to convert to";
1403 } else {
1404 /* Current RAID6 has no equivalent.
1405 * If it is already a '-6' layout we
1406 * can leave it unchanged, else we must
1407 * fail
1408 */
1409 ls = map_num(r6layout, info->new_layout);
1410 if (!ls ||
1411 strcmp(ls+strlen(ls)-2, "-6") != 0)
1412 return "Please specify new layout";
1413 l = info->new_layout;
1414 }
1415 re->after.layout = l;
1416 }
1417 }
1418 break;
1419
1420 case 6:
1421 /* We must already be at level 6 */
1422 if (re->level != 6)
1423 return "Impossible level change";
1424 if (info->new_layout == UnSet)
1425 re->after.layout = info->array.layout;
1426 else
1427 re->after.layout = info->new_layout;
1428 break;
1429 default:
1430 return "Impossible level change requested";
1431 }
1432 if (info->delta_disks == UnSet)
1433 info->delta_disks = delta_parity;
1434
1435 re->after.data_disks = (re->before.data_disks
1436 + info->delta_disks
1437 - delta_parity);
1438 switch (re->level) {
1439 case 6: re->parity = 2;
1440 break;
1441 case 4:
1442 case 5: re->parity = 1;
1443 break;
1444 default: re->parity = 0;
1445 break;
1446 }
1447 /* So we have a restripe operation, we need to calculate the number
1448 * of blocks per reshape operation.
1449 */
1450 re->new_size = info->component_size * re->before.data_disks;
1451 if (info->new_chunk == 0)
1452 info->new_chunk = info->array.chunk_size;
1453 if (re->after.data_disks == re->before.data_disks &&
1454 re->after.layout == re->before.layout &&
1455 info->new_chunk == info->array.chunk_size) {
1456 /* Nothing to change, can change level immediately. */
1457 re->level = info->new_level;
1458 re->backup_blocks = 0;
1459 return NULL;
1460 }
1461 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1462 /* chunk and layout changes make no difference */
1463 re->level = info->new_level;
1464 re->backup_blocks = 0;
1465 return NULL;
1466 }
1467
1468 if (re->after.data_disks == re->before.data_disks &&
1469 get_linux_version() < 2006032)
1470 return "in-place reshape is not safe before 2.6.32 - sorry.";
1471
1472 if (re->after.data_disks < re->before.data_disks &&
1473 get_linux_version() < 2006030)
1474 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1475
1476 re->backup_blocks = compute_backup_blocks(
1477 info->new_chunk, info->array.chunk_size,
1478 re->after.data_disks,
1479 re->before.data_disks);
1480 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1481
1482 re->new_size = info->component_size * re->after.data_disks;
1483 return NULL;
1484 }
1485
1486 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1487 char *text_version)
1488 {
1489 struct mdinfo *info;
1490 char *subarray;
1491 int ret_val = -1;
1492
1493 if ((st == NULL) || (sra == NULL))
1494 return ret_val;
1495
1496 if (text_version == NULL)
1497 text_version = sra->text_version;
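/* for an external subarray, text_version looks like "/<container>/<index>"; skip past the container name to the subarray index */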
1498 subarray = strchr(text_version+1, '/')+1;
1499 info = st->ss->container_content(st, subarray);
1500 if (info) {
1501 unsigned long long current_size = 0;
1502 unsigned long long new_size =
1503 info->custom_array_size/2;
1504
1505 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1506 new_size > current_size) {
1507 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1508 < 0)
1509 dprintf("Error: Cannot set array size");
1510 else {
1511 ret_val = 0;
1512 dprintf("Array size changed");
1513 }
1514 dprintf_cont(" from %llu to %llu.\n",
1515 current_size, new_size);
1516 }
1517 sysfs_free(info);
1518 } else
1519 dprintf("Error: set_array_size(): info pointer in NULL\n");
1520
1521 return ret_val;
1522 }
1523
1524 static int reshape_array(char *container, int fd, char *devname,
1525 struct supertype *st, struct mdinfo *info,
1526 int force, struct mddev_dev *devlist,
1527 unsigned long long data_offset,
1528 char *backup_file, int verbose, int forked,
1529 int restart, int freeze_reshape);
1530 static int reshape_container(char *container, char *devname,
1531 int mdfd,
1532 struct supertype *st,
1533 struct mdinfo *info,
1534 int force,
1535 char *backup_file, int verbose,
1536 int forked, int restart, int freeze_reshape);
1537
1538 int Grow_reshape(char *devname, int fd,
1539 struct mddev_dev *devlist,
1540 unsigned long long data_offset,
1541 struct context *c, struct shape *s)
1542 {
1543 /* Make some changes in the shape of an array.
1544 * The kernel must support the change.
1545 *
1546 * There are three different changes. Each can trigger
1547 * a resync or recovery so we freeze that until we have
1548 * requested everything (if kernel supports freezing - 2.6.30).
1549 * The steps are:
1550 * - change size (i.e. component_size)
1551 * - change level
1552 * - change layout/chunksize/ndisks
1553 *
1554 * The last can require a reshape. It is different on different
1555 * levels so we need to check the level before acting on it.
1556 * Sometimes the level change needs to be requested after the
1557 * reshape (e.g. raid6->raid5, raid5->raid0)
1558 *
1559 */
1560 struct mdu_array_info_s array;
1561 int rv = 0;
1562 struct supertype *st;
1563 char *subarray = NULL;
1564
1565 int frozen;
1566 int changed = 0;
1567 char *container = NULL;
1568 int cfd = -1;
1569
1570 struct mddev_dev *dv;
1571 int added_disks;
1572
1573 struct mdinfo info;
1574 struct mdinfo *sra;
1575
1576 if (ioctl(fd, GET_ARRAY_INFO, &array) < 0) {
1577 pr_err("%s is not an active md array - aborting\n",
1578 devname);
1579 return 1;
1580 }
1581 if (data_offset != INVALID_SECTORS && array.level != 10
1582 && (array.level < 4 || array.level > 6)) {
1583 pr_err("--grow --data-offset not yet supported\n");
1584 return 1;
1585 }
1586
1587 if (s->size > 0 &&
1588 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1589 pr_err("cannot change component size at the same time as other changes.\n"
1590 " Change size first, then check data is intact before making other changes.\n");
1591 return 1;
1592 }
1593
1594 if (s->raiddisks && s->raiddisks < array.raid_disks && array.level > 1 &&
1595 get_linux_version() < 2006032 &&
1596 !check_env("MDADM_FORCE_FEWER")) {
1597 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1598 " Please use a newer kernel\n");
1599 return 1;
1600 }
1601
1602 st = super_by_fd(fd, &subarray);
1603 if (!st) {
1604 pr_err("Unable to determine metadata format for %s\n", devname);
1605 return 1;
1606 }
1607 if (s->raiddisks > st->max_devs) {
1608 pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs);
1609 return 1;
1610 }
1611 if (s->level == 0 &&
1612 (array.state & (1<<MD_SB_BITMAP_PRESENT)) &&
1613 !(array.state & (1<<MD_SB_CLUSTERED))) {
1614 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
1615 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
1616 pr_err("failed to remove internal bitmap.\n");
1617 return 1;
1618 }
1619 }
1620
1621 /* in the external case we need to check that the requested reshape is
1622 * supported, and perform an initial check that the container holds the
1623 * pre-requisite spare devices (mdmon owns final validation)
1624 */
1625 if (st->ss->external) {
1626 int rv;
1627
1628 if (subarray) {
1629 container = st->container_devnm;
1630 cfd = open_dev_excl(st->container_devnm);
1631 } else {
1632 container = st->devnm;
1633 close(fd);
1634 cfd = open_dev_excl(st->devnm);
1635 fd = cfd;
1636 }
1637 if (cfd < 0) {
1638 pr_err("Unable to open container for %s\n",
1639 devname);
1640 free(subarray);
1641 return 1;
1642 }
1643
1644 rv = st->ss->load_container(st, cfd, NULL);
1645
1646 if (rv) {
1647 pr_err("Cannot read superblock for %s\n",
1648 devname);
1649 free(subarray);
1650 return 1;
1651 }
1652
1653 /* check if operation is supported for metadata handler */
1654 if (st->ss->container_content) {
1655 struct mdinfo *cc = NULL;
1656 struct mdinfo *content = NULL;
1657
1658 cc = st->ss->container_content(st, subarray);
1659 for (content = cc; content ; content = content->next) {
1660 int allow_reshape = 1;
1661
1662 /* check if reshape is allowed based on metadata
1663 * indications stored in content.array.status
1664 */
1665 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
1666 allow_reshape = 0;
1667 if (content->array.state
1668 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE))
1669 allow_reshape = 0;
1670 if (!allow_reshape) {
1671 pr_err("cannot reshape arrays in container with unsupported metadata: %s(%s)\n",
1672 devname, container);
1673 sysfs_free(cc);
1674 free(subarray);
1675 return 1;
1676 }
1677 }
1678 sysfs_free(cc);
1679 }
1680 if (mdmon_running(container))
1681 st->update_tail = &st->updates;
1682 }
1683
1684 added_disks = 0;
1685 for (dv = devlist; dv; dv = dv->next)
1686 added_disks++;
1687 if (s->raiddisks > array.raid_disks &&
1688 array.spare_disks + added_disks < (s->raiddisks - array.raid_disks) &&
1689 !c->force) {
1690 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
1691 " Use --force to over-ride this check.\n",
1692 s->raiddisks - array.raid_disks,
1693 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1694 array.spare_disks + added_disks);
1695 return 1;
1696 }
1697
1698 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS
1699 | GET_STATE | GET_VERSION);
1700 if (sra) {
1701 if (st->ss->external && subarray == NULL) {
1702 array.level = LEVEL_CONTAINER;
1703 sra->array.level = LEVEL_CONTAINER;
1704 }
1705 } else {
1706 pr_err("failed to read sysfs parameters for %s\n",
1707 devname);
1708 return 1;
1709 }
1710 frozen = freeze(st);
1711 if (frozen < -1) {
1712 /* freeze() already spewed the reason */
1713 sysfs_free(sra);
1714 return 1;
1715 } else if (frozen < 0) {
1716 pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname);
1717 sysfs_free(sra);
1718 return 1;
1719 }
1720
1721 /* ========= set size =============== */
1722 if (s->size > 0 && (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1723 unsigned long long orig_size = get_component_size(fd)/2;
1724 unsigned long long min_csize;
1725 struct mdinfo *mdi;
1726 int raid0_takeover = 0;
1727
1728 if (orig_size == 0)
1729 orig_size = (unsigned) array.size;
1730
1731 if (orig_size == 0) {
1732 pr_err("Cannot set device size in this type of array.\n");
1733 rv = 1;
1734 goto release;
1735 }
1736
1737 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1738 devname, APPLY_METADATA_CHANGES, c->verbose > 0)) {
1739 rv = 1;
1740 goto release;
1741 }
1742 sync_metadata(st);
1743 if (st->ss->external) {
1744 /* metadata can impose a size limit,
1745 * so update the size value according to metadata information
1746 */
1747 struct mdinfo *sizeinfo =
1748 st->ss->container_content(st, subarray);
1749 if (sizeinfo) {
1750 unsigned long long new_size =
1751 sizeinfo->custom_array_size/2;
1752 int data_disks = get_data_disks(
1753 sizeinfo->array.level,
1754 sizeinfo->array.layout,
1755 sizeinfo->array.raid_disks);
1756 new_size /= data_disks;
1757 dprintf("Metadata size correction from %llu to %llu (%llu)\n", orig_size, new_size,
1758 new_size * data_disks);
1759 s->size = new_size;
1760 sysfs_free(sizeinfo);
1761 }
1762 }
1763
1764 /* Update the size of each member device in case
1765 * they have been resized. This will never reduce
1766 * below the current used-size. The "size" attribute
1767 * understands '0' to mean 'max'.
1768 */
1769 min_csize = 0;
1770 rv = 0;
1771 for (mdi = sra->devs; mdi; mdi = mdi->next) {
1772 if (sysfs_set_num(sra, mdi, "size",
1773 s->size == MAX_SIZE ? 0 : s->size) < 0) {
1774 /* Probably kernel refusing to let us
1775 * reduce the size - not an error.
1776 */
1777 break;
1778 }
1779 if (array.not_persistent == 0 &&
1780 array.major_version == 0 &&
1781 get_linux_version() < 3001000) {
1782 /* Dangerous to allow size to exceed 2TB */
1783 unsigned long long csize;
1784 if (sysfs_get_ll(sra, mdi, "size", &csize) == 0) {
1785 if (csize >= 2ULL*1024*1024*1024)
1786 csize = 2ULL*1024*1024*1024;
1787 if ((min_csize == 0 || (min_csize
1788 > csize)))
1789 min_csize = csize;
1790 }
1791 }
1792 }
1793 if (rv) {
1794 pr_err("Cannot set size on array members.\n");
1795 goto size_change_error;
1796 }
1797 if (min_csize && s->size > min_csize) {
1798 pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n");
1799 rv = 1;
1800 goto size_change_error;
1801 }
1802 if (min_csize && s->size == MAX_SIZE) {
1803 /* Don't let the kernel choose a size - it will get
1804 * it wrong
1805 */
1806 pr_err("Limited v0.90 array to 2TB per device\n");
1807 s->size = min_csize;
1808 }
1809 if (st->ss->external) {
1810 if (sra->array.level == 0) {
1811 rv = sysfs_set_str(sra, NULL, "level",
1812 "raid5");
1813 if (!rv) {
1814 raid0_takeover = 1;
1815 /* get array parameters after takeover
1816 * to change one parameter at time only
1817 */
1818 rv = ioctl(fd, GET_ARRAY_INFO, &array);
1819 }
1820 }
1821 /* make sure mdmon is
1822 * aware of the new level */
1823 if (!mdmon_running(st->container_devnm))
1824 start_mdmon(st->container_devnm);
1825 ping_monitor(container);
1826 if (mdmon_running(st->container_devnm) &&
1827 st->update_tail == NULL)
1828 st->update_tail = &st->updates;
1829 }
1830
1831 if (s->size == MAX_SIZE)
1832 s->size = 0;
1833 array.size = s->size;
1834 if (s->size & ~INT32_MAX) {
1835 /* got truncated to 32bit, write to
1836 * component_size instead
1837 */
1838 if (sra)
1839 rv = sysfs_set_num(sra, NULL,
1840 "component_size", s->size);
1841 else
1842 rv = -1;
1843 } else {
1844 rv = ioctl(fd, SET_ARRAY_INFO, &array);
1845
1846 /* manage array size when it is managed externally
1847 */
1848 if ((rv == 0) && st->ss->external)
1849 rv = set_array_size(st, sra, sra->text_version);
1850 }
1851
1852 if (raid0_takeover) {
1853 /* do not resync the non-existent parity,
1854 * we will drop it anyway
1855 */
1856 sysfs_set_str(sra, NULL, "sync_action", "frozen");
1857 /* go back to raid0, drop parity disk
1858 */
1859 sysfs_set_str(sra, NULL, "level", "raid0");
1860 ioctl(fd, GET_ARRAY_INFO, &array);
1861 }
1862
1863 size_change_error:
1864 if (rv != 0) {
1865 int err = errno;
1866
1867 /* restore metadata */
1868 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
1869 UnSet, NULL, devname,
1870 ROLLBACK_METADATA_CHANGES,
1871 c->verbose) == 0)
1872 sync_metadata(st);
1873 pr_err("Cannot set device size for %s: %s\n",
1874 devname, strerror(err));
1875 if (err == EBUSY &&
1876 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
1877 cont_err("Bitmap must be removed before size can be changed\n");
1878 rv = 1;
1879 goto release;
1880 }
1881 if (s->assume_clean) {
1882 /* This will fail on kernels older than 3.0 unless
1883 * a backport has been arranged.
1884 */
1885 if (sra == NULL ||
1886 sysfs_set_str(sra, NULL, "resync_start", "none") < 0)
1887 pr_err("--assume-clean not supported with --grow on this kernel\n");
1888 }
1889 ioctl(fd, GET_ARRAY_INFO, &array);
1890 s->size = get_component_size(fd)/2;
1891 if (s->size == 0)
1892 s->size = array.size;
1893 if (c->verbose >= 0) {
1894 if (s->size == orig_size)
1895 pr_err("component size of %s unchanged at %lluK\n",
1896 devname, s->size);
1897 else
1898 pr_err("component size of %s has been set to %lluK\n",
1899 devname, s->size);
1900 }
1901 changed = 1;
1902 } else if (array.level != LEVEL_CONTAINER) {
1903 s->size = get_component_size(fd)/2;
1904 if (s->size == 0)
1905 s->size = array.size;
1906 }
1907
1908 /* See if there is anything else to do */
1909 if ((s->level == UnSet || s->level == array.level) &&
1910 (s->layout_str == NULL) &&
1911 (s->chunk == 0 || s->chunk == array.chunk_size) &&
1912 data_offset == INVALID_SECTORS &&
1913 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
1914 /* Nothing more to do */
1915 if (!changed && c->verbose >= 0)
1916 pr_err("%s: no change requested\n",
1917 devname);
1918 goto release;
1919 }
1920
1921 /* ========= check for Raid10/Raid1 -> Raid0 conversion ===============
1922 * the current implementation assumes that the following conditions must be met:
1923 * - RAID10:
1924 * - far_copies == 1
1925 * - near_copies == 2
1926 */
1927 if ((s->level == 0 && array.level == 10 && sra &&
1928 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
1929 (s->level == 0 && array.level == 1 && sra)) {
1930 int err;
1931 err = remove_disks_for_takeover(st, sra, array.layout);
1932 if (err) {
1933 dprintf("Array cannot be reshaped\n");
1934 if (cfd > -1)
1935 close(cfd);
1936 rv = 1;
1937 goto release;
1938 }
1939 /* Make sure mdmon has seen the device removal
1940 * and updated metadata before we continue with
1941 * level change
1942 */
1943 if (container)
1944 ping_monitor(container);
1945 }
1946
1947 memset(&info, 0, sizeof(info));
1948 info.array = array;
1949 sysfs_init(&info, fd, NULL);
1950 strcpy(info.text_version, sra->text_version);
1951 info.component_size = s->size*2;
1952 info.new_level = s->level;
1953 info.new_chunk = s->chunk * 1024;
1954 if (info.array.level == LEVEL_CONTAINER) {
1955 info.delta_disks = UnSet;
1956 info.array.raid_disks = s->raiddisks;
1957 } else if (s->raiddisks)
1958 info.delta_disks = s->raiddisks - info.array.raid_disks;
1959 else
1960 info.delta_disks = UnSet;
1961 if (s->layout_str == NULL) {
1962 info.new_layout = UnSet;
1963 if (info.array.level == 6 &&
1964 (info.new_level == 6 || info.new_level == UnSet) &&
1965 info.array.layout >= 16) {
1966 pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname);
1967 cont_err("during the reshape, please specify --layout=preserve\n");
1968 cont_err("If you want to change it, specify a layout or use --layout=normalise\n");
1969 rv = 1;
1970 goto release;
1971 }
1972 } else if (strcmp(s->layout_str, "normalise") == 0 ||
1973 strcmp(s->layout_str, "normalize") == 0) {
1974 /* If we have a -6 RAID6 layout, remove the '-6'. */
1975 info.new_layout = UnSet;
1976 if (info.array.level == 6 && info.new_level == UnSet) {
1977 char l[40], *h;
1978 strcpy(l, map_num(r6layout, info.array.layout));
1979 h = strrchr(l, '-');
1980 if (h && strcmp(h, "-6") == 0) {
1981 *h = 0;
1982 info.new_layout = map_name(r6layout, l);
1983 }
1984 } else {
1985 pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str);
1986 rv = 1;
1987 goto release;
1988 }
1989 } else if (strcmp(s->layout_str, "preserve") == 0) {
1990 /* This means that a non-standard RAID6 layout
1991 * is OK.
1992 * In particular:
1993 * - When reshaping a RAID6 (e.g. adding a device)
1994 * which is in a non-standard layout, it is OK
1995 * to preserve that layout.
1996 * - When converting a RAID5 to RAID6, leave it in
1997 * the XXX-6 layout, don't re-layout.
1998 */
1999 if (info.array.level == 6 && info.new_level == UnSet)
2000 info.new_layout = info.array.layout;
2001 else if (info.array.level == 5 && info.new_level == 6) {
2002 char l[40];
2003 strcpy(l, map_num(r5layout, info.array.layout));
2004 strcat(l, "-6");
2005 info.new_layout = map_name(r6layout, l);
2006 } else {
2007 pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str);
2008 rv = 1;
2009 goto release;
2010 }
2011 } else {
2012 int l = info.new_level;
2013 if (l == UnSet)
2014 l = info.array.level;
2015 switch (l) {
2016 case 5:
2017 info.new_layout = map_name(r5layout, s->layout_str);
2018 break;
2019 case 6:
2020 info.new_layout = map_name(r6layout, s->layout_str);
2021 break;
2022 case 10:
2023 info.new_layout = parse_layout_10(s->layout_str);
2024 break;
2025 case LEVEL_FAULTY:
2026 info.new_layout = parse_layout_faulty(s->layout_str);
2027 break;
2028 default:
2029 pr_err("layout not meaningful with this level\n");
2030 rv = 1;
2031 goto release;
2032 }
2033 if (info.new_layout == UnSet) {
2034 pr_err("layout %s not understood for this level\n",
2035 s->layout_str);
2036 rv = 1;
2037 goto release;
2038 }
2039 }
2040
2041 if (array.level == LEVEL_FAULTY) {
2042 if (s->level != UnSet && s->level != array.level) {
2043 pr_err("cannot change level of Faulty device\n");
2044 rv = 1;
2045 }
2046 if (s->chunk) {
2047 pr_err("cannot set chunksize of Faulty device\n");
2048 rv = 1;
2049 }
2050 if (s->raiddisks && s->raiddisks != 1) {
2051 pr_err("cannot set raid_disks of Faulty device\n");
2052 rv = 1;
2053 }
2054 if (s->layout_str) {
2055 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2056 dprintf("Cannot get array information.\n");
2057 goto release;
2058 }
2059 array.layout = info.new_layout;
2060 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2061 pr_err("failed to set new layout\n");
2062 rv = 1;
2063 } else if (c->verbose >= 0)
2064 printf("layout for %s set to %d\n",
2065 devname, array.layout);
2066 }
2067 } else if (array.level == LEVEL_CONTAINER) {
2068 /* This change is to be applied to every array in the
2069 * container. This is only needed when the metadata imposes
2070 * constraints on the various arrays in the container.
2071 * Currently we only know that IMSM requires all arrays
2072 * to have the same number of devices, so changing the
2073 * number of devices (On-Line Capacity Expansion) must be
2074 * performed at the level of the container.
2075 */
2076 if (fd > 0) {
2077 close(fd);
2078 fd = -1;
2079 }
2080 rv = reshape_container(container, devname, -1, st, &info,
2081 c->force, c->backup_file, c->verbose, 0, 0, 0);
2082 frozen = 0;
2083 } else {
2084 /* get spare devices from external metadata
2085 */
2086 if (st->ss->external) {
2087 struct mdinfo *info2;
2088
2089 info2 = st->ss->container_content(st, subarray);
2090 if (info2) {
2091 info.array.spare_disks =
2092 info2->array.spare_disks;
2093 sysfs_free(info2);
2094 }
2095 }
2096
2097 /* Impose these changes on a single array. First
2098 * check that the metadata is OK with the change. */
2099
2100 if (reshape_super(st, 0, info.new_level,
2101 info.new_layout, info.new_chunk,
2102 info.array.raid_disks, info.delta_disks,
2103 c->backup_file, devname, APPLY_METADATA_CHANGES,
2104 c->verbose)) {
2105 rv = 1;
2106 goto release;
2107 }
2108 sync_metadata(st);
2109 rv = reshape_array(container, fd, devname, st, &info, c->force,
2110 devlist, data_offset, c->backup_file, c->verbose,
2111 0, 0, 0);
2112 frozen = 0;
2113 }
2114 release:
2115 sysfs_free(sra);
2116 if (frozen > 0)
2117 unfreeze(st);
2118 return rv;
2119 }
2120
2121 /* verify_reshape_position()
2122 * Check that the reshape position recorded in the metadata is not
2123 * ahead of the position reported by md.
2124 * Return value:
2125 * 0 : no valid sysfs entry
2126 * this can happen when the reshape has not started yet (it will
2127 * be started by reshape_array()) or the array is a raid0 before takeover
2128 * -1 : error, reshape position is obviously wrong
2129 * 1 : success, reshape progress is correct or has been updated
2130 */
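/* Illustrative example (assumed values, not from a real array): a 4-device
 * RAID5 has 3 data disks, so a sync_max of 1048576 sectors per device
 * corresponds to an array position of 3 * 1048576 = 3145728 sectors; if the
 * metadata's reshape_progress is below that, it is bumped up to match and
 * 1 is returned.
 */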
2131 static int verify_reshape_position(struct mdinfo *info, int level)
2132 {
2133 int ret_val = 0;
2134 char buf[40];
2135 int rv;
2136
2137 /* read sync_max, failure can mean raid0 array */
2138 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2139
2140 if (rv > 0) {
2141 char *ep;
2142 unsigned long long position = strtoull(buf, &ep, 0);
2143
2144 dprintf("Read sync_max sysfs entry is: %s\n", buf);
2145 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2146 position *= get_data_disks(level,
2147 info->new_layout,
2148 info->array.raid_disks);
2149 if (info->reshape_progress < position) {
2150 dprintf("Corrected reshape progress (%llu) to md position (%llu)\n",
2151 info->reshape_progress, position);
2152 info->reshape_progress = position;
2153 ret_val = 1;
2154 } else if (info->reshape_progress > position) {
2155 pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n",
2156 position, info->reshape_progress);
2157 ret_val = -1;
2158 } else {
2159 dprintf("Reshape position in md and metadata are the same;");
2160 ret_val = 1;
2161 }
2162 }
2163 } else if (rv == 0) {
2164 /* for valid sysfs entry, 0-length content
2165 * should be indicated as error
2166 */
2167 ret_val = -1;
2168 }
2169
2170 return ret_val;
2171 }
2172
2173 static unsigned long long choose_offset(unsigned long long lo,
2174 unsigned long long hi,
2175 unsigned long long min,
2176 unsigned long long max)
2177 {
2178 /* Choose a new offset between hi and lo.
2179 * It must be between min and max, but
2180 * we would prefer something near the middle of hi/lo, and also
2181 * prefer to be aligned to a big power of 2.
2182 *
2183 * So we start with the middle, then for each bit,
2184 * starting at '1' and increasing, if it is set, we either
2185 * add it or subtract it if possible, preferring the option
2186 * which is furthest from the boundary.
2187 *
2188 * We stop once we get a 1MB alignment. As units are in sectors,
2189 * 1MB = 2*1024 sectors.
2190 */
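/* Illustrative worked example (values assumed, not from a real array):
 * with lo = 1000, hi = 9000, min = 1000 and max = 9000 the loop starts at
 * choice = 5000, adjusts at bits 8, 128 and 1024, and returns 4096, which
 * lies inside [min, max] and is 1MB-aligned (a multiple of 2048 sectors).
 */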
2191 unsigned long long choice = (lo + hi) / 2;
2192 unsigned long long bit = 1;
2193
2194 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2195 unsigned long long bigger, smaller;
2196 if (! (bit & choice))
2197 continue;
2198 bigger = choice + bit;
2199 smaller = choice - bit;
2200 if (bigger > max && smaller < min)
2201 break;
2202 if (bigger > max)
2203 choice = smaller;
2204 else if (smaller < min)
2205 choice = bigger;
2206 else if (hi - bigger > smaller - lo)
2207 choice = bigger;
2208 else
2209 choice = smaller;
2210 }
2211 return choice;
2212 }
2213
2214 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2215 char *devname, int delta_disks,
2216 unsigned long long data_offset,
2217 unsigned long long min,
2218 int can_fallback)
2219 {
2220 struct mdinfo *sd;
2221 int dir = 0;
2222 int err = 0;
2223 unsigned long long before, after;
2224
2225 /* Need to find the minimum space before and after, so the same
2226 * values are used on all devices
2227 */
2228 before = UINT64_MAX;
2229 after = UINT64_MAX;
2230 for (sd = sra->devs; sd; sd = sd->next) {
2231 char *dn;
2232 int dfd;
2233 int rv;
2234 struct supertype *st2;
2235 struct mdinfo info2;
2236
2237 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2238 continue;
2239 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2240 dfd = dev_open(dn, O_RDONLY);
2241 if (dfd < 0) {
2242 pr_err("%s: cannot open component %s\n",
2243 devname, dn ? dn : "-unknown-");
2244 goto release;
2245 }
2246 st2 = dup_super(st);
2247 rv = st2->ss->load_super(st2, dfd, NULL);
2248 close(dfd);
2249 if (rv) {
2250 free(st2);
2251 pr_err("%s: cannot get superblock from %s\n",
2252 devname, dn);
2253 goto release;
2254 }
2255 st2->ss->getinfo_super(st2, &info2, NULL);
2256 st2->ss->free_super(st2);
2257 free(st2);
2258 if (info2.space_before == 0 &&
2259 info2.space_after == 0) {
2260 /* Metadata doesn't support data_offset changes */
2261 if (!can_fallback)
2262 pr_err("%s: Metadata version doesn't support data_offset changes\n",
2263 devname);
2264 goto fallback;
2265 }
2266 if (before > info2.space_before)
2267 before = info2.space_before;
2268 if (after > info2.space_after)
2269 after = info2.space_after;
2270
2271 if (data_offset != INVALID_SECTORS) {
2272 if (dir == 0) {
2273 if (info2.data_offset == data_offset) {
2274 pr_err("%s: already has that data_offset\n",
2275 dn);
2276 goto release;
2277 }
2278 if (data_offset < info2.data_offset)
2279 dir = -1;
2280 else
2281 dir = 1;
2282 } else if ((data_offset <= info2.data_offset && dir == 1) ||
2283 (data_offset >= info2.data_offset && dir == -1)) {
2284 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2285 dn);
2286 goto release;
2287 }
2288 }
2289 }
2290 if (before == UINT64_MAX)
2291 /* impossible really, there must be no devices */
2292 return 1;
2293
2294 for (sd = sra->devs; sd; sd = sd->next) {
2295 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2296 unsigned long long new_data_offset;
2297
2298 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2299 continue;
2300 if (delta_disks < 0) {
2301 /* Don't need any space as the array is shrinking;
2302 * just move data_offset up by min
2303 */
2304 if (data_offset == INVALID_SECTORS)
2305 new_data_offset = sd->data_offset + min;
2306 else {
2307 if (data_offset < sd->data_offset + min) {
2308 pr_err("--data-offset too small for %s\n",
2309 dn);
2310 goto release;
2311 }
2312 new_data_offset = data_offset;
2313 }
2314 } else if (delta_disks > 0) {
2315 /* need space before */
2316 if (before < min) {
2317 if (can_fallback)
2318 goto fallback;
2319 pr_err("Insufficient head-space for reshape on %s\n",
2320 dn);
2321 goto release;
2322 }
2323 if (data_offset == INVALID_SECTORS)
2324 new_data_offset = sd->data_offset - min;
2325 else {
2326 if (data_offset > sd->data_offset - min) {
2327 pr_err("--data-offset too large for %s\n",
2328 dn);
2329 goto release;
2330 }
2331 new_data_offset = data_offset;
2332 }
2333 } else {
2334 if (dir == 0) {
2335 /* can move up or down. If 'data_offset'
2336 * was set we would have already decided,
2337 * so just choose the direction with the most space.
2338 */
2339 if (before > after)
2340 dir = -1;
2341 else
2342 dir = 1;
2343 }
2344 sysfs_set_str(sra, NULL, "reshape_direction",
2345 dir == 1 ? "backwards" : "forwards");
2346 if (dir > 0) {
2347 /* Increase data offset */
2348 if (after < min) {
2349 if (can_fallback)
2350 goto fallback;
2351 pr_err("Insufficient tail-space for reshape on %s\n",
2352 dn);
2353 goto release;
2354 }
2355 if (data_offset != INVALID_SECTORS &&
2356 data_offset < sd->data_offset + min) {
2357 pr_err("--data-offset too small on %s\n",
2358 dn);
2359 goto release;
2360 }
2361 if (data_offset != INVALID_SECTORS)
2362 new_data_offset = data_offset;
2363 else
2364 new_data_offset = choose_offset(sd->data_offset,
2365 sd->data_offset + after,
2366 sd->data_offset + min,
2367 sd->data_offset + after);
2368 } else {
2369 /* Decrease data offset */
2370 if (before < min) {
2371 if (can_fallback)
2372 goto fallback;
2373 pr_err("insufficient head-room on %s\n",
2374 dn);
2375 goto release;
2376 }
2377 if (data_offset != INVALID_SECTORS &&
2378 data_offset < sd->data_offset - min) {
2379 pr_err("--data-offset too small on %s\n",
2380 dn);
2381 goto release;
2382 }
2383 if (data_offset != INVALID_SECTORS)
2384 new_data_offset = data_offset;
2385 else
2386 new_data_offset = choose_offset(sd->data_offset - before,
2387 sd->data_offset,
2388 sd->data_offset - before,
2389 sd->data_offset - min);
2390 }
2391 }
2392 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2393 if (err < 0 && errno == E2BIG) {
2394 /* try again after increasing data size to max */
2395 err = sysfs_set_num(sra, sd, "size", 0);
2396 if (err < 0 && errno == EINVAL &&
2397 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2398 /* some kernels have a bug where you cannot
2399 * use '0' on spare devices. */
2400 sysfs_set_num(sra, sd, "size",
2401 (sra->component_size + after)/2);
2402 }
2403 err = sysfs_set_num(sra, sd, "new_offset",
2404 new_data_offset);
2405 }
2406 if (err < 0) {
2407 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2408 pr_err("data-offset is too big for %s\n",
2409 dn);
2410 goto release;
2411 }
2412 if (sd == sra->devs &&
2413 (errno == ENOENT || errno == E2BIG))
2414 /* Early kernel, no 'new_offset' file,
2415 * or kernel doesn't like us.
2416 * For RAID5/6 this is not fatal
2417 */
2418 return 1;
2419 pr_err("Cannot set new_offset for %s\n",
2420 dn);
2421 break;
2422 }
2423 }
2424 return err;
2425 release:
2426 return -1;
2427 fallback:
2428 /* Just use a backup file */
2429 return 1;
2430 }
2431
2432 static int raid10_reshape(char *container, int fd, char *devname,
2433 struct supertype *st, struct mdinfo *info,
2434 struct reshape *reshape,
2435 unsigned long long data_offset,
2436 int force, int verbose)
2437 {
2438 /* Changing raid_disks, layout, chunksize or possibly
2439 * just data_offset for a RAID10.
2440 * We must always change data_offset. We change by at least
2441 * ->min_offset_change which is the largest of the old and new
2442 * chunk sizes.
2443 * If raid_disks is increasing, then data_offset must decrease
2444 * by at least this copy size.
2445 * If raid_disks is unchanged, data_offset must increase or
2446 * decrease by at least min_offset_change but preferably by much more.
2447 * We choose half of the available space.
2448 * If raid_disks is decreasing, data_offset must increase by
2449 * at least min_offset_change. To allow for this, component_size
2450 * must be decreased by the same amount.
2451 *
2452 * So we calculate the required minimum and direction, possibly
2453 * reduce the component_size, then iterate through the devices
2454 * and set the new_data_offset.
2455 * If that all works, we set chunk_size, layout, raid_disks, and start
2456 * 'reshape'
2457 */
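/* Illustrative example (figures assumed, and assuming min_offset_change is
 * expressed in sectors, since it is compared with space_after and passed as
 * 'min' below): with 512K chunks before and after, the minimum move is
 * 1024 sectors, so every member needs at least that much spare space for
 * data_offset to shift by.
 */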
2458 struct mdinfo *sra;
2459 unsigned long long min;
2460 int err = 0;
2461
2462 sra = sysfs_read(fd, NULL,
2463 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2464 );
2465 if (!sra) {
2466 pr_err("%s: Cannot get array details from sysfs\n",
2467 devname);
2468 goto release;
2469 }
2470 min = reshape->min_offset_change;
2471
2472 if (info->delta_disks)
2473 sysfs_set_str(sra, NULL, "reshape_direction",
2474 info->delta_disks < 0 ? "backwards" : "forwards");
2475 if (info->delta_disks < 0 &&
2476 info->space_after < min) {
2477 int rv = sysfs_set_num(sra, NULL, "component_size",
2478 (sra->component_size -
2479 min)/2);
2480 if (rv) {
2481 pr_err("cannot reduce component size\n");
2482 goto release;
2483 }
2484 }
2485 err = set_new_data_offset(sra, st, devname, info->delta_disks, data_offset,
2486 min, 0);
2487 if (err == 1) {
2488 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2489 cont_err("supported on this kernel\n");
2490 err = -1;
2491 }
2492 if (err < 0)
2493 goto release;
2494
2495 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2496 err = errno;
2497 if (!err && sysfs_set_num(sra, NULL, "layout", reshape->after.layout) < 0)
2498 err = errno;
2499 if (!err && sysfs_set_num(sra, NULL, "raid_disks",
2500 info->array.raid_disks + info->delta_disks) < 0)
2501 err = errno;
2502 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2503 err = errno;
2504 if (err) {
2505 pr_err("Cannot set array shape for %s\n",
2506 devname);
2507 if (err == EBUSY &&
2508 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2509 cont_err(" Bitmap must be removed before shape can be changed\n");
2510 goto release;
2511 }
2512 sysfs_free(sra);
2513 return 0;
2514 release:
2515 sysfs_free(sra);
2516 return 1;
2517 }
2518
2519 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2520 {
2521 struct mdinfo *sra, *sd;
2522 /* Initialisation to silence compiler warning */
2523 unsigned long long min_space_before = 0, min_space_after = 0;
2524 int first = 1;
2525
2526 sra = sysfs_read(fd, NULL, GET_DEVS);
2527 if (!sra)
2528 return;
2529 for (sd = sra->devs; sd; sd = sd->next) {
2530 char *dn;
2531 int dfd;
2532 struct supertype *st2;
2533 struct mdinfo info2;
2534
2535 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2536 continue;
2537 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2538 dfd = dev_open(dn, O_RDONLY);
2539 if (dfd < 0)
2540 break;
2541 st2 = dup_super(st);
2542 if (st2->ss->load_super(st2, dfd, NULL)) {
2543 close(dfd);
2544 free(st2);
2545 break;
2546 }
2547 close(dfd);
2548 st2->ss->getinfo_super(st2, &info2, NULL);
2549 st2->ss->free_super(st2);
2550 free(st2);
2551 if (first ||
2552 min_space_before > info2.space_before)
2553 min_space_before = info2.space_before;
2554 if (first ||
2555 min_space_after > info2.space_after)
2556 min_space_after = info2.space_after;
2557 first = 0;
2558 }
2559 if (sd == NULL && !first) {
2560 info->space_after = min_space_after;
2561 info->space_before = min_space_before;
2562 }
2563 sysfs_free(sra);
2564 }
2565
2566 static void update_cache_size(char *container, struct mdinfo *sra,
2567 struct mdinfo *info,
2568 int disks, unsigned long long blocks)
2569 {
2570 /* Check that the internal stripe cache is
2571 * large enough, or it won't work.
2572 * It must hold at least 4 stripes of the larger
2573 * chunk size
2574 */
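/* Worked example (illustrative figures): with a larger chunk of 512K the
 * minimum is 4 * 524288 / 512 = 4096 sectors; with blocks = 2048 and
 * disks = 4 the "16 + blocks/disks" floor is only 528, so 4096 is kept,
 * which converts to 4096 / 8 = 512 pages, and stripe_cache_size is raised
 * to 513 if it is currently smaller.
 */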
2575 unsigned long cache;
2576 cache = max(info->array.chunk_size, info->new_chunk);
2577 cache *= 4; /* 4 stripes minimum */
2578 cache /= 512; /* convert to sectors */
2579 /* make sure there is room for 'blocks' with a bit to spare */
2580 if (cache < 16 + blocks / disks)
2581 cache = 16 + blocks / disks;
2582 cache /= (4096/512); /* Convert from sectors to pages */
2583
2584 if (sra->cache_size < cache)
2585 subarray_set_num(container, sra, "stripe_cache_size",
2586 cache+1);
2587 }
2588
2589 static int impose_reshape(struct mdinfo *sra,
2590 struct mdinfo *info,
2591 struct supertype *st,
2592 int fd,
2593 int restart,
2594 char *devname, char *container,
2595 struct reshape *reshape)
2596 {
2597 struct mdu_array_info_s array;
2598
2599 sra->new_chunk = info->new_chunk;
2600
2601 if (restart) {
2602 /* For external metadata the checkpoint saved by mdmon can be lost
2603 * or missed (due to e.g. a crash). Check whether md has progressed
2604 * farther during this restart than the metadata indicates.
2605 * If so, the metadata information is obsolete.
2606 */
2607 if (st->ss->external)
2608 verify_reshape_position(info, reshape->level);
2609 sra->reshape_progress = info->reshape_progress;
2610 } else {
2611 sra->reshape_progress = 0;
2612 if (reshape->after.data_disks < reshape->before.data_disks)
2613 /* start from the end of the new array */
2614 sra->reshape_progress = (sra->component_size
2615 * reshape->after.data_disks);
2616 }
2617
2618 ioctl(fd, GET_ARRAY_INFO, &array);
2619 if (info->array.chunk_size == info->new_chunk &&
2620 reshape->before.layout == reshape->after.layout &&
2621 st->ss->external == 0) {
2622 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2623 array.raid_disks = reshape->after.data_disks + reshape->parity;
2624 if (!restart &&
2625 ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2626 int err = errno;
2627
2628 pr_err("Cannot set device shape for %s: %s\n",
2629 devname, strerror(errno));
2630
2631 if (err == EBUSY &&
2632 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2633 cont_err("Bitmap must be removed before shape can be changed\n");
2634
2635 goto release;
2636 }
2637 } else if (!restart) {
2638 /* set them all just in case some old 'new_*' value
2639 * persists from some earlier problem.
2640 */
2641 int err = 0;
2642 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2643 err = errno;
2644 if (!err && sysfs_set_num(sra, NULL, "layout",
2645 reshape->after.layout) < 0)
2646 err = errno;
2647 if (!err && subarray_set_num(container, sra, "raid_disks",
2648 reshape->after.data_disks +
2649 reshape->parity) < 0)
2650 err = errno;
2651 if (err) {
2652 pr_err("Cannot set device shape for %s\n",
2653 devname);
2654
2655 if (err == EBUSY &&
2656 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2657 cont_err("Bitmap must be removed before shape can be changed\n");
2658 goto release;
2659 }
2660 }
2661 return 0;
2662 release:
2663 return -1;
2664 }
2665
2666 static int impose_level(int fd, int level, char *devname, int verbose)
2667 {
2668 char *c;
2669 struct mdu_array_info_s array;
2670 struct mdinfo info;
2671 sysfs_init(&info, fd, NULL);
2672
2673 ioctl(fd, GET_ARRAY_INFO, &array);
2674 if (level == 0 &&
2675 (array.level >= 4 && array.level <= 6)) {
2676 /* To convert to RAID0 we need to fail and
2677 * remove any non-data devices. */
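/* Illustrative example (assumed array): a 4-device RAID5 in the parity-last
 * (ALGORITHM_PARITY_N) layout has raid_disks - 1 = 3 data devices; the loops
 * below first remove spares, then fail and remove the remaining non-data
 * device, so only raid_disk slots 0..2 survive the takeover to RAID0.
 */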
2678 int found = 0;
2679 int d;
2680 int data_disks = array.raid_disks - 1;
2681 if (array.level == 6)
2682 data_disks -= 1;
2683 if (array.level == 5 &&
2684 array.layout != ALGORITHM_PARITY_N)
2685 return -1;
2686 if (array.level == 6 &&
2687 array.layout != ALGORITHM_PARITY_N_6)
2688 return -1;
2689 sysfs_set_str(&info, NULL,"sync_action", "idle");
2690 /* First remove any spares so no recovery starts */
2691 for (d = 0, found = 0;
2692 d < MAX_DISKS && found < array.nr_disks;
2693 d++) {
2694 mdu_disk_info_t disk;
2695 disk.number = d;
2696 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2697 continue;
2698 if (disk.major == 0 && disk.minor == 0)
2699 continue;
2700 found++;
2701 if ((disk.state & (1 << MD_DISK_ACTIVE))
2702 && disk.raid_disk < data_disks)
2703 /* keep this */
2704 continue;
2705 ioctl(fd, HOT_REMOVE_DISK,
2706 makedev(disk.major, disk.minor));
2707 }
2708 /* Now fail anything left */
2709 ioctl(fd, GET_ARRAY_INFO, &array);
2710 for (d = 0, found = 0;
2711 d < MAX_DISKS && found < array.nr_disks;
2712 d++) {
2713 int cnt;
2714 mdu_disk_info_t disk;
2715 disk.number = d;
2716 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2717 continue;
2718 if (disk.major == 0 && disk.minor == 0)
2719 continue;
2720 found++;
2721 if ((disk.state & (1 << MD_DISK_ACTIVE))
2722 && disk.raid_disk < data_disks)
2723 /* keep this */
2724 continue;
2725 ioctl(fd, SET_DISK_FAULTY,
2726 makedev(disk.major, disk.minor));
2727 cnt = 5;
2728 while (ioctl(fd, HOT_REMOVE_DISK,
2729 makedev(disk.major, disk.minor)) < 0
2730 && errno == EBUSY
2731 && cnt--) {
2732 usleep(10000);
2733 }
2734 }
2735 }
2736 c = map_num(pers, level);
2737 if (c) {
2738 int err = sysfs_set_str(&info, NULL, "level", c);
2739 if (err) {
2740 err = errno;
2741 pr_err("%s: could not set level to %s\n",
2742 devname, c);
2743 if (err == EBUSY &&
2744 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2745 cont_err("Bitmap must be removed before level can be changed\n");
2746 return err;
2747 }
2748 if (verbose >= 0)
2749 pr_err("level of %s changed to %s\n",
2750 devname, c);
2751 }
2752 return 0;
2753 }
2754
2755 int sigterm = 0;
2756 static void catch_term(int sig)
2757 {
2758 sigterm = 1;
2759 }
2760
2761 static int continue_via_systemd(char *devnm)
2762 {
2763 int skipped, i, pid, status;
2764 char pathbuf[1024];
2765 /* In a systemd/udev world, it is best to get systemd to
2766 * run "mdadm --grow --continue" rather than running in the
2767 * background.
2768 */
2769 switch(fork()) {
2770 case 0:
2771 /* FIXME yuk. CLOSE_EXEC?? */
2772 skipped = 0;
2773 for (i = 3; skipped < 20; i++)
2774 if (close(i) < 0)
2775 skipped++;
2776 else
2777 skipped = 0;
2778
2779 /* Don't want to see error messages from
2780 * systemctl. If the service doesn't exist,
2781 * we fork ourselves.
2782 */
2783 close(2);
2784 open("/dev/null", O_WRONLY);
2785 snprintf(pathbuf, sizeof(pathbuf), "mdadm-grow-continue@%s.service",
2786 devnm);
2787 status = execl("/usr/bin/systemctl", "systemctl",
2788 "start",
2789 pathbuf, NULL);
2790 status = execl("/bin/systemctl", "systemctl", "start",
2791 pathbuf, NULL);
2792 exit(1);
2793 case -1: /* Just do it ourselves. */
2794 break;
2795 default: /* parent - good */
2796 pid = wait(&status);
2797 if (pid >= 0 && status == 0)
2798 return 1;
2799 }
2800 return 0;
2801 }
2802
2803 static int reshape_array(char *container, int fd, char *devname,
2804 struct supertype *st, struct mdinfo *info,
2805 int force, struct mddev_dev *devlist,
2806 unsigned long long data_offset,
2807 char *backup_file, int verbose, int forked,
2808 int restart, int freeze_reshape)
2809 {
2810 struct reshape reshape;
2811 int spares_needed;
2812 char *msg;
2813 int orig_level = UnSet;
2814 int odisks;
2815 int delayed;
2816
2817 struct mdu_array_info_s array;
2818 char *c;
2819
2820 struct mddev_dev *dv;
2821 int added_disks;
2822
2823 int *fdlist = NULL;
2824 unsigned long long *offsets = NULL;
2825 int d;
2826 int nrdisks;
2827 int err;
2828 unsigned long blocks;
2829 unsigned long long array_size;
2830 int done;
2831 struct mdinfo *sra = NULL;
2832 char buf[20];
2833
2834 /* when reshaping a RAID0, the component_size might be zero.
2835 * So try to fix that up.
2836 */
2837 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2838 dprintf("Cannot get array information.\n");
2839 goto release;
2840 }
2841 if (array.level == 0 && info->component_size == 0) {
2842 get_dev_size(fd, NULL, &array_size);
2843 info->component_size = array_size / array.raid_disks;
2844 }
2845
2846 if (array.level == 10)
2847 /* Need space_after info */
2848 get_space_after(fd, st, info);
2849
2850 if (info->reshape_active) {
2851 int new_level = info->new_level;
2852 info->new_level = UnSet;
2853 if (info->delta_disks > 0)
2854 info->array.raid_disks -= info->delta_disks;
2855 msg = analyse_change(devname, info, &reshape);
2856 info->new_level = new_level;
2857 if (info->delta_disks > 0)
2858 info->array.raid_disks += info->delta_disks;
2859 if (!restart)
2860 /* Make sure the array isn't read-only */
2861 ioctl(fd, RESTART_ARRAY_RW, 0);
2862 } else
2863 msg = analyse_change(devname, info, &reshape);
2864 if (msg) {
2865 /* if msg == "", error has already been printed */
2866 if (msg[0])
2867 pr_err("%s\n", msg);
2868 goto release;
2869 }
2870 if (restart &&
2871 (reshape.level != info->array.level ||
2872 reshape.before.layout != info->array.layout ||
2873 reshape.before.data_disks + reshape.parity
2874 != info->array.raid_disks - max(0, info->delta_disks))) {
2875 pr_err("reshape info is not in native format - cannot continue.\n");
2876 goto release;
2877 }
2878
2879 if (st->ss->external && restart && (info->reshape_progress == 0) &&
2880 !((sysfs_get_str(info, NULL, "sync_action", buf, sizeof(buf)) > 0) &&
2881 (strncmp(buf, "reshape", 7) == 0))) {
2882 /* When a reshape is restarted from '0' (the very beginning of the
2883 * array), it is possible that for external metadata the reshape and
2884 * array configuration did not actually happen.
2885 * Check whether md has the same opinion and the reshape really is
2886 * restarted from 0. If so, this is a regular reshape start after
2887 * the reshape in the metadata has switched to the next array only.
2888 */
2889 if ((verify_reshape_position(info, reshape.level) >= 0) &&
2890 (info->reshape_progress == 0))
2891 restart = 0;
2892 }
2893 if (restart) {
2894 /* reshape already started. just skip to monitoring the reshape */
2895 if (reshape.backup_blocks == 0)
2896 return 0;
2897 if (restart & RESHAPE_NO_BACKUP)
2898 return 0;
2899
2900 /* Need 'sra' down at 'started:' */
2901 sra = sysfs_read(fd, NULL,
2902 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
2903 GET_CACHE);
2904 if (!sra) {
2905 pr_err("%s: Cannot get array details from sysfs\n",
2906 devname);
2907 goto release;
2908 }
2909
2910 if (!backup_file)
2911 backup_file = locate_backup(sra->sys_name);
2912
2913 goto started;
2914 }
2915 /* The container is frozen but the array may not be.
2916 * So freeze the array so spares don't get put to the wrong use
2917 * FIXME there should probably be a cleaner separation between
2918 * freeze_array and freeze_container.
2919 */
2920 sysfs_freeze_array(info);
2921 /* Check we have enough spares to not be degraded */
2922 added_disks = 0;
2923 for (dv = devlist; dv ; dv=dv->next)
2924 added_disks++;
2925 spares_needed = max(reshape.before.data_disks,
2926 reshape.after.data_disks)
2927 + reshape.parity - array.raid_disks;
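/* Illustrative example (assumed values): growing a 4-device RAID5 to 5
 * devices gives before.data_disks = 3, after.data_disks = 4 and parity = 1,
 * so spares_needed = max(3, 4) + 1 - 4 = 1: one spare or newly added
 * device is needed to avoid ending up degraded.
 */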
2928
2929 if (!force &&
2930 info->new_level > 1 && info->array.level > 1 &&
2931 spares_needed > info->array.spare_disks + added_disks) {
2932 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
2933 " Use --force to over-ride this check.\n",
2934 spares_needed,
2935 spares_needed == 1 ? "" : "s",
2936 info->array.spare_disks + added_disks);
2937 goto release;
2938 }
2939 /* Check we have enough spares to not fail */
2940 spares_needed = max(reshape.before.data_disks,
2941 reshape.after.data_disks)
2942 - array.raid_disks;
2943 if ((info->new_level > 1 || info->new_level == 0) &&
2944 spares_needed > info->array.spare_disks +added_disks) {
2945 pr_err("Need %d spare%s to create working array, and only have %d.\n",
2946 spares_needed,
2947 spares_needed == 1 ? "" : "s",
2948 info->array.spare_disks + added_disks);
2949 goto release;
2950 }
2951
2952 if (reshape.level != array.level) {
2953 int err = impose_level(fd, reshape.level, devname, verbose);
2954 if (err)
2955 goto release;
2956 info->new_layout = UnSet; /* after level change,
2957 * layout is meaningless */
2958 orig_level = array.level;
2959 sysfs_freeze_array(info);
2960
2961 if (reshape.level > 0 && st->ss->external) {
2962 /* make sure mdmon is aware of the new level */
2963 if (mdmon_running(container))
2964 flush_mdmon(container);
2965
2966 if (!mdmon_running(container))
2967 start_mdmon(container);
2968 ping_monitor(container);
2969 if (mdmon_running(container) &&
2970 st->update_tail == NULL)
2971 st->update_tail = &st->updates;
2972 }
2973 }
2974 /* ->reshape_super might have chosen some spares from the
2975 * container that it wants to be part of the new array.
2976 * We can collect them with ->container_content and give
2977 * them to the kernel.
2978 */
2979 if (st->ss->reshape_super && st->ss->container_content) {
2980 char *subarray = strchr(info->text_version+1, '/')+1;
2981 struct mdinfo *info2 =
2982 st->ss->container_content(st, subarray);
2983 struct mdinfo *d;
2984
2985 if (info2) {
2986 sysfs_init(info2, fd, st->devnm);
2987 /* When increasing number of devices, we need to set
2988 * new raid_disks before adding these, or they might
2989 * be rejected.
2990 */
2991 if (reshape.backup_blocks &&
2992 reshape.after.data_disks > reshape.before.data_disks)
2993 subarray_set_num(container, info2, "raid_disks",
2994 reshape.after.data_disks +
2995 reshape.parity);
2996 for (d = info2->devs; d; d = d->next) {
2997 if (d->disk.state == 0 &&
2998 d->disk.raid_disk >= 0) {
2999 /* This is a spare that wants to
3000 * be part of the array.
3001 */
3002 add_disk(fd, st, info2, d);
3003 }
3004 }
3005 sysfs_free(info2);
3006 }
3007 }
3008 /* We might have been given some devices to add to the
3009 * array. Now that the array has been changed to the right
3010 * level and frozen, we can safely add them.
3011 */
3012 if (devlist) {
3013 if (Manage_subdevs(devname, fd, devlist, verbose,
3014 0, NULL, 0))
3015 goto release;
3016 }
3017
3018 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
3019 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
3020 if (reshape.backup_blocks == 0) {
3021 /* No restriping needed, but we might need to impose
3022 * some more changes: layout, raid_disks, chunk_size
3023 */
3024 /* read current array info */
3025 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
3026 dprintf("Cannot get array information.\n");
3027 goto release;
3028 }
3029 /* compare current array info with the new values and,
3030 * if they differ, update them to the new values */
3031 if (info->new_layout != UnSet &&
3032 info->new_layout != array.layout) {
3033 array.layout = info->new_layout;
3034 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
3035 pr_err("failed to set new layout\n");
3036 goto release;
3037 } else if (verbose >= 0)
3038 printf("layout for %s set to %d\n",
3039 devname, array.layout);
3040 }
3041 if (info->delta_disks != UnSet &&
3042 info->delta_disks != 0 &&
3043 array.raid_disks != (info->array.raid_disks + info->delta_disks)) {
3044 array.raid_disks += info->delta_disks;
3045 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
3046 pr_err("failed to set raid disks\n");
3047 goto release;
3048 } else if (verbose >= 0) {
3049 printf("raid_disks for %s set to %d\n",
3050 devname, array.raid_disks);
3051 }
3052 }
3053 if (info->new_chunk != 0 &&
3054 info->new_chunk != array.chunk_size) {
3055 if (sysfs_set_num(info, NULL,
3056 "chunk_size", info->new_chunk) != 0) {
3057 pr_err("failed to set chunk size\n");
3058 goto release;
3059 } else if (verbose >= 0)
3060 printf("chunk size for %s set to %d\n",
3061 devname, array.chunk_size);
3062 }
3063 unfreeze(st);
3064 return 0;
3065 }
3066
3067 /*
3068 * There are three possibilities.
3069 * 1/ The array will shrink.
3070 * We need to ensure the reshape will pause before reaching
3071 * the 'critical section'. We also need to fork and wait for
3072 * that to happen. When it does we
3073 * suspend/backup/complete/unfreeze
3074 *
3075 * 2/ The array will not change size.
3076 * This requires that we keep a backup of a sliding window
3077 * so that we can restore data after a crash. So we need
3078 * to fork and monitor progress.
3079 * In future we will allow the data_offset to change, so
3080 * a sliding backup becomes unnecessary.
3081 *
3082 * 3/ The array will grow. This is relatively easy.
3083 * However the kernel's restripe routines will cheerfully
3084 * overwrite some early data before it is safe. So we
3085 * need to make a backup of the early parts of the array
3086 * and be ready to restore it if rebuild aborts very early.
3087 * For externally managed metadata, we still need a forked
3088 * child to monitor the reshape and suspend IO over the region
3089 * that is being reshaped.
3090 *
3091 * We backup data by writing it to one spare, or to a
3092 * file which was given on command line.
3093 *
3094 * In each case, we first make sure that storage is available
3095 * for the required backup.
3096 * Then we:
3097 * - request the shape change.
3098 * - fork to handle backup etc.
3099 */
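/* Rough map from these cases to the code below (descriptive only):
 * a shrink is refused until --array-size has truncated the array first;
 * RAID10 is diverted to raid10_reshape(); everything else either gets a
 * new data_offset (no sliding backup needed) or falls through to
 * 'started:', where a spare or --backup-file provides the backup space.
 */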
3100 /* Check that we can hold all the data */
3101 get_dev_size(fd, NULL, &array_size);
3102 if (reshape.new_size < (array_size/512)) {
3103 pr_err("this change will reduce the size of the array.\n"
3104 " use --grow --array-size first to truncate array.\n"
3105 " e.g. mdadm --grow %s --array-size %llu\n",
3106 devname, reshape.new_size/2);
3107 goto release;
3108 }
3109
3110 if (array.level == 10) {
3111 /* Reshaping RAID10 does not require any data backup by
3112 * user-space. Instead it requires that the data_offset
3113 * is changed to avoid the need for backup.
3114 * So this is handled very separately
3115 */
3116 if (restart)
3117 /* Nothing to do. */
3118 return 0;
3119 return raid10_reshape(container, fd, devname, st, info,
3120 &reshape, data_offset,
3121 force, verbose);
3122 }
3123 sra = sysfs_read(fd, NULL,
3124 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3125 GET_CACHE);
3126 if (!sra) {
3127 pr_err("%s: Cannot get array details from sysfs\n",
3128 devname);
3129 goto release;
3130 }
3131
3132 if (!backup_file)
3133 switch(set_new_data_offset(sra, st, devname,
3134 reshape.after.data_disks - reshape.before.data_disks,
3135 data_offset,
3136 reshape.min_offset_change, 1)) {
3137 case -1:
3138 goto release;
3139 case 0:
3140 /* Updated data_offset, so it's easy now */
3141 update_cache_size(container, sra, info,
3142 min(reshape.before.data_disks,
3143 reshape.after.data_disks),
3144 reshape.backup_blocks);
3145
3146 /* Right, everything seems fine. Let's kick things off.
3147 */
3148 sync_metadata(st);
3149
3150 if (impose_reshape(sra, info, st, fd, restart,
3151 devname, container, &reshape) < 0)
3152 goto release;
3153 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3154 struct mdinfo *sd;
3155 if (errno != EINVAL) {
3156 pr_err("Failed to initiate reshape!\n");
3157 goto release;
3158 }
3159 /* revert data_offset and try the old way */
3160 for (sd = sra->devs; sd; sd = sd->next) {
3161 sysfs_set_num(sra, sd, "new_offset",
3162 sd->data_offset);
3163 sysfs_set_str(sra, NULL, "reshape_direction",
3164 "forwards");
3165 }
3166 break;
3167 }
3168 if (info->new_level == reshape.level)
3169 return 0;
3170 /* need to adjust level when reshape completes */
3171 switch(fork()) {
3172 case -1: /* ignore error, but don't wait */
3173 return 0;
3174 default: /* parent */
3175 return 0;
3176 case 0:
3177 map_fork();
3178 break;
3179 }
3180 close(fd);
3181 wait_reshape(sra);
3182 fd = open_dev(sra->sys_name);
3183 if (fd >= 0)
3184 impose_level(fd, info->new_level, devname, verbose);
3185 return 0;
3186 case 1: /* Couldn't set data_offset, try the old way */
3187 if (data_offset != INVALID_SECTORS) {
3188 pr_err("Cannot update data_offset on this array\n");
3189 goto release;
3190 }
3191 break;
3192 }
3193
3194 started:
3195 /* Decide how many blocks (sectors) for a reshape
3196 * unit. The number we have so far is just a minimum
3197 */
3198 blocks = reshape.backup_blocks;
3199 if (reshape.before.data_disks ==
3200 reshape.after.data_disks) {
3201 /* Make 'blocks' bigger for better throughput, but
3202 * not so big that we reject it below.
3203 * Try for 16 megabytes
3204 */
3205 while (blocks * 32 < sra->component_size &&
3206 blocks < 16*1024*2)
3207 blocks *= 2;
3208 } else
3209 pr_err("Need to backup %luK of critical section..\n", blocks/2);
3210
3211 if (blocks >= sra->component_size/2) {
3212 pr_err("%s: Something wrong - reshape aborted\n",
3213 devname);
3214 goto release;
3215 }
3216
3217 /* Now we need to open all these devices so we can read/write.
3218 */
3219 nrdisks = max(reshape.before.data_disks,
3220 reshape.after.data_disks) + reshape.parity
3221 + sra->array.spare_disks;
3222 fdlist = xcalloc((1+nrdisks), sizeof(int));
3223 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3224
3225 odisks = reshape.before.data_disks + reshape.parity;
3226 d = reshape_prepare_fdlist(devname, sra, odisks,
3227 nrdisks, blocks, backup_file,
3228 fdlist, offsets);
3229 if (d < odisks) {
3230 goto release;
3231 }
3232 if ((st->ss->manage_reshape == NULL) ||
3233 (st->ss->recover_backup == NULL)) {
3234 if (backup_file == NULL) {
3235 if (reshape.after.data_disks <=
3236 reshape.before.data_disks) {
3237 pr_err("%s: Cannot grow - need backup-file\n",
3238 devname);
3239 pr_err(" Please provide one with \"--backup=...\"\n");
3240 goto release;
3241 } else if (d == odisks) {
3242 pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname);
3243 goto release;
3244 }
3245 } else {
3246 if (!reshape_open_backup_file(backup_file, fd, devname,
3247 (signed)blocks,
3248 fdlist+d, offsets+d,
3249 sra->sys_name,
3250 restart)) {
3251 goto release;
3252 }
3253 d++;
3254 }
3255 }
3256
3257 update_cache_size(container, sra, info,
3258 min(reshape.before.data_disks, reshape.after.data_disks),
3259 blocks);
3260
3261 /* Right, everything seems fine. Let's kick things off.
3262 * If only changing raid_disks, use ioctl, else use
3263 * sysfs.
3264 */
3265 sync_metadata(st);
3266
3267 if (impose_reshape(sra, info, st, fd, restart,
3268 devname, container, &reshape) < 0)
3269 goto release;
3270
3271 err = start_reshape(sra, restart, reshape.before.data_disks,
3272 reshape.after.data_disks);
3273 if (err) {
3274 pr_err("Cannot %s reshape for %s\n",
3275 restart ? "continue" : "start",
3276 devname);
3277 goto release;
3278 }
3279 if (restart)
3280 sysfs_set_str(sra, NULL, "array_state", "active");
3281 if (freeze_reshape) {
3282 free(fdlist);
3283 free(offsets);
3284 sysfs_free(sra);
3285 pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n",
3286 sra->reshape_progress);
3287 return 1;
3288 }
3289
3290 if (!forked && !check_env("MDADM_NO_SYSTEMCTL"))
3291 if (continue_via_systemd(container ?: sra->sys_name)) {
3292 free(fdlist);
3293 free(offsets);
3294 sysfs_free(sra);
3295 return 0;
3296 }
3297
3298 /* Now we just need to kick off the reshape and watch, while
3299 * handling backups of the data...
3300 * This is all done by a forked background process.
3301 */
3302 switch(forked ? 0 : fork()) {
3303 case -1:
3304 pr_err("Cannot run child to monitor reshape: %s\n",
3305 strerror(errno));
3306 abort_reshape(sra);
3307 goto release;
3308 default:
3309 free(fdlist);
3310 free(offsets);
3311 sysfs_free(sra);
3312 return 0;
3313 case 0:
3314 map_fork();
3315 break;
3316 }
3317
3318 /* If another array on the same devices is busy, the
3319 * reshape will wait for them. This would mean that
3320 * the first section that we suspend will stay suspended
3321 * for a long time. So check on that possibility
3322 * by looking for "DELAYED" in /proc/mdstat, and if found,
3323 * wait a while
3324 */
3325 do {
3326 struct mdstat_ent *mds, *m;
3327 delayed = 0;
3328 mds = mdstat_read(1, 0);
3329 for (m = mds; m; m = m->next)
3330 if (strcmp(m->devnm, sra->sys_name) == 0) {
3331 if (m->resync &&
3332 m->percent == RESYNC_DELAYED)
3333 delayed = 1;
3334 if (m->resync == 0)
3335 /* Haven't started the reshape thread
3336 * yet, wait a bit
3337 */
3338 delayed = 2;
3339 break;
3340 }
3341 free_mdstat(mds);
3342 if (delayed == 1 && get_linux_version() < 3007000) {
3343 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3344 " You might experience problems until other reshapes complete.\n");
3345 delayed = 0;
3346 }
3347 if (delayed)
3348 mdstat_wait(30 - (delayed-1) * 25);
3349 } while (delayed);
3350 mdstat_close();
3351 close(fd);
3352 if (check_env("MDADM_GROW_VERIFY"))
3353 fd = open(devname, O_RDONLY | O_DIRECT);
3354 else
3355 fd = -1;
3356 mlockall(MCL_FUTURE);
3357
3358 signal(SIGTERM, catch_term);
3359
3360 if (st->ss->external) {
3361 /* metadata handler takes it from here */
3362 done = st->ss->manage_reshape(
3363 fd, sra, &reshape, st, blocks,
3364 fdlist, offsets,
3365 d - odisks, fdlist+odisks,
3366 offsets+odisks);
3367 } else
3368 done = child_monitor(
3369 fd, sra, &reshape, st, blocks,
3370 fdlist, offsets,
3371 d - odisks, fdlist+odisks,
3372 offsets+odisks);
3373
3374 free(fdlist);
3375 free(offsets);
3376
3377 if (backup_file && done) {
3378 char *bul;
3379 bul = make_backup(sra->sys_name);
3380 if (bul) {
3381 char buf[1024];
3382 int l = readlink(bul, buf, sizeof(buf) - 1);
3383 if (l > 0) {
3384 buf[l]=0;
3385 unlink(buf);
3386 }
3387 unlink(bul);
3388 free(bul);
3389 }
3390 unlink(backup_file);
3391 }
3392 if (!done) {
3393 abort_reshape(sra);
3394 goto out;
3395 }
3396
3397 if (!st->ss->external &&
3398 !(reshape.before.data_disks != reshape.after.data_disks
3399 && info->custom_array_size) &&
3400 info->new_level == reshape.level &&
3401 !forked) {
3402 /* no need to wait for the reshape to finish as
3403 * there is nothing more to do.
3404 */
3405 sysfs_free(sra);
3406 exit(0);
3407 }
3408 wait_reshape(sra);
3409
3410 if (st->ss->external) {
3411 /* Re-load the metadata as much could have changed */
3412 int cfd = open_dev(st->container_devnm);
3413 if (cfd >= 0) {
3414 flush_mdmon(container);
3415 st->ss->free_super(st);
3416 st->ss->load_container(st, cfd, container);
3417 close(cfd);
3418 }
3419 }
3420
3421 /* set the new array size if required; custom_array_size is used
3422 * by this metadata.
3423 */
3424 if (reshape.before.data_disks !=
3425 reshape.after.data_disks &&
3426 info->custom_array_size)
3427 set_array_size(st, info, info->text_version);
3428
3429 if (info->new_level != reshape.level) {
3430 if (fd < 0)
3431 fd = open(devname, O_RDONLY);
3432 impose_level(fd, info->new_level, devname, verbose);
3433 close(fd);
3434 if (info->new_level == 0)
3435 st->update_tail = NULL;
3436 }
3437 out:
3438 sysfs_free(sra);
3439 if (forked)
3440 return 0;
3441 unfreeze(st);
3442 exit(0);
3443
3444 release:
3445 free(fdlist);
3446 free(offsets);
3447 if (orig_level != UnSet && sra) {
3448 c = map_num(pers, orig_level);
3449 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3450 pr_err("aborting level change\n");
3451 }
3452 sysfs_free(sra);
3453 if (!forked)
3454 unfreeze(st);
3455 return 1;
3456 }
3457
3458 /* mdfd handle is passed to be closed in child process (after fork).
3459 */
3460 int reshape_container(char *container, char *devname,
3461 int mdfd,
3462 struct supertype *st,
3463 struct mdinfo *info,
3464 int force,
3465 char *backup_file, int verbose,
3466 int forked, int restart, int freeze_reshape)
3467 {
3468 struct mdinfo *cc = NULL;
3469 int rv = restart;
3470 char last_devnm[32] = "";
3471
3472 /* component_size is not meaningful for a container,
3473 * so pass '0' meaning 'no change'
3474 */
3475 if (!restart &&
3476 reshape_super(st, 0, info->new_level,
3477 info->new_layout, info->new_chunk,
3478 info->array.raid_disks, info->delta_disks,
3479 backup_file, devname, APPLY_METADATA_CHANGES,
3480 verbose)) {
3481 unfreeze(st);
3482 return 1;
3483 }
3484
3485 sync_metadata(st);
3486
3487 /* ping monitor to be sure that update is on disk
3488 */
3489 ping_monitor(container);
3490
3491 if (!forked && !freeze_reshape && !check_env("MDADM_NO_SYSTEMCTL"))
3492 if (continue_via_systemd(container))
3493 return 0;
3494
3495 switch (forked ? 0 : fork()) {
3496 case -1: /* error */
3497 perror("Cannot fork to complete reshape\n");
3498 unfreeze(st);
3499 return 1;
3500 default: /* parent */
3501 if (!freeze_reshape)
3502 printf("%s: multi-array reshape continues in background\n", Name);
3503 return 0;
3504 case 0: /* child */
3505 map_fork();
3506 break;
3507 }
3508
3509 /* close unused handle in child process
3510 */
3511 if (mdfd > -1)
3512 close(mdfd);
3513
3514 while(1) {
3515 /* For each member array with reshape_active,
3516 * we need to perform the reshape.
3517 * We pick the first array that needs reshaping and
3518 * reshape it. reshape_array() will re-read the metadata
3519 * so the next time through a different array should be
3520 * ready for reshape.
3521 * It is possible that the 'different' array will not
3522 * be assembled yet. In that case we simply exit.
3523 * When it is assembled, the mdadm which assembles it
3524 * will take over the reshape.
3525 */
3526 struct mdinfo *content;
3527 int fd;
3528 struct mdstat_ent *mdstat;
3529 char *adev;
3530 int devid;
3531
3532 sysfs_free(cc);
3533
3534 cc = st->ss->container_content(st, NULL);
3535
3536 for (content = cc; content ; content = content->next) {
3537 char *subarray;
3538 if (!content->reshape_active)
3539 continue;
3540
3541 subarray = strchr(content->text_version+1, '/')+1;
3542 mdstat = mdstat_by_subdev(subarray, container);
3543 if (!mdstat)
3544 continue;
3545 if (mdstat->active == 0) {
3546 pr_err("Skipping inactive array %s.\n",
3547 mdstat->devnm);
3548 free_mdstat(mdstat);
3549 mdstat = NULL;
3550 continue;
3551 }
3552 break;
3553 }
3554 if (!content)
3555 break;
3556
3557 devid = devnm2devid(mdstat->devnm);
3558 adev = map_dev(major(devid), minor(devid), 0);
3559 if (!adev)
3560 adev = content->text_version;
3561
3562 fd = open_dev(mdstat->devnm);
3563 if (fd < 0) {
3564 pr_err("Device %s cannot be opened for reshape.\n", adev);
3565 break;
3566 }
3567
3568 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3569 /* Do not allow for multiple reshape_array() calls for
3570 * the same array.
3571 * It can happen when reshape_array() returns without
3572 * error even though the reshape is not finished (wrong reshape
3573 * starting/continuation conditions). Mdmon then doesn't
3574 * switch to the next array in the container and re-entry
3575 * conditions for the same array occur.
3576 * This is possibly interim until the behaviour of
3577 * reshape_array() is resolved.
3578 */
3579 printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev);
3580 close(fd);
3581 break;
3582 }
3583 strcpy(last_devnm, mdstat->devnm);
3584
3585 sysfs_init(content, fd, mdstat->devnm);
3586
3587 if (mdmon_running(container))
3588 flush_mdmon(container);
3589
3590 rv = reshape_array(container, fd, adev, st,
3591 content, force, NULL, INVALID_SECTORS,
3592 backup_file, verbose, 1, restart,
3593 freeze_reshape);
3594 close(fd);
3595
3596 if (freeze_reshape) {
3597 sysfs_free(cc);
3598 exit(0);
3599 }
3600
3601 restart = 0;
3602 if (rv)
3603 break;
3604
3605 if (mdmon_running(container))
3606 flush_mdmon(container);
3607 }
3608 if (!rv)
3609 unfreeze(st);
3610 sysfs_free(cc);
3611 exit(0);
3612 }
3613
3614 /*
3615 * We run a child process in the background which performs the following
3616 * steps:
3617 * - wait for resync to reach a certain point
3618 * - suspend io to the following section
3619 * - backup that section
3620 * - allow resync to proceed further
3621 * - resume io
3622 * - discard the backup.
3623 *
3624 * These are combined in slightly different ways in the three cases.
3625 * Grow:
3626 * - suspend/backup/allow/wait/resume/discard
3627 * Shrink:
3628 * - allow/wait/suspend/backup/allow/wait/resume/discard
3629 * same-size:
3630 * - wait/resume/discard/suspend/backup/allow
3631 *
3632 * suspend/backup/allow always come together
3633 * wait/resume/discard do too.
3634 * For the same-size case we have two backups to improve flow.
3635 *
3636 */
3637
3638 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3639 unsigned long long backup_point,
3640 unsigned long long wait_point,
3641 unsigned long long *suspend_point,
3642 unsigned long long *reshape_completed, int *frozen)
3643 {
3644 /* This function is called repeatedly by the reshape manager.
3645 * It determines how much progress can safely be made and allows
3646 * that progress.
3647 * - 'info' identifies the array and particularly records in
3648 * ->reshape_progress the metadata's knowledge of progress
3649 * This is a sector offset from the start of the array
3650 * of the next array block to be relocated. This number
3651 * may increase from 0 or decrease from array_size, depending
3652 * on the type of reshape that is happening.
3653 * Note that in contrast, 'sync_completed' is a block count of the
3654 * reshape so far. It gives the distance between the start point
3655 * (head or tail of device) and the next place that data will be
3656 * written. It always increases.
3657 * - 'reshape' is the structure created by analyse_change
3658 * - 'backup_point' shows how much the metadata manager has backed-up
3659 * data. For reshapes with increasing progress, it is the next address
3660 * to be backed up; previous addresses have already been backed up. For
3661 * decreasing progress, it is the earliest address that has been
3662 * backed up - later addresses are also backed up.
3663 * So addresses between reshape_progress and backup_point are
3664 * backed up providing those are in the 'correct' order.
3665 * - 'wait_point' is an array address. When reshape_completed
3666 * passes this point, progress_reshape should return. It might
3667 * return earlier if it determines that ->reshape_progress needs
3668 * to be updated or further backup is needed.
3669 * - suspend_point is maintained by progress_reshape and the caller
3670 * should not touch it except to initialise to zero.
3671 * It is an array address; in 2.6.37 and earlier it can only increase.
3672 * This makes it difficult to handle reducing reshapes with
3673 * external metadata.
3674 * However: it is similar to backup_point in that it records the
3675 * other end of a suspended region from reshape_progress.
3676 * it is moved to extend the region that is safe to backup and/or
3677 * reshape
3678 * - reshape_completed is read from sysfs and returned. The caller
3679 * should copy this into ->reshape_progress when it has reason to
3680 * believe that the metadata knows this, and any backup outside this
3681 * has been erased.
3682 *
3683 * Return value is:
3684 * 1 if more data from backup_point - but only as far as suspend_point,
3685 * should be backed up
3686 * 0 if things are progressing smoothly
3687 * -1 if the reshape is finished because it is all done,
3688 * -2 if the reshape is finished due to an error.
3689 */
3690
3691 int advancing = (reshape->after.data_disks
3692 >= reshape->before.data_disks);
3693 unsigned long long need_backup; /* All data between start of array and
3694 * here will at some point need to
3695 * be backed up.
3696 */
3697 unsigned long long read_offset, write_offset;
3698 unsigned long long write_range;
3699 unsigned long long max_progress, target, completed;
3700 unsigned long long array_size = (info->component_size
3701 * reshape->before.data_disks);
3702 int fd;
3703 char buf[20];
3704
3705 /* First, we unsuspend any region that is now known to be safe.
3706 * If suspend_point is on the 'wrong' side of reshape_progress, then
3707 * we don't have or need suspension at the moment. This is true for
3708 * native metadata when we don't need to back-up.
3709 */
3710 if (advancing) {
3711 if (info->reshape_progress <= *suspend_point)
3712 sysfs_set_num(info, NULL, "suspend_lo",
3713 info->reshape_progress);
3714 } else {
3715 /* Note: this won't work in 2.6.37 and before.
3716 * Something somewhere should make sure we don't need it!
3717 */
3718 if (info->reshape_progress >= *suspend_point)
3719 sysfs_set_num(info, NULL, "suspend_hi",
3720 info->reshape_progress);
3721 }
3722
3723 /* Now work out how far it is safe to progress.
3724 * If the read_offset for ->reshape_progress is less than
3725 * 'blocks' beyond the write_offset, we can only progress as far
3726 * as a backup.
3727 * Otherwise we can progress until the write_offset for the new location
3728 * reaches (within 'blocks' of) the read_offset at the current location.
3729 * However that region must be suspended unless we are using native
3730 * metadata.
3731 * If we need to suspend more, we limit it to 128M per device, which is
3732 * rather arbitrary and should be some time-based calculation.
3733 */
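/* Worked example (illustrative figures): growing from 2 to 3 data disks
 * with a 512K new chunk and reshape_progress = 1048576 array sectors gives
 * read_offset = 524288 and write_offset = 349525; the read point is well
 * past write_offset + write_range (350549), so progress may safely run
 * ahead to read_offset * 3 = 1572864 array sectors.
 */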
3734 read_offset = info->reshape_progress / reshape->before.data_disks;
3735 write_offset = info->reshape_progress / reshape->after.data_disks;
3736 write_range = info->new_chunk/512;
3737 if (reshape->before.data_disks == reshape->after.data_disks)
3738 need_backup = array_size;
3739 else
3740 need_backup = reshape->backup_blocks;
3741 if (advancing) {
3742 if (read_offset < write_offset + write_range)
3743 max_progress = backup_point;
3744 else
3745 max_progress =
3746 read_offset *
3747 reshape->after.data_disks;
3748 } else {
3749 if (read_offset > write_offset - write_range)
3750 /* Can only progress as far as has been backed up,
3751 * which must be suspended */
3752 max_progress = backup_point;
3753 else if (info->reshape_progress <= need_backup)
3754 max_progress = backup_point;
3755 else {
3756 if (info->array.major_version >= 0)
3757 /* Can progress until backup is needed */
3758 max_progress = need_backup;
3759 else {
3760 /* Can progress until metadata update is required */
3761 max_progress =
3762 read_offset *
3763 reshape->after.data_disks;
3764 /* but data must be suspended */
3765 if (max_progress < *suspend_point)
3766 max_progress = *suspend_point;
3767 }
3768 }
3769 }
3770
3771 /* We know it is safe to progress to 'max_progress' providing
3772 * it is suspended or we are using native metadata.
3773 * Consider extending suspend_point 128M per device if it
3774 * is less than 64M per device beyond reshape_progress.
3775 * But always do a multiple of 'blocks'
3776 * FIXME this is too big - it takes too long to complete
3777 * this much.
3778 */
3779 target = 64*1024*2 * min(reshape->before.data_disks,
3780 reshape->after.data_disks);
3781 target /= reshape->backup_blocks;
3782 if (target < 2)
3783 target = 2;
3784 target *= reshape->backup_blocks;
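	/* target is 64MiB per device expressed in array sectors: 64*1024*2
	 * 512-byte sectors times the smaller data-disk count, rounded down to a
	 * whole number of backup_blocks with a floor of two backup units.  The
	 * code below may then push suspend_hi forward by up to 2 * target,
	 * i.e. roughly 128MiB per device, matching the comment above.
	 */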
3785
3786 /* For externally managed metadata we always need to suspend IO to
3787 * the area being reshaped so we regularly push suspend_point forward.
3788 * For native metadata we only need the suspend if we are going to do
3789 * a backup.
3790 */
3791 if (advancing) {
3792 if ((need_backup > info->reshape_progress
3793 || info->array.major_version < 0) &&
3794 *suspend_point < info->reshape_progress + target) {
3795 if (need_backup < *suspend_point + 2 * target)
3796 *suspend_point = need_backup;
3797 else if (*suspend_point + 2 * target < array_size)
3798 *suspend_point += 2 * target;
3799 else
3800 *suspend_point = array_size;
3801 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
3802 if (max_progress > *suspend_point)
3803 max_progress = *suspend_point;
3804 }
3805 } else {
3806 if (info->array.major_version >= 0) {
3807 /* Only need to suspend when about to backup */
3808 if (info->reshape_progress < need_backup * 2 &&
3809 *suspend_point > 0) {
3810 *suspend_point = 0;
3811 sysfs_set_num(info, NULL, "suspend_lo", 0);
3812 sysfs_set_num(info, NULL, "suspend_hi", need_backup);
3813 }
3814 } else {
3815 /* Need to suspend continually */
3816 if (info->reshape_progress < *suspend_point)
3817 *suspend_point = info->reshape_progress;
3818 if (*suspend_point + target < info->reshape_progress)
3819 /* No need to move suspend region yet */;
3820 else {
3821 if (*suspend_point >= 2 * target)
3822 *suspend_point -= 2 * target;
3823 else
3824 *suspend_point = 0;
3825 sysfs_set_num(info, NULL, "suspend_lo",
3826 *suspend_point);
3827 }
3828 if (max_progress < *suspend_point)
3829 max_progress = *suspend_point;
3830 }
3831 }
3832
3833 /* now set sync_max to allow that progress. sync_max, like
3834 * sync_completed, is a count of sectors written per device, so
3835 * we find the difference between max_progress and the start point,
3836 * and divide that by after.data_disks to get a sync_max
3837 * number.
3838 * At the same time we convert wait_point to a similar number
3839 * for comparing against sync_completed.
3840 */
3841 /* scale down max_progress to per_disk */
3842 max_progress /= reshape->after.data_disks;
3843 /* Round to chunk size as some kernels give an erroneously high number */
3844 max_progress /= info->new_chunk/512;
3845 max_progress *= info->new_chunk/512;
3846 /* And round to old chunk size as the kernel wants that */
3847 max_progress /= info->array.chunk_size/512;
3848 max_progress *= info->array.chunk_size/512;
3849 /* Limit progress to the whole device */
3850 if (max_progress > info->component_size)
3851 max_progress = info->component_size;
3852 wait_point /= reshape->after.data_disks;
3853 if (!advancing) {
3854 /* switch from 'device offset' to 'processed block count' */
3855 max_progress = info->component_size - max_progress;
3856 wait_point = info->component_size - wait_point;
3857 }
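	/* When shrinking, the reshape runs from the end of the device towards
	 * the start, but sync_max/sync_completed always count sectors processed
	 * per device; hence the per-device offsets above are converted by
	 * subtracting them from component_size.
	 */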
3858
3859 if (!*frozen)
3860 sysfs_set_num(info, NULL, "sync_max", max_progress);
3861
3862 /* Now wait. If we have already reached the point that we were
3863 * asked to wait to, don't wait at all, else wait for any change.
3864 * We need to select on 'sync_completed' as that is the place that
3865 * notifications happen, but we are really interested in
3866 * 'reshape_position'
3867 */
3868 fd = sysfs_get_fd(info, NULL, "sync_completed");
3869 if (fd < 0)
3870 goto check_progress;
3871
3872 if (sysfs_fd_get_ll(fd, &completed) < 0)
3873 goto check_progress;
3874
3875 while (completed < max_progress && completed < wait_point) {
3876 /* Check that sync_action is still 'reshape' to avoid
3877 * waiting forever on a dead array
3878 */
3879 char action[20];
3880 if (sysfs_get_str(info, NULL, "sync_action",
3881 action, 20) <= 0 ||
3882 strncmp(action, "reshape", 7) != 0)
3883 break;
3884 /* Some kernels reset 'sync_completed' to zero
3885 * before setting 'sync_action' to 'idle'.
3886 * So we need these extra tests.
3887 */
3888 if (completed == 0 && advancing
3889 && strncmp(action, "idle", 4) == 0
3890 && info->reshape_progress > 0)
3891 break;
3892 if (completed == 0 && !advancing
3893 && strncmp(action, "idle", 4) == 0
3894 && info->reshape_progress < (info->component_size
3895 * reshape->after.data_disks))
3896 break;
3897 sysfs_wait(fd, NULL);
3898 if (sysfs_fd_get_ll(fd, &completed) < 0)
3899 goto check_progress;
3900 }
3901 /* Some kernels reset 'sync_completed' to zero, but we still
3902 * need the real position that md has reached.
3903 * So in that case, read 'reshape_position' from sysfs.
3904 */
3905 if (completed == 0) {
3906 unsigned long long reshapep;
3907 char action[20];
3908 if (sysfs_get_str(info, NULL, "sync_action",
3909 action, 20) > 0 &&
3910 strncmp(action, "idle", 4) == 0 &&
3911 sysfs_get_ll(info, NULL,
3912 "reshape_position", &reshapep) == 0)
3913 *reshape_completed = reshapep;
3914 } else {
3915 /* some kernels can give an incorrectly high
3916 * 'completed' number, so round down */
3917 completed /= (info->new_chunk/512);
3918 completed *= (info->new_chunk/512);
3919 /* Convert 'completed' back in to a 'progress' number */
3920 completed *= reshape->after.data_disks;
3921 if (!advancing)
3922 completed = (info->component_size
3923 * reshape->after.data_disks
3924 - completed);
3925 *reshape_completed = completed;
3926 }
3927
3928 close(fd);
3929
3930 /* We return the need_backup flag. Caller will decide
3931 * how much - a multiple of ->backup_blocks up to *suspend_point
3932 */
3933 if (advancing)
3934 return need_backup > info->reshape_progress;
3935 else
3936 return need_backup >= info->reshape_progress;
3937
3938 check_progress:
3939 /* if we couldn't read a number from sync_completed, then
3940 * either the reshape did complete, or it aborted.
3941 * We can tell which by checking for 'none' in reshape_position.
3942 * If it did abort, then it might immediately restart if it
3943 * was just a device failure that leaves us degraded but
3944 * functioning.
3945 */
3946 if (sysfs_get_str(info, NULL, "reshape_position", buf, sizeof(buf)) < 0
3947 || strncmp(buf, "none", 4) != 0) {
3948 /* The abort might only be temporary. Wait up to 10
3949 * seconds for fd to contain a valid number again.
3950 */
3951 int wait = 10000;
3952 int rv = -2;
3953 unsigned long long new_sync_max;
3954 while (fd >= 0 && rv < 0 && wait > 0) {
3955 if (sysfs_wait(fd, &wait) != 1)
3956 break;
3957 switch (sysfs_fd_get_ll(fd, &completed)) {
3958 case 0:
3959 /* all good again */
3960 rv = 1;
3961 /* If "sync_max" is no longer max_progress
3962 * we need to freeze things
3963 */
3964 sysfs_get_ll(info, NULL, "sync_max", &new_sync_max);
3965 *frozen = (new_sync_max != max_progress);
3966 break;
3967 case -2: /* read error - abort */
3968 wait = 0;
3969 break;
3970 }
3971 }
3972 if (fd >= 0)
3973 close(fd);
3974 return rv; /* abort */
3975 } else {
3976 /* Maybe racing with array shutdown - check state */
3977 if (fd >= 0)
3978 close(fd);
3979 if (sysfs_get_str(info, NULL, "array_state", buf, sizeof(buf)) < 0
3980 || strncmp(buf, "inactive", 8) == 0
3981 || strncmp(buf, "clear",5) == 0)
3982 return -2; /* abort */
3983 return -1; /* complete */
3984 }
3985 }
3986
3987 /* FIXME return status is never checked */
3988 static int grow_backup(struct mdinfo *sra,
3989 unsigned long long offset, /* per device */
3990 unsigned long stripes, /* per device, in old chunks */
3991 int *sources, unsigned long long *offsets,
3992 int disks, int chunk, int level, int layout,
3993 int dests, int *destfd, unsigned long long *destoffsets,
3994 int part, int *degraded,
3995 char *buf)
3996 {
3997 /* Back up 'stripes' old-style chunks at 'offset' (per device) on each
3998 * device of the array, to storage 'destfd' (offset 'destoffsets'),
3999 * after first suspending IO. Then allow resync to continue
4000 * over the suspended section.
4001 * Use part 'part' of the backup-super-block.
4002 */
4003 int odata = disks;
4004 int rv = 0;
4005 int i;
4006 unsigned long long ll;
4007 int new_degraded;
4008 //printf("offset %llu\n", offset);
4009 if (level >= 4)
4010 odata--;
4011 if (level == 6)
4012 odata--;
4013
4014 /* Check that the array hasn't become degraded, else we might back up the wrong data */
4015 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
4016 return -1; /* FIXME this error is ignored */
4017 new_degraded = (int)ll;
4018 if (new_degraded != *degraded) {
4019 /* check each device to ensure it is still working */
4020 struct mdinfo *sd;
4021 for (sd = sra->devs ; sd ; sd = sd->next) {
4022 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4023 continue;
4024 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
4025 char sbuf[20];
4026 if (sysfs_get_str(sra, sd, "state", sbuf, 20) < 0 ||
4027 strstr(sbuf, "faulty") ||
4028 strstr(sbuf, "in_sync") == NULL) {
4029 /* this device is dead */
4030 sd->disk.state = (1<<MD_DISK_FAULTY);
4031 if (sd->disk.raid_disk >= 0 &&
4032 sources[sd->disk.raid_disk] >= 0) {
4033 close(sources[sd->disk.raid_disk]);
4034 sources[sd->disk.raid_disk] = -1;
4035 }
4036 }
4037 }
4038 }
4039 *degraded = new_degraded;
4040 }
4041 if (part) {
4042 bsb.arraystart2 = __cpu_to_le64(offset * odata);
4043 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
4044 } else {
4045 bsb.arraystart = __cpu_to_le64(offset * odata);
4046 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
4047 }
4048 if (part)
4049 bsb.magic[15] = '2';
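	/* The backup super-block carries two regions (arraystart/length and
	 * arraystart2/length2) so that one can be filled while the other is
	 * still protecting data; writing '2' into the magic marks the variant
	 * that uses the second region ("md_backup_data-2").
	 */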
4050 for (i = 0; i < dests; i++)
4051 if (part)
4052 lseek64(destfd[i], destoffsets[i] + __le64_to_cpu(bsb.devstart2)*512, 0);
4053 else
4054 lseek64(destfd[i], destoffsets[i], 0);
4055
4056 rv = save_stripes(sources, offsets,
4057 disks, chunk, level, layout,
4058 dests, destfd,
4059 offset*512*odata, stripes * chunk * odata,
4060 buf);
4061
4062 if (rv)
4063 return rv;
4064 bsb.mtime = __cpu_to_le64(time(0));
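	/* Stamp the superblock, checksum it, and write a copy to each
	 * destination 4KiB before the start of the backup data; when the data
	 * starts more than 4KiB in, a second copy is also written just after
	 * the saved data.
	 */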
4065 for (i = 0; i < dests; i++) {
4066 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4067
4068 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4069 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4070 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4071 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4072
4073 rv = -1;
4074 if ((unsigned long long)lseek64(destfd[i], destoffsets[i] - 4096, 0)
4075 != destoffsets[i] - 4096)
4076 break;
4077 if (write(destfd[i], &bsb, 512) != 512)
4078 break;
4079 if (destoffsets[i] > 4096) {
4080 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
4081 destoffsets[i]+stripes*chunk*odata)
4082 break;
4083 if (write(destfd[i], &bsb, 512) != 512)
4084 break;
4085 }
4086 fsync(destfd[i]);
4087 rv = 0;
4088 }
4089
4090 return rv;
4091 }
4092
4093 /* in 2.6.30, the value reported by sync_completed can be
4094 * less than it should be by one stripe.
4095 * This only happens when reshape hits sync_max and pauses.
4096 * So allow wait_backup to either extend sync_max further
4097 * than strictly necessary, or return before the
4098 * sync has got quite as far as we would really like.
4099 * This is what 'blocks2' is for.
4100 * The various callers give appropriate values so that
4101 * everything works.
4102 */
4103 /* FIXME return value is often ignored */
4104 static int forget_backup(int dests, int *destfd,
4105 unsigned long long *destoffsets,
4106 int part)
4107 {
4108 /*
4109 * Erase backup 'part' (which is 0 or 1)
4110 */
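	/* "Erasing" only zeroes the slot's arraystart/length fields in the
	 * backup super-block and rewrites it with a fresh mtime and checksum;
	 * the backed-up data itself is left in place on the destinations.
	 */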
4111 int i;
4112 int rv;
4113
4114 if (part) {
4115 bsb.arraystart2 = __cpu_to_le64(0);
4116 bsb.length2 = __cpu_to_le64(0);
4117 } else {
4118 bsb.arraystart = __cpu_to_le64(0);
4119 bsb.length = __cpu_to_le64(0);
4120 }
4121 bsb.mtime = __cpu_to_le64(time(0));
4122 rv = 0;
4123 for (i = 0; i < dests; i++) {
4124 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4125 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4126 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4127 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4128 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4129 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4130 destoffsets[i]-4096)
4131 rv = -1;
4132 if (rv == 0 &&
4133 write(destfd[i], &bsb, 512) != 512)
4134 rv = -1;
4135 fsync(destfd[i]);
4136 }
4137 return rv;
4138 }
4139
4140 static void fail(char *msg)
4141 {
4142 int rv;
4143 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4144 rv |= (write(2, "\n", 1) != 1);
4145 exit(rv ? 1 : 2);
4146 }
4147
4148 static char *abuf, *bbuf;
4149 static unsigned long long abuflen;
4150 static void validate(int afd, int bfd, unsigned long long offset)
4151 {
4152 /* Check the data in the backup against the array.
4153 * This is only used for regression testing and should not
4154 * be used while the array is active
4155 */
4156 if (afd < 0)
4157 return;
4158 lseek64(bfd, offset - 4096, 0);
4159 if (read(bfd, &bsb2, 512) != 512)
4160 fail("cannot read bsb");
4161 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4162 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4163 fail("first csum bad");
4164 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4165 fail("magic is bad");
4166 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4167 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4168 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4169 fail("second csum bad");
4170
4171 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4172 fail("devstart is wrong");
4173
4174 if (bsb2.length) {
4175 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4176
4177 if (abuflen < len) {
4178 free(abuf);
4179 free(bbuf);
4180 abuflen = len;
4181 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4182 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4183 abuflen = 0;
4184 /* just stop validating on mem-alloc failure */
4185 return;
4186 }
4187 }
4188
4189 lseek64(bfd, offset, 0);
4190 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4191 //printf("len %llu\n", len);
4192 fail("read first backup failed");
4193 }
4194 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4195 if ((unsigned long long)read(afd, abuf, len) != len)
4196 fail("read first from array failed");
4197 if (memcmp(bbuf, abuf, len) != 0) {
4198 #if 0
4199 int i;
4200 printf("offset=%llu len=%llu\n",
4201 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4202 for (i=0; i<len; i++)
4203 if (bbuf[i] != abuf[i]) {
4204 printf("first diff byte %d\n", i);
4205 break;
4206 }
4207 #endif
4208 fail("data1 compare failed");
4209 }
4210 }
4211 if (bsb2.length2) {
4212 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4213
4214 if (abuflen < len) {
4215 free(abuf);
4216 free(bbuf);
4217 abuflen = len;
4218 abuf = xmalloc(abuflen);
4219 bbuf = xmalloc(abuflen);
4220 }
4221
4222 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4223 if ((unsigned long long)read(bfd, bbuf, len) != len)
4224 fail("read second backup failed");
4225 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4226 if ((unsigned long long)read(afd, abuf, len) != len)
4227 fail("read second from array failed");
4228 if (memcmp(bbuf, abuf, len) != 0)
4229 fail("data2 compare failed");
4230 }
4231 }
4232
4233 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4234 struct supertype *st, unsigned long blocks,
4235 int *fds, unsigned long long *offsets,
4236 int dests, int *destfd, unsigned long long *destoffsets)
4237 {
4238 /* Monitor a reshape where backup is being performed using
4239 * 'native' mechanism - either to a backup file, or
4240 * to some space in a spare.
4241 */
4242 char *buf;
4243 int degraded = -1;
4244 unsigned long long speed;
4245 unsigned long long suspend_point, array_size;
4246 unsigned long long backup_point, wait_point;
4247 unsigned long long reshape_completed;
4248 int done = 0;
4249 int increasing = reshape->after.data_disks >= reshape->before.data_disks;
4250 int part = 0; /* The next part of the backup area to fill. It may already
4251 * be full, so we need to check */
4252 int level = reshape->level;
4253 int layout = reshape->before.layout;
4254 int data = reshape->before.data_disks;
4255 int disks = reshape->before.data_disks + reshape->parity;
4256 int chunk = sra->array.chunk_size;
4257 struct mdinfo *sd;
4258 unsigned long stripes;
4259 int uuid[4];
4260 int frozen = 0;
4261
4262 /* set up the backup-super-block. This requires the
4263 * uuid from the array.
4264 */
4265 /* Find a superblock */
4266 for (sd = sra->devs; sd; sd = sd->next) {
4267 char *dn;
4268 int devfd;
4269 int ok;
4270 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4271 continue;
4272 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4273 devfd = dev_open(dn, O_RDONLY);
4274 if (devfd < 0)
4275 continue;
4276 ok = st->ss->load_super(st, devfd, NULL);
4277 close(devfd);
4278 if (ok == 0)
4279 break;
4280 }
4281 if (!sd) {
4282 pr_err("Cannot find a superblock\n");
4283 return 0;
4284 }
4285
4286 memset(&bsb, 0, 512);
4287 memcpy(bsb.magic, "md_backup_data-1", 16);
4288 st->ss->uuid_from_super(st, uuid);
4289 memcpy(bsb.set_uuid, uuid, 16);
4290 bsb.mtime = __cpu_to_le64(time(0));
4291 bsb.devstart2 = blocks;
4292
4293 stripes = blocks / (sra->array.chunk_size/512) /
4294 reshape->before.data_disks;
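	/* 'blocks' is one backup unit in array sectors; dividing by the old
	 * chunk size (in sectors) and by the old data-disk count gives the
	 * number of old-layout chunks per device covered by one backup slot.
	 * Illustrative numbers (assumed): blocks = 2048 sectors, 512KiB chunk
	 * (1024 sectors), 2 data disks -> stripes = 1.
	 */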
4295
4296 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4297 /* Don't start the 'reshape' */
4298 return 0;
4299 if (reshape->before.data_disks == reshape->after.data_disks) {
4300 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4301 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4302 }
4303
4304 if (increasing) {
4305 array_size = sra->component_size * reshape->after.data_disks;
4306 backup_point = sra->reshape_progress;
4307 suspend_point = 0;
4308 } else {
4309 array_size = sra->component_size * reshape->before.data_disks;
4310 backup_point = reshape->backup_blocks;
4311 suspend_point = array_size;
4312 }
4313
4314 while (!done) {
4315 int rv;
4316
4317 /* Want to return as soon as the oldest backup slot can
4318 * be released as that allows us to start backing up
4319 * some more, providing suspend_point has been
4320 * advanced, which it should have.
4321 */
4322 if (increasing) {
4323 wait_point = array_size;
4324 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4325 wait_point = (__le64_to_cpu(bsb.arraystart) +
4326 __le64_to_cpu(bsb.length));
4327 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4328 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4329 __le64_to_cpu(bsb.length2));
4330 } else {
4331 wait_point = 0;
4332 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4333 wait_point = __le64_to_cpu(bsb.arraystart);
4334 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4335 wait_point = __le64_to_cpu(bsb.arraystart2);
4336 }
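		/* wait_point is the array address the reshape must pass before
		 * the backup slot we are about to reuse ('part') can be
		 * recycled: the end of that slot's region when growing, its
		 * start when shrinking (the reshape then runs backwards).  An
		 * empty slot imposes no constraint, hence the array_size / 0
		 * defaults.
		 */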
4337
4338 reshape_completed = sra->reshape_progress;
4339 rv = progress_reshape(sra, reshape,
4340 backup_point, wait_point,
4341 &suspend_point, &reshape_completed,
4342 &frozen);
4343 /* external metadata would need to ping_monitor here */
4344 sra->reshape_progress = reshape_completed;
4345
4346 /* Clear any backup region that is before 'here' */
4347 if (increasing) {
4348 if (__le64_to_cpu(bsb.length) > 0 &&
4349 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4350 __le64_to_cpu(bsb.length)))
4351 forget_backup(dests, destfd,
4352 destoffsets, 0);
4353 if (__le64_to_cpu(bsb.length2) > 0 &&
4354 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4355 __le64_to_cpu(bsb.length2)))
4356 forget_backup(dests, destfd,
4357 destoffsets, 1);
4358 } else {
4359 if (__le64_to_cpu(bsb.length) > 0 &&
4360 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4361 forget_backup(dests, destfd,
4362 destoffsets, 0);
4363 if (__le64_to_cpu(bsb.length2) > 0 &&
4364 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4365 forget_backup(dests, destfd,
4366 destoffsets, 1);
4367 }
4368 if (sigterm)
4369 rv = -2;
4370 if (rv < 0) {
4371 if (rv == -1)
4372 done = 1;
4373 break;
4374 }
4375 if (rv == 0 && increasing && !st->ss->external) {
4376 /* No longer need to monitor this reshape */
4377 sysfs_set_str(sra, NULL, "sync_max", "max");
4378 done = 1;
4379 break;
4380 }
4381
4382 while (rv) {
4383 unsigned long long offset;
4384 unsigned long actual_stripes;
4385 /* Need to backup some data.
4386 * If 'part' is not used and the desired
4387 * backup size is suspended, do a backup,
4388 * then consider the next part.
4389 */
4390 /* Check that 'part' is unused */
4391 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4392 break;
4393 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4394 break;
4395
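			/* Convert the array-address backup_point into a
			 * per-device offset and clamp actual_stripes so the
			 * slot does not run off the end of the device (growing)
			 * or past its start (shrinking); if it would reach
			 * beyond the region covered by suspend_point, do not
			 * back up yet.
			 */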
4396 offset = backup_point / data;
4397 actual_stripes = stripes;
4398 if (increasing) {
4399 if (offset + actual_stripes * (chunk/512) >
4400 sra->component_size)
4401 actual_stripes = ((sra->component_size - offset)
4402 / (chunk/512));
4403 if (offset + actual_stripes * (chunk/512) >
4404 suspend_point/data)
4405 break;
4406 } else {
4407 if (offset < actual_stripes * (chunk/512))
4408 actual_stripes = offset / (chunk/512);
4409 offset -= actual_stripes * (chunk/512);
4410 if (offset < suspend_point/data)
4411 break;
4412 }
4413 if (actual_stripes == 0)
4414 break;
4415 grow_backup(sra, offset, actual_stripes,
4416 fds, offsets,
4417 disks, chunk, level, layout,
4418 dests, destfd, destoffsets,
4419 part, &degraded, buf);
4420 validate(afd, destfd[0], destoffsets[0]);
4421 /* record where 'part' is up to */
4422 part = !part;
4423 if (increasing)
4424 backup_point += actual_stripes * (chunk/512) * data;
4425 else
4426 backup_point -= actual_stripes * (chunk/512) * data;
4427 }
4428 }
4429
4430 /* FIXME maybe call progress_reshape one more time instead */
4431 /* remove any remaining suspension */
4432 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4433 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4434 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4435 sysfs_set_num(sra, NULL, "sync_min", 0);
4436
4437 if (reshape->before.data_disks == reshape->after.data_disks)
4438 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4439 free(buf);
4440 return done;
4441 }
4442
4443 /*
4444 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4445 * write that data into the array and update the super blocks with
4446 * the new reshape_progress
4447 */
4448 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, int cnt,
4449 char *backup_file, int verbose)
4450 {
4451 int i, j;
4452 int old_disks;
4453 unsigned long long *offsets;
4454 unsigned long long nstripe, ostripe;
4455 int ndata, odata;
4456
4457 odata = info->array.raid_disks - info->delta_disks - 1;
4458 if (info->array.level == 6) odata--; /* number of data disks */
4459 ndata = info->array.raid_disks - 1;
4460 if (info->new_level == 6) ndata--;
4461
4462 old_disks = info->array.raid_disks - info->delta_disks;
4463
4464 if (info->delta_disks <= 0)
4465 /* Didn't grow, so the backup file must have
4466 * been used
4467 */
4468 old_disks = cnt;
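	/* Scan every candidate backup location: each spare slot in fdlist,
	 * plus, when a backup file was supplied, the slot just before the
	 * spares, which is read from that file instead of a member device.
	 */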
4469 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4470 struct mdinfo dinfo;
4471 int fd;
4472 int bsbsize;
4473 char *devname, namebuf[20];
4474 unsigned long long lo, hi;
4475
4476 /* This was a spare and may have some saved data on it.
4477 * Load the superblock, find and load the
4478 * backup_super_block.
4479 * If either fail, go on to next device.
4480 * If the backup contains no new info, just return
4481 * else restore data and update all superblocks
4482 */
4483 if (i == old_disks-1) {
4484 fd = open(backup_file, O_RDONLY);
4485 if (fd<0) {
4486 pr_err("backup file %s inaccessible: %s\n",
4487 backup_file, strerror(errno));
4488 continue;
4489 }
4490 devname = backup_file;
4491 } else {
4492 fd = fdlist[i];
4493 if (fd < 0)
4494 continue;
4495 if (st->ss->load_super(st, fd, NULL))
4496 continue;
4497
4498 st->ss->getinfo_super(st, &dinfo, NULL);
4499 st->ss->free_super(st);
4500
4501 if (lseek64(fd,
4502 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4503 0) < 0) {
4504 pr_err("Cannot seek on device %d\n", i);
4505 continue; /* Cannot seek */
4506 }
4507 sprintf(namebuf, "device-%d", i);
4508 devname = namebuf;
4509 }
4510 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4511 if (verbose)
4512 pr_err("Cannot read from %s\n", devname);
4513 continue; /* Cannot read */
4514 }
4515 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4516 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4517 if (verbose)
4518 pr_err("No backup metadata on %s\n", devname);
4519 continue;
4520 }
4521 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4522 if (verbose)
4523 pr_err("Bad backup-metadata checksum on %s\n", devname);
4524 continue; /* bad checksum */
4525 }
4526 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4527 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4528 if (verbose)
4529 pr_err("Bad backup-metadata checksum2 on %s\n", devname);
4530 continue; /* Bad second checksum */
4531 }
4532 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4533 if (verbose)
4534 pr_err("Wrong uuid on backup-metadata on %s\n", devname);
4535 continue; /* Wrong uuid */
4536 }
4537
4538 /* array utime and backup-mtime should be updated at much the same time, but it seems that
4539 * sometimes they aren't... So allow considerable flexibility in matching, and allow
4540 * this test to be overridden by an environment variable.
4541 */
4542 if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
4543 time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
4544 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4545 pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
4546 (unsigned long)__le64_to_cpu(bsb.mtime),
4547 (unsigned long)info->array.utime);
4548 } else {
4549 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4550 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4551 continue; /* time stamp is too bad */
4552 }
4553 }
4554
4555 if (bsb.magic[15] == '1') {
4556 if (bsb.length == 0)
4557 continue;
4558 if (info->delta_disks >= 0) {
4559 /* reshape_progress is increasing */
4560 if (__le64_to_cpu(bsb.arraystart)
4561 + __le64_to_cpu(bsb.length)
4562 < info->reshape_progress) {
4563 nonew:
4564 if (verbose)
4565 pr_err("backup-metadata found on %s but is not needed\n", devname);
4566 continue; /* No new data here */
4567 }
4568 } else {
4569 /* reshape_progress is decreasing */
4570 if (__le64_to_cpu(bsb.arraystart) >=
4571 info->reshape_progress)
4572 goto nonew; /* No new data here */
4573 }
4574 } else {
4575 if (bsb.length == 0 && bsb.length2 == 0)
4576 continue;
4577 if (info->delta_disks >= 0) {
4578 /* reshape_progress is increasing */
4579 if ((__le64_to_cpu(bsb.arraystart)
4580 + __le64_to_cpu(bsb.length)
4581 < info->reshape_progress)
4582 &&
4583 (__le64_to_cpu(bsb.arraystart2)
4584 + __le64_to_cpu(bsb.length2)
4585 < info->reshape_progress))
4586 goto nonew; /* No new data here */
4587 } else {
4588 /* reshape_progress is decreasing */
4589 if (__le64_to_cpu(bsb.arraystart) >=
4590 info->reshape_progress &&
4591 __le64_to_cpu(bsb.arraystart2) >=
4592 info->reshape_progress)
4593 goto nonew; /* No new data here */
4594 }
4595 }
4596 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4597 second_fail:
4598 if (verbose)
4599 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4600 devname);
4601 continue; /* Cannot seek */
4602 }
4603 /* There should be a duplicate backup superblock 4k before here */
4604 if (lseek64(fd, -4096, 1) < 0 ||
4605 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4606 goto second_fail; /* Cannot find leading superblock */
4607 if (bsb.magic[15] == '1')
4608 bsbsize = offsetof(struct mdp_backup_super, pad1);
4609 else
4610 bsbsize = offsetof(struct mdp_backup_super, pad);
4611 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4612 goto second_fail; /* Cannot find leading superblock */
4613
4614 /* Now need the data offsets for all devices. */
4615 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4616 for(j=0; j<info->array.raid_disks; j++) {
4617 if (fdlist[j] < 0)
4618 continue;
4619 if (st->ss->load_super(st, fdlist[j], NULL))
4620 /* FIXME should this be an error? */
4621 continue;
4622 st->ss->getinfo_super(st, &dinfo, NULL);
4623 st->ss->free_super(st);
4624 offsets[j] = dinfo.data_offset * 512;
4625 }
4626 printf("%s: restoring critical section\n", Name);
4627
4628 if (restore_stripes(fdlist, offsets,
4629 info->array.raid_disks,
4630 info->new_chunk,
4631 info->new_level,
4632 info->new_layout,
4633 fd, __le64_to_cpu(bsb.devstart)*512,
4634 __le64_to_cpu(bsb.arraystart)*512,
4635 __le64_to_cpu(bsb.length)*512, NULL)) {
4636 /* didn't succeed, so give up */
4637 if (verbose)
4638 pr_err("Error restoring backup from %s\n",
4639 devname);
4640 free(offsets);
4641 return 1;
4642 }
4643
4644 if (bsb.magic[15] == '2' &&
4645 restore_stripes(fdlist, offsets,
4646 info->array.raid_disks,
4647 info->new_chunk,
4648 info->new_level,
4649 info->new_layout,
4650 fd, __le64_to_cpu(bsb.devstart)*512 +
4651 __le64_to_cpu(bsb.devstart2)*512,
4652 __le64_to_cpu(bsb.arraystart2)*512,
4653 __le64_to_cpu(bsb.length2)*512, NULL)) {
4654 /* didn't succeed, so give up */
4655 if (verbose)
4656 pr_err("Error restoring second backup from %s\n",
4657 devname);
4658 free(offsets);
4659 return 1;
4660 }
4661
4662 free(offsets);
4663
4664 /* Ok, so the data is restored. Let's update those superblocks. */
4665
4666 lo = hi = 0;
4667 if (bsb.length) {
4668 lo = __le64_to_cpu(bsb.arraystart);
4669 hi = lo + __le64_to_cpu(bsb.length);
4670 }
4671 if (bsb.magic[15] == '2' && bsb.length2) {
4672 unsigned long long lo1, hi1;
4673 lo1 = __le64_to_cpu(bsb.arraystart2);
4674 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4675 if (lo == hi) {
4676 lo = lo1;
4677 hi = hi1;
4678 } else if (lo < lo1)
4679 hi = hi1;
4680 else
4681 lo = lo1;
4682 }
4683 if (lo < hi &&
4684 (info->reshape_progress < lo ||
4685 info->reshape_progress > hi))
4686 /* backup does not affect reshape_progress */ ;
4687 else if (info->delta_disks >= 0) {
4688 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4689 __le64_to_cpu(bsb.length);
4690 if (bsb.magic[15] == '2') {
4691 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2) +
4692 __le64_to_cpu(bsb.length2);
4693 if (p2 > info->reshape_progress)
4694 info->reshape_progress = p2;
4695 }
4696 } else {
4697 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4698 if (bsb.magic[15] == '2') {
4699 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2);
4700 if (p2 < info->reshape_progress)
4701 info->reshape_progress = p2;
4702 }
4703 }
4704 for (j=0; j<info->array.raid_disks; j++) {
4705 if (fdlist[j] < 0)
4706 continue;
4707 if (st->ss->load_super(st, fdlist[j], NULL))
4708 continue;
4709 st->ss->getinfo_super(st, &dinfo, NULL);
4710 dinfo.reshape_progress = info->reshape_progress;
4711 st->ss->update_super(st, &dinfo,
4712 "_reshape_progress",
4713 NULL,0, 0, NULL);
4714 st->ss->store_super(st, fdlist[j]);
4715 st->ss->free_super(st);
4716 }
4717 return 0;
4718 }
4719 /* Didn't find any backup data, try to see if any
4720 * was needed.
4721 */
4722 if (info->delta_disks < 0) {
4723 /* When shrinking, the critical section is at the end.
4724 * So see if we are before the critical section.
4725 */
4726 unsigned long long first_block;
4727 nstripe = ostripe = 0;
4728 first_block = 0;
4729 while (ostripe >= nstripe) {
4730 ostripe += info->array.chunk_size / 512;
4731 first_block = ostripe * odata;
4732 nstripe = first_block / ndata / (info->new_chunk/512) *
4733 (info->new_chunk/512);
4734 }
4735
4736 if (info->reshape_progress >= first_block)
4737 return 0;
4738 }
4739 if (info->delta_disks > 0) {
4740 /* See if we are beyond the critical section. */
4741 unsigned long long last_block;
4742 nstripe = ostripe = 0;
4743 last_block = 0;
4744 while (nstripe >= ostripe) {
4745 nstripe += info->new_chunk / 512;
4746 last_block = nstripe * ndata;
4747 ostripe = last_block / odata / (info->array.chunk_size/512) *
4748 (info->array.chunk_size/512);
4749 }
4750
4751 if (info->reshape_progress >= last_block)
4752 return 0;
4753 }
4754 /* needed to recover critical section! */
4755 if (verbose)
4756 pr_err("Failed to find backup of critical section\n");
4757 return 1;
4758 }
4759
4760 int Grow_continue_command(char *devname, int fd,
4761 char *backup_file, int verbose)
4762 {
4763 int ret_val = 0;
4764 struct supertype *st = NULL;
4765 struct mdinfo *content = NULL;
4766 struct mdinfo array;
4767 char *subarray = NULL;
4768 struct mdinfo *cc = NULL;
4769 struct mdstat_ent *mdstat = NULL;
4770 int cfd = -1;
4771 int fd2;
4772
4773 dprintf("Grow continue from command line called for %s\n",
4774 devname);
4775
4776 st = super_by_fd(fd, &subarray);
4777 if (!st || !st->ss) {
4778 pr_err("Unable to determine metadata format for %s\n",
4779 devname);
4780 return 1;
4781 }
4782 dprintf("Grow continue is run for ");
4783 if (st->ss->external == 0) {
4784 int d;
4785 dprintf_cont("native array (%s)\n", devname);
4786 if (ioctl(fd, GET_ARRAY_INFO, &array.array) < 0) {
4787 pr_err("%s is not an active md array - aborting\n", devname);
4788 ret_val = 1;
4789 goto Grow_continue_command_exit;
4790 }
4791 content = &array;
4792 /* Need to load a superblock.
4793 * FIXME we should really get what we need from
4794 * sysfs
4795 */
4796 for (d = 0; d < MAX_DISKS; d++) {
4797 mdu_disk_info_t disk;
4798 char *dv;
4799 int err;
4800 disk.number = d;
4801 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
4802 continue;
4803 if (disk.major == 0 && disk.minor == 0)
4804 continue;
4805 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
4806 continue;
4807 dv = map_dev(disk.major, disk.minor, 1);
4808 if (!dv)
4809 continue;
4810 fd2 = dev_open(dv, O_RDONLY);
4811 if (fd2 < 0)
4812 continue;
4813 err = st->ss->load_super(st, fd2, NULL);
4814 close(fd2);
4815 if (err)
4816 continue;
4817 break;
4818 }
4819 if (d == MAX_DISKS) {
4820 pr_err("Unable to load metadata for %s\n",
4821 devname);
4822 ret_val = 1;
4823 goto Grow_continue_command_exit;
4824 }
4825 st->ss->getinfo_super(st, content, NULL);
4826 } else {
4827 char *container;
4828
4829 if (subarray) {
4830 dprintf_cont("subarray (%s)\n", subarray);
4831 container = st->container_devnm;
4832 cfd = open_dev_excl(st->container_devnm);
4833 } else {
4834 container = st->devnm;
4835 close(fd);
4836 cfd = open_dev_excl(st->devnm);
4837 dprintf_cont("container (%s)\n", container);
4838 fd = cfd;
4839 }
4840 if (cfd < 0) {
4841 pr_err("Unable to open container for %s\n", devname);
4842 ret_val = 1;
4843 goto Grow_continue_command_exit;
4844 }
4845
4846 /* find the array under reshape in the container
4847 */
4848 ret_val = st->ss->load_container(st, cfd, NULL);
4849 if (ret_val) {
4850 pr_err("Cannot read superblock for %s\n",
4851 devname);
4852 ret_val = 1;
4853 goto Grow_continue_command_exit;
4854 }
4855
4856 cc = st->ss->container_content(st, subarray);
4857 for (content = cc; content ; content = content->next) {
4858 char *array;
4859 int allow_reshape = 1;
4860
4861 if (content->reshape_active == 0)
4862 continue;
4863 /* The decision about array- or container-wide
4864 * reshape is taken in Grow_continue based on the
4865 * content->reshape_active state; therefore we
4866 * need to check reshape based on
4867 * reshape_active and the subarray name
4868 */
4869 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
4870 allow_reshape = 0;
4871 if (content->reshape_active == CONTAINER_RESHAPE &&
4872 (content->array.state
4873 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
4874 allow_reshape = 0;
4875
4876 if (!allow_reshape) {
4877 pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
4878 devname, container);
4879 ret_val = 1;
4880 goto Grow_continue_command_exit;
4881 }
4882
4883 array = strchr(content->text_version+1, '/')+1;
4884 mdstat = mdstat_by_subdev(array, container);
4885 if (!mdstat)
4886 continue;
4887 if (mdstat->active == 0) {
4888 pr_err("Skipping inactive array %s.\n",
4889 mdstat->devnm);
4890 free_mdstat(mdstat);
4891 mdstat = NULL;
4892 continue;
4893 }
4894 break;
4895 }
4896 if (!content) {
4897 pr_err("Unable to determine reshaped array for %s\n", devname);
4898 ret_val = 1;
4899 goto Grow_continue_command_exit;
4900 }
4901 fd2 = open_dev(mdstat->devnm);
4902 if (fd2 < 0) {
4903 pr_err("cannot open (%s)\n", mdstat->devnm);
4904 ret_val = 1;
4905 goto Grow_continue_command_exit;
4906 }
4907
4908 sysfs_init(content, fd2, mdstat->devnm);
4909
4910 close(fd2);
4911
4912 /* start mdmon in case it is not running
4913 */
4914 if (!mdmon_running(container))
4915 start_mdmon(container);
4916 ping_monitor(container);
4917
4918 if (mdmon_running(container))
4919 st->update_tail = &st->updates;
4920 else {
4921 pr_err("No mdmon found. Grow cannot continue.\n");
4922 ret_val = 1;
4923 goto Grow_continue_command_exit;
4924 }
4925 }
4926
4927 /* verify that the array under reshape is started from
4928 * the correct position
4929 */
4930 if (verify_reshape_position(content, content->array.level) < 0) {
4931 ret_val = 1;
4932 goto Grow_continue_command_exit;
4933 }
4934
4935 /* continue reshape
4936 */
4937 ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);
4938
4939 Grow_continue_command_exit:
4940 if (cfd > -1)
4941 close(cfd);
4942 st->ss->free_super(st);
4943 free_mdstat(mdstat);
4944 sysfs_free(cc);
4945 free(subarray);
4946
4947 return ret_val;
4948 }
4949
4950 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
4951 char *backup_file, int forked, int freeze_reshape)
4952 {
4953 int ret_val = 2;
4954
4955 if (!info->reshape_active)
4956 return ret_val;
4957
4958 if (st->ss->external) {
4959 int cfd = open_dev(st->container_devnm);
4960
4961 if (cfd < 0)
4962 return 1;
4963
4964 st->ss->load_container(st, cfd, st->container_devnm);
4965 close(cfd);
4966 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
4967 st, info, 0, backup_file,
4968 0, forked,
4969 1 | info->reshape_active,
4970 freeze_reshape);
4971 } else
4972 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
4973 NULL, INVALID_SECTORS,
4974 backup_file, 0, forked,
4975 1 | info->reshape_active,
4976 freeze_reshape);
4977
4978 return ret_val;
4979 }
4980
4981 char *make_backup(char *name)
4982 {
4983 char *base = "backup_file-";
4984 int len;
4985 char *fname;
4986
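	/* Build the path of the per-array backup file kept under MAP_DIR and
	 * named after the array's sysfs name, e.g. (illustrative name, not
	 * from any particular system) <MAP_DIR>/backup_file-md127 when name
	 * is "md127".
	 */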
4987 len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1;
4988 fname = xmalloc(len);
4989 sprintf(fname, "%s/%s%s", MAP_DIR, base, name);
4990 return fname;
4991 }
4992
4993 char *locate_backup(char *name)
4994 {
4995 char *fl = make_backup(name);
4996 struct stat stb;
4997
4998 if (stat(fl, &stb) == 0 &&
4999 S_ISREG(stb.st_mode))
5000 return fl;
5001
5002 free(fl);
5003 return NULL;
5004 }