1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stddef.h>
28 #include <stdint.h>
29 #include <signal.h>
30 #include <sys/wait.h>
31
32 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
33 #error no endian defined
34 #endif
35 #include "md_u.h"
36 #include "md_p.h"
37
38 int restore_backup(struct supertype *st,
39 struct mdinfo *content,
40 int working_disks,
41 int next_spare,
42 char **backup_filep,
43 int verbose)
44 {
45 int i;
46 int *fdlist;
47 struct mdinfo *dev;
48 int err;
49 int disk_count = next_spare + working_disks;
50 char *backup_file = *backup_filep;
51
52 dprintf("Called restore_backup()\n");
53 fdlist = xmalloc(sizeof(int) * disk_count);
54
55 enable_fds(next_spare);
56 for (i = 0; i < next_spare; i++)
57 fdlist[i] = -1;
58 for (dev = content->devs; dev; dev = dev->next) {
59 char buf[22];
60 int fd;
61 sprintf(buf, "%d:%d",
62 dev->disk.major,
63 dev->disk.minor);
64 fd = dev_open(buf, O_RDWR);
65
66 if (dev->disk.raid_disk >= 0)
67 fdlist[dev->disk.raid_disk] = fd;
68 else
69 fdlist[next_spare++] = fd;
70 }
71
72 if (!backup_file) {
73 backup_file = locate_backup(content->sys_name);
74 *backup_filep = backup_file;
75 }
76
77 if (st->ss->external && st->ss->recover_backup)
78 err = st->ss->recover_backup(st, content);
79 else
80 err = Grow_restart(st, content, fdlist, next_spare,
81 backup_file, verbose > 0);
82
83 while (next_spare > 0) {
84 next_spare--;
85 if (fdlist[next_spare] >= 0)
86 close(fdlist[next_spare]);
87 }
88 free(fdlist);
89 if (err) {
90 pr_err("Failed to restore critical section for reshape - sorry.\n");
91 if (!backup_file)
92 pr_err("Possibly you need to specify a --backup-file\n");
93 return 1;
94 }
95
96 dprintf("restore_backup() returns status OK.\n");
97 return 0;
98 }
99
100 int Grow_Add_device(char *devname, int fd, char *newdev)
101 {
102 /* Add a device to an active array.
103 * Currently, just extend a linear array.
104 * This requires writing a new superblock on the
105 * new device, calling the kernel to add the device,
106 * and if that succeeds, update the superblock on
107 * all other devices.
108 * This means that we need to *find* all other devices.
109 */
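/*
 * Illustrative only (device names are examples, not taken from this
 * file): a command line that reaches this path is
 *
 *     mdadm --grow /dev/md0 --add /dev/sdc1
 *
 * where /dev/md0 is a linear array and /dev/sdc1 is the device to append.
 */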
110 struct mdinfo info;
111
112 struct stat stb;
113 int nfd, fd2;
114 int d, nd;
115 struct supertype *st = NULL;
116 char *subarray = NULL;
117
118 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
119 pr_err("cannot get array info for %s\n", devname);
120 return 1;
121 }
122
123 if (info.array.level != -1) {
124 pr_err("can only add devices to linear arrays\n");
125 return 1;
126 }
127
128 st = super_by_fd(fd, &subarray);
129 if (!st) {
130 pr_err("cannot handle arrays with superblock version %d\n",
131 info.array.major_version);
132 return 1;
133 }
134
135 if (subarray) {
136 pr_err("Cannot grow linear sub-arrays yet\n");
137 free(subarray);
138 free(st);
139 return 1;
140 }
141
142 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
143 if (nfd < 0) {
144 pr_err("cannot open %s\n", newdev);
145 free(st);
146 return 1;
147 }
148 fstat(nfd, &stb);
149 if ((stb.st_mode & S_IFMT) != S_IFBLK) {
150 pr_err("%s is not a block device!\n", newdev);
151 close(nfd);
152 free(st);
153 return 1;
154 }
155 /* now check out all the devices and make sure we can read the
156 * superblock */
157 for (d=0 ; d < info.array.raid_disks ; d++) {
158 mdu_disk_info_t disk;
159 char *dv;
160
161 st->ss->free_super(st);
162
163 disk.number = d;
164 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
165 pr_err("cannot get device detail for device %d\n",
166 d);
167 close(nfd);
168 free(st);
169 return 1;
170 }
171 dv = map_dev(disk.major, disk.minor, 1);
172 if (!dv) {
173 pr_err("cannot find device file for device %d\n",
174 d);
175 close(nfd);
176 free(st);
177 return 1;
178 }
179 fd2 = dev_open(dv, O_RDWR);
180 if (fd2 < 0) {
181 pr_err("cannot open device file %s\n", dv);
182 close(nfd);
183 free(st);
184 return 1;
185 }
186
187 if (st->ss->load_super(st, fd2, NULL)) {
188 pr_err("cannot find super block on %s\n", dv);
189 close(nfd);
190 close(fd2);
191 free(st);
192 return 1;
193 }
194 close(fd2);
195 }
196 /* Ok, looks good. Let's update the superblock and write it out to
197 * newdev.
198 */
199
200 info.disk.number = d;
201 info.disk.major = major(stb.st_rdev);
202 info.disk.minor = minor(stb.st_rdev);
203 info.disk.raid_disk = d;
204 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
205 st->ss->update_super(st, &info, "linear-grow-new", newdev,
206 0, 0, NULL);
207
208 if (st->ss->store_super(st, nfd)) {
209 pr_err("Cannot store new superblock on %s\n",
210 newdev);
211 close(nfd);
212 return 1;
213 }
214 close(nfd);
215
216 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
217 pr_err("Cannot add new disk to this array\n");
218 return 1;
219 }
220 /* Well, that seems to have worked.
221 * Now go through and update all superblocks
222 */
223
224 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
225 pr_err("cannot get array info for %s\n", devname);
226 return 1;
227 }
228
229 nd = d;
230 for (d=0 ; d < info.array.raid_disks ; d++) {
231 mdu_disk_info_t disk;
232 char *dv;
233
234 disk.number = d;
235 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
236 pr_err("cannot get device detail for device %d\n",
237 d);
238 return 1;
239 }
240 dv = map_dev(disk.major, disk.minor, 1);
241 if (!dv) {
242 pr_err("cannot find device file for device %d\n",
243 d);
244 return 1;
245 }
246 fd2 = dev_open(dv, O_RDWR);
247 if (fd2 < 0) {
248 pr_err("cannot open device file %s\n", dv);
249 return 1;
250 }
251 if (st->ss->load_super(st, fd2, NULL)) {
252 pr_err("cannot find super block on %s\n", dv);
253 close(fd);
254 return 1;
255 }
256 info.array.raid_disks = nd+1;
257 info.array.nr_disks = nd+1;
258 info.array.active_disks = nd+1;
259 info.array.working_disks = nd+1;
260
261 st->ss->update_super(st, &info, "linear-grow-update", dv,
262 0, 0, NULL);
263
264 if (st->ss->store_super(st, fd2)) {
265 pr_err("Cannot store new superblock on %s\n", dv);
266 close(fd2);
267 return 1;
268 }
269 close(fd2);
270 }
271
272 return 0;
273 }
274
275 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
276 {
277 /*
278 * First check that array doesn't have a bitmap
279 * Then create the bitmap
280 * Then add it
281 *
282 * For internal bitmaps, we need to check the version,
283 * find all the active devices, and write the bitmap block
284 * to all devices
285 */
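/*
 * Illustrative command lines that reach this function (device and file
 * names are examples only):
 *
 *     mdadm --grow /dev/md0 --bitmap=internal
 *     mdadm --grow /dev/md0 --bitmap=/var/lib/md0-bitmap
 *     mdadm --grow /dev/md0 --bitmap=none
 *
 * i.e. adding an internal bitmap, adding an external bitmap file, or
 * removing an existing bitmap.
 */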
286 mdu_bitmap_file_t bmf;
287 mdu_array_info_t array;
288 struct supertype *st;
289 char *subarray = NULL;
290 int major = BITMAP_MAJOR_HI;
291 int vers = md_get_version(fd);
292 unsigned long long bitmapsize, array_size;
293
294 if (vers < 9003) {
295 major = BITMAP_MAJOR_HOSTENDIAN;
296 pr_err("Warning - bitmaps created on this kernel are not portable\n"
297 " between different architectures. Consider upgrading the Linux kernel.\n");
298 }
299
300 /*
301 * We only ever get called if s->bitmap_file is not NULL, so this check
302 * is just here to quiet down static code checkers.
303 */
304 if (!s->bitmap_file)
305 return 1;
306
307 if (strcmp(s->bitmap_file, "clustered") == 0)
308 major = BITMAP_MAJOR_CLUSTERED;
309
310 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
311 if (errno == ENOMEM)
312 pr_err("Memory allocation failure.\n");
313 else
314 pr_err("bitmaps not supported by this kernel.\n");
315 return 1;
316 }
317 if (bmf.pathname[0]) {
318 if (strcmp(s->bitmap_file,"none")==0) {
319 if (ioctl(fd, SET_BITMAP_FILE, -1)!= 0) {
320 pr_err("failed to remove bitmap %s\n",
321 bmf.pathname);
322 return 1;
323 }
324 return 0;
325 }
326 pr_err("%s already has a bitmap (%s)\n",
327 devname, bmf.pathname);
328 return 1;
329 }
330 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
331 pr_err("cannot get array status for %s\n", devname);
332 return 1;
333 }
334 if (array.state & (1<<MD_SB_BITMAP_PRESENT)) {
335 if (strcmp(s->bitmap_file, "none")==0) {
336 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
337 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
338 if (array.state & (1<<MD_SB_CLUSTERED))
339 pr_err("failed to remove clustered bitmap.\n");
340 else
341 pr_err("failed to remove internal bitmap.\n");
342 return 1;
343 }
344 return 0;
345 }
346 pr_err("bitmap already present on %s\n", devname);
347 return 1;
348 }
349
350 if (strcmp(s->bitmap_file, "none") == 0) {
351 pr_err("no bitmap found on %s\n", devname);
352 return 1;
353 }
354 if (array.level <= 0) {
355 pr_err("Bitmaps not meaningful with level %s\n",
356 map_num(pers, array.level)?:"of this array");
357 return 1;
358 }
359 bitmapsize = array.size;
360 bitmapsize <<= 1;
361 if (get_dev_size(fd, NULL, &array_size) &&
362 array_size > (0x7fffffffULL<<9)) {
363 /* Array is big enough that we cannot trust array.size
364 * try other approaches
365 */
366 bitmapsize = get_component_size(fd);
367 }
368 if (bitmapsize == 0) {
369 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
370 return 1;
371 }
372
373 if (array.level == 10) {
374 int ncopies = (array.layout&255)*((array.layout>>8)&255);
375 bitmapsize = bitmapsize * array.raid_disks / ncopies;
376 }
377
378 st = super_by_fd(fd, &subarray);
379 if (!st) {
380 pr_err("Cannot understand version %d.%d\n",
381 array.major_version, array.minor_version);
382 return 1;
383 }
384 if (subarray) {
385 pr_err("Cannot add bitmaps to sub-arrays yet\n");
386 free(subarray);
387 free(st);
388 return 1;
389 }
390 if (strcmp(s->bitmap_file, "internal") == 0 ||
391 strcmp(s->bitmap_file, "clustered") == 0) {
392 int rv;
393 int d;
394 int offset_setable = 0;
395 struct mdinfo *mdi;
396 if (st->ss->add_internal_bitmap == NULL) {
397 pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name);
398 return 1;
399 }
400 st->nodes = c->nodes;
401 st->cluster_name = c->homecluster;
402 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
403 if (mdi)
404 offset_setable = 1;
405 for (d=0; d< st->max_devs; d++) {
406 mdu_disk_info_t disk;
407 char *dv;
408 int fd2;
409
410 disk.number = d;
411 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
412 continue;
413 if (disk.major == 0 &&
414 disk.minor == 0)
415 continue;
416 if ((disk.state & (1<<MD_DISK_SYNC))==0)
417 continue;
418 dv = map_dev(disk.major, disk.minor, 1);
419 if (!dv)
420 continue;
421 fd2 = dev_open(dv, O_RDWR);
422 if (fd2 < 0)
423 continue;
424 rv = st->ss->load_super(st, fd2, NULL);
425 if (!rv) {
426 if (st->ss->add_internal_bitmap(
427 st, &s->bitmap_chunk, c->delay,
428 s->write_behind, bitmapsize,
429 offset_setable, major))
430 st->ss->write_bitmap(st, fd2, NodeNumUpdate);
431 else {
432 pr_err("failed to create internal bitmap - chunksize problem.\n");
433 close(fd2);
434 return 1;
435 }
436 } else {
437 pr_err("failed to load super-block.\n");
438 close(fd2);
439 return 1;
440 }
441 close(fd2);
442 }
443 if (offset_setable) {
444 st->ss->getinfo_super(st, mdi, NULL);
445 sysfs_init(mdi, fd, NULL);
446 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
447 mdi->bitmap_offset);
448 } else {
449 if (strcmp(s->bitmap_file, "clustered") == 0)
450 array.state |= (1<<MD_SB_CLUSTERED);
451 array.state |= (1<<MD_SB_BITMAP_PRESENT);
452 rv = ioctl(fd, SET_ARRAY_INFO, &array);
453 }
454 if (rv < 0) {
455 if (errno == EBUSY)
456 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
457 pr_err("failed to set internal bitmap.\n");
458 return 1;
459 }
460 } else {
461 int uuid[4];
462 int bitmap_fd;
463 int d;
464 int max_devs = st->max_devs;
465
466 /* try to load a superblock */
467 for (d = 0; d < max_devs; d++) {
468 mdu_disk_info_t disk;
469 char *dv;
470 int fd2;
471 disk.number = d;
472 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
473 continue;
474 if ((disk.major==0 && disk.minor==0) ||
475 (disk.state & (1<<MD_DISK_REMOVED)))
476 continue;
477 dv = map_dev(disk.major, disk.minor, 1);
478 if (!dv)
479 continue;
480 fd2 = dev_open(dv, O_RDONLY);
481 if (fd2 >= 0) {
482 if (st->ss->load_super(st, fd2, NULL) == 0) {
483 close(fd2);
484 st->ss->uuid_from_super(st, uuid);
485 break;
486 }
487 close(fd2);
488 }
489 }
490 if (d == max_devs) {
491 pr_err("cannot find UUID for array!\n");
492 return 1;
493 }
494 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid, s->bitmap_chunk,
495 c->delay, s->write_behind, bitmapsize, major)) {
496 return 1;
497 }
498 bitmap_fd = open(s->bitmap_file, O_RDWR);
499 if (bitmap_fd < 0) {
500 pr_err("weird: %s cannot be opened\n",
501 s->bitmap_file);
502 return 1;
503 }
504 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
505 int err = errno;
506 if (errno == EBUSY)
507 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
508 pr_err("Cannot set bitmap file for %s: %s\n",
509 devname, strerror(err));
510 return 1;
511 }
512 }
513
514 return 0;
515 }
516
517 /*
518 * When reshaping an array we might need to backup some data.
519 * This is written to all spares with a 'super_block' describing it.
520 * The superblock goes 4K from the end of the used space on the
521 * device.
522 * It is written after the backup is complete.
523 * It has the following structure.
524 */
525
526 static struct mdp_backup_super {
527 char magic[16]; /* md_backup_data-1 or -2 */
528 __u8 set_uuid[16];
529 __u64 mtime;
530 /* start/sizes in 512byte sectors */
531 __u64 devstart; /* address on backup device/file of data */
532 __u64 arraystart;
533 __u64 length;
534 __u32 sb_csum; /* csum of preceding bytes. */
535 __u32 pad1;
536 __u64 devstart2; /* offset in to data of second section */
537 __u64 arraystart2;
538 __u64 length2;
539 __u32 sb_csum2; /* csum of preceding bytes. */
540 __u8 pad[512-68-32];
541 } __attribute__((aligned(512))) bsb, bsb2;
542
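/*
 * Note: the loop below adds buf[0] (not buf[i]) on every iteration, so
 * only the first byte and the buffer length feed into the checksum.
 * Changing this would invalidate checksums already stored in backup
 * files written by existing versions.
 */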
543 static __u32 bsb_csum(char *buf, int len)
544 {
545 int i;
546 int csum = 0;
547 for (i = 0; i < len; i++)
548 csum = (csum<<3) + buf[0];
549 return __cpu_to_le32(csum);
550 }
551
552 static int check_idle(struct supertype *st)
553 {
554 /* Check that all member arrays for this container, or the
555 * container of this array, are idle
556 */
557 char *container = (st->container_devnm[0]
558 ? st->container_devnm : st->devnm);
559 struct mdstat_ent *ent, *e;
560 int is_idle = 1;
561
562 ent = mdstat_read(0, 0);
563 for (e = ent ; e; e = e->next) {
564 if (!is_container_member(e, container))
565 continue;
566 if (e->percent >= 0) {
567 is_idle = 0;
568 break;
569 }
570 }
571 free_mdstat(ent);
572 return is_idle;
573 }
574
575 static int freeze_container(struct supertype *st)
576 {
577 char *container = (st->container_devnm[0]
578 ? st->container_devnm : st->devnm);
579
580 if (!check_idle(st))
581 return -1;
582
583 if (block_monitor(container, 1)) {
584 pr_err("failed to freeze container\n");
585 return -2;
586 }
587
588 return 1;
589 }
590
591 static void unfreeze_container(struct supertype *st)
592 {
593 char *container = (st->container_devnm[0]
594 ? st->container_devnm : st->devnm);
595
596 unblock_monitor(container, 1);
597 }
598
599 static int freeze(struct supertype *st)
600 {
601 /* Try to freeze resync/rebuild on this array/container.
602 * Return -1 if the array is busy,
603 * return -2 if the container cannot be frozen,
604 * return 0 if this kernel doesn't support 'frozen'
605 * return 1 if it worked.
606 */
607 if (st->ss->external)
608 return freeze_container(st);
609 else {
610 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
611 int err;
612 char buf[20];
613
614 if (!sra)
615 return -1;
616 /* Need to clear any 'read-auto' status */
617 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
618 strncmp(buf, "read-auto", 9) == 0)
619 sysfs_set_str(sra, NULL, "array_state", "clean");
620
621 err = sysfs_freeze_array(sra);
622 sysfs_free(sra);
623 return err;
624 }
625 }
626
627 static void unfreeze(struct supertype *st)
628 {
629 if (st->ss->external)
630 return unfreeze_container(st);
631 else {
632 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
633 char buf[20];
634
635 if (sra &&
636 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0
637 && strcmp(buf, "frozen\n") == 0)
638 sysfs_set_str(sra, NULL, "sync_action", "idle");
639 sysfs_free(sra);
640 }
641 }
642
643 static void wait_reshape(struct mdinfo *sra)
644 {
645 int fd = sysfs_get_fd(sra, NULL, "sync_action");
646 char action[20];
647
648 if (fd < 0)
649 return;
650
651 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
652 strncmp(action, "reshape", 7) == 0)
653 sysfs_wait(fd, NULL);
654 close(fd);
655 }
656
657 static int reshape_super(struct supertype *st, unsigned long long size,
658 int level, int layout, int chunksize, int raid_disks,
659 int delta_disks, char *backup_file, char *dev,
660 int direction, int verbose)
661 {
662 /* nothing extra to check in the native case */
663 if (!st->ss->external)
664 return 0;
665 if (!st->ss->reshape_super ||
666 !st->ss->manage_reshape) {
667 pr_err("%s metadata does not support reshape\n",
668 st->ss->name);
669 return 1;
670 }
671
672 return st->ss->reshape_super(st, size, level, layout, chunksize,
673 raid_disks, delta_disks, backup_file, dev,
674 direction, verbose);
675 }
676
677 static void sync_metadata(struct supertype *st)
678 {
679 if (st->ss->external) {
680 if (st->update_tail) {
681 flush_metadata_updates(st);
682 st->update_tail = &st->updates;
683 } else
684 st->ss->sync_metadata(st);
685 }
686 }
687
688 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
689 {
690 /* when dealing with external metadata subarrays we need to be
691 * prepared to handle EAGAIN. The kernel may need to wait for
692 * mdmon to mark the array active so the kernel can handle
693 * allocations/writeback when preparing the reshape action
694 * (md_allow_write()). We temporarily disable safe_mode_delay
695 * to close a race with the array_state going clean before the
696 * next write to raid_disks / stripe_cache_size
697 */
698 char safe[50];
699 int rc;
700
701 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
702 if (!container ||
703 (strcmp(name, "raid_disks") != 0 &&
704 strcmp(name, "stripe_cache_size") != 0))
705 return sysfs_set_num(sra, NULL, name, n);
706
707 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
708 if (rc <= 0)
709 return -1;
710 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
711 rc = sysfs_set_num(sra, NULL, name, n);
712 if (rc < 0 && errno == EAGAIN) {
713 ping_monitor(container);
714 /* if we get EAGAIN here then the monitor is not active
715 * so stop trying
716 */
717 rc = sysfs_set_num(sra, NULL, name, n);
718 }
719 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
720 return rc;
721 }
722
723 int start_reshape(struct mdinfo *sra, int already_running,
724 int before_data_disks, int data_disks)
725 {
726 int err;
727 unsigned long long sync_max_to_set;
728
729 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
730 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
731 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
732 sra->reshape_progress);
733 if (before_data_disks <= data_disks)
734 sync_max_to_set = sra->reshape_progress / data_disks;
735 else
736 sync_max_to_set = (sra->component_size * data_disks
737 - sra->reshape_progress) / data_disks;
738 if (!already_running)
739 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
740 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
741 if (!already_running && err == 0) {
742 int cnt = 5;
743 do {
744 err = sysfs_set_str(sra, NULL, "sync_action", "reshape");
745 if (err)
746 sleep(1);
747 } while (err && errno == EBUSY && cnt-- > 0);
748 }
749 return err;
750 }
751
752 void abort_reshape(struct mdinfo *sra)
753 {
754 sysfs_set_str(sra, NULL, "sync_action", "idle");
755 /*
756 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
757 * suspend_hi to decrease as well as increase.")
758 * you could only increase suspend_{lo,hi} unless the region they
759 * covered was empty. So to reset to 0, you need to push suspend_lo
760 * up past suspend_hi first. So to maximize the chance of mdadm
761 * working on all kernels, we want to keep doing that.
762 */
763 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
764 sysfs_set_num(sra, NULL, "suspend_hi", 0);
765 sysfs_set_num(sra, NULL, "suspend_lo", 0);
766 sysfs_set_num(sra, NULL, "sync_min", 0);
767 // It isn't safe to reset sync_max as we aren't monitoring.
768 // Array really should be stopped at this point.
769 }
770
771 int remove_disks_for_takeover(struct supertype *st,
772 struct mdinfo *sra,
773 int layout)
774 {
775 int nr_of_copies;
776 struct mdinfo *remaining;
777 int slot;
778
779 if (sra->array.level == 10)
780 nr_of_copies = layout & 0xff;
781 else if (sra->array.level == 1)
782 nr_of_copies = sra->array.raid_disks;
783 else
784 return 1;
785
786 remaining = sra->devs;
787 sra->devs = NULL;
788 /* for each 'copy', select one device and remove from the list. */
789 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
790 struct mdinfo **diskp;
791 int found = 0;
792
793 /* Find a working device to keep */
794 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
795 struct mdinfo *disk = *diskp;
796
797 if (disk->disk.raid_disk < slot)
798 continue;
799 if (disk->disk.raid_disk >= slot + nr_of_copies)
800 continue;
801 if (disk->disk.state & (1<<MD_DISK_REMOVED))
802 continue;
803 if (disk->disk.state & (1<<MD_DISK_FAULTY))
804 continue;
805 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
806 continue;
807
808 /* We have found a good disk to use! */
809 *diskp = disk->next;
810 disk->next = sra->devs;
811 sra->devs = disk;
812 found = 1;
813 break;
814 }
815 if (!found)
816 break;
817 }
818
819 if (slot < sra->array.raid_disks) {
820 /* didn't find all slots */
821 struct mdinfo **e;
822 e = &remaining;
823 while (*e)
824 e = &(*e)->next;
825 *e = sra->devs;
826 sra->devs = remaining;
827 return 1;
828 }
829
830 /* Remove all 'remaining' devices from the array */
831 while (remaining) {
832 struct mdinfo *sd = remaining;
833 remaining = sd->next;
834
835 sysfs_set_str(sra, sd, "state", "faulty");
836 sysfs_set_str(sra, sd, "slot", "none");
837 /* for external metadata, disks should be removed by mdmon */
838 if (!st->ss->external)
839 sysfs_set_str(sra, sd, "state", "remove");
840 sd->disk.state |= (1<<MD_DISK_REMOVED);
841 sd->disk.state &= ~(1<<MD_DISK_SYNC);
842 sd->next = sra->devs;
843 sra->devs = sd;
844 }
845 return 0;
846 }
847
848 void reshape_free_fdlist(int *fdlist,
849 unsigned long long *offsets,
850 int size)
851 {
852 int i;
853
854 for (i = 0; i < size; i++)
855 if (fdlist[i] >= 0)
856 close(fdlist[i]);
857
858 free(fdlist);
859 free(offsets);
860 }
861
862 int reshape_prepare_fdlist(char *devname,
863 struct mdinfo *sra,
864 int raid_disks,
865 int nrdisks,
866 unsigned long blocks,
867 char *backup_file,
868 int *fdlist,
869 unsigned long long *offsets)
870 {
871 int d = 0;
872 struct mdinfo *sd;
873
874 enable_fds(nrdisks);
875 for (d = 0; d <= nrdisks; d++)
876 fdlist[d] = -1;
877 d = raid_disks;
878 for (sd = sra->devs; sd; sd = sd->next) {
879 if (sd->disk.state & (1<<MD_DISK_FAULTY))
880 continue;
881 if (sd->disk.state & (1<<MD_DISK_SYNC) &&
882 sd->disk.raid_disk < raid_disks) {
883 char *dn = map_dev(sd->disk.major,
884 sd->disk.minor, 1);
885 fdlist[sd->disk.raid_disk]
886 = dev_open(dn, O_RDONLY);
887 offsets[sd->disk.raid_disk] = sd->data_offset*512;
888 if (fdlist[sd->disk.raid_disk] < 0) {
889 pr_err("%s: cannot open component %s\n",
890 devname, dn ? dn : "-unknown-");
891 d = -1;
892 goto release;
893 }
894 } else if (backup_file == NULL) {
895 /* spare */
896 char *dn = map_dev(sd->disk.major,
897 sd->disk.minor, 1);
898 fdlist[d] = dev_open(dn, O_RDWR);
899 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
900 if (fdlist[d] < 0) {
901 pr_err("%s: cannot open component %s\n",
902 devname, dn ? dn : "-unknown-");
903 d = -1;
904 goto release;
905 }
906 d++;
907 }
908 }
909 release:
910 return d;
911 }
912
913 int reshape_open_backup_file(char *backup_file,
914 int fd,
915 char *devname,
916 long blocks,
917 int *fdlist,
918 unsigned long long *offsets,
919 char *sys_name,
920 int restart)
921 {
922 /* Return 1 on success, 0 on any form of failure */
923 /* need to check backup file is large enough */
924 char buf[512];
925 struct stat stb;
926 unsigned int dev;
927 int i;
928
929 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
930 S_IRUSR | S_IWUSR);
931 *offsets = 8 * 512;
932 if (*fdlist < 0) {
933 pr_err("%s: cannot create backup file %s: %s\n",
934 devname, backup_file, strerror(errno));
935 return 0;
936 }
937 /* Guard against backup file being on array device.
938 * If array is partitioned or if LVM etc is in the
939 * way this will not notice, but it is better than
940 * nothing.
941 */
942 fstat(*fdlist, &stb);
943 dev = stb.st_dev;
944 fstat(fd, &stb);
945 if (stb.st_rdev == dev) {
946 pr_err("backup file must NOT be on the array being reshaped.\n");
947 close(*fdlist);
948 return 0;
949 }
950
951 memset(buf, 0, 512);
952 for (i=0; i < blocks + 8 ; i++) {
953 if (write(*fdlist, buf, 512) != 512) {
954 pr_err("%s: cannot create backup file %s: %s\n",
955 devname, backup_file, strerror(errno));
956 return 0;
957 }
958 }
959 if (fsync(*fdlist) != 0) {
960 pr_err("%s: cannot create backup file %s: %s\n",
961 devname, backup_file, strerror(errno));
962 return 0;
963 }
964
965 if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) {
966 char *bu = make_backup(sys_name);
967 if (symlink(backup_file, bu))
968 pr_err("Recording backup file in " MAP_DIR " failed: %s\n",
969 strerror(errno));
970 free(bu);
971 }
972
973 return 1;
974 }
975
976 unsigned long compute_backup_blocks(int nchunk, int ochunk,
977 unsigned int ndata, unsigned int odata)
978 {
979 unsigned long a, b, blocks;
980 /* So how much do we need to back up?
981 * We need an amount of data which is both a whole number of
982 * old stripes and a whole number of new stripes.
983 * So take the LCM of (chunksize*datadisks) for the old and new layouts.
984 */
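/*
 * Worked example (illustrative numbers only): going from 3 data disks
 * with 64K chunks to 4 data disks with 128K chunks gives
 *     a = (65536/512) * 3  = 384 sectors  (one old stripe)
 *     b = (131072/512) * 4 = 1024 sectors (one new stripe)
 * GCD(384, 1024) = 128, so blocks = 384 * 1024 / 128 = 3072 sectors
 * (1.5MiB), the smallest amount that is a whole number of both old
 * and new stripes.
 */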
985 a = (ochunk/512) * odata;
986 b = (nchunk/512) * ndata;
987 /* Find GCD */
988 a = GCD(a, b);
989 /* LCM == product / GCD */
990 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
991
992 return blocks;
993 }
994
995 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
996 {
997 /* Based on the current array state in info->array and
998 * the changes in info->new_* etc, determine:
999 * - whether the change is possible
1000 * - Intermediate level/raid_disks/layout
1001 * - whether a restriping reshape is needed
1002 * - number of sectors in minimum change unit. This
1003 * will cover a whole number of stripes in 'before' and
1004 * 'after'.
1005 *
1006 * Return message if the change should be rejected
1007 * NULL if the change can be achieved
1008 *
1009 * This can be called as part of starting a reshape, or
1010 * when assembling an array that is undergoing reshape.
1011 */
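/*
 * Illustrative walk-through (made-up array, not taken from this file):
 * growing a 4-device RAID5 to 5 devices with an unchanged chunk size
 * ends up with re->level = 5, re->before.data_disks = 3,
 * re->after.data_disks = 4, identical before/after layouts and a
 * non-zero backup_blocks, i.e. a restriping reshape that needs a backup.
 */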
1012 int near, far, offset, copies;
1013 int new_disks;
1014 int old_chunk, new_chunk;
1015 /* delta_parity records change in number of devices
1016 * caused by level change
1017 */
1018 int delta_parity = 0;
1019
1020 memset(re, 0, sizeof(*re));
1021
1022 /* If a new level is not explicitly given, we assume no change */
1023 if (info->new_level == UnSet)
1024 info->new_level = info->array.level;
1025
1026 if (info->new_chunk)
1027 switch (info->new_level) {
1028 case 0:
1029 case 4:
1030 case 5:
1031 case 6:
1032 case 10:
1033 /* chunk size is meaningful, must divide component_size
1034 * evenly
1035 */
1036 if (info->component_size % (info->new_chunk/512)) {
1037 unsigned long long shrink = info->component_size;
1038 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1039 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1040 info->new_chunk/1024, info->component_size/2);
1041 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1042 devname, shrink/2);
1043 pr_err("will shrink the array so the given chunk size would work.\n");
1044 return "";
1045 }
1046 break;
1047 default:
1048 return "chunk size not meaningful for this level";
1049 }
1050 else
1051 info->new_chunk = info->array.chunk_size;
1052
1053 switch (info->array.level) {
1054 default:
1055 return "No reshape is possible for this RAID level";
1056 case LEVEL_LINEAR:
1057 if (info->delta_disks != UnSet)
1058 return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
1059 else
1060 return "Only --add is supported for LINEAR, other --grow options are not meaningful";
1061 case 1:
1062 /* RAID1 can convert to RAID1 with different disks, or
1063 * raid5 with 2 disks, or
1064 * raid0 with 1 disk
1065 */
1066 if (info->new_level > 1 &&
1067 (info->component_size & 7))
1068 return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
1069 if (info->new_level == 0) {
1070 if (info->delta_disks != UnSet &&
1071 info->delta_disks != 0)
1072 return "Cannot change number of disks with RAID1->RAID0 conversion";
1073 re->level = 0;
1074 re->before.data_disks = 1;
1075 re->after.data_disks = 1;
1076 return NULL;
1077 }
1078 if (info->new_level == 1) {
1079 if (info->delta_disks == UnSet)
1080 /* Don't know what to do */
1081 return "no change requested for Growing RAID1";
1082 re->level = 1;
1083 return NULL;
1084 }
1085 if (info->array.raid_disks != 2 &&
1086 info->new_level == 5)
1087 return "Can only convert a 2-device array to RAID5";
1088 if (info->array.raid_disks == 2 &&
1089 info->new_level == 5) {
1090
1091 re->level = 5;
1092 re->before.data_disks = 1;
1093 if (info->delta_disks != UnSet &&
1094 info->delta_disks != 0)
1095 re->after.data_disks = 1 + info->delta_disks;
1096 else
1097 re->after.data_disks = 1;
1098 if (re->after.data_disks < 1)
1099 return "Number of disks too small for RAID5";
1100
1101 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1102 info->array.chunk_size = 65536;
1103 break;
1104 }
1105 /* Could do some multi-stage conversions, but leave that to
1106 * later.
1107 */
1108 return "Impossible level change request for RAID1";
1109
1110 case 10:
1111 /* RAID10 can be converted from near mode to
1112 * RAID0 by removing some devices.
1113 * It can also be reshaped if the kernel supports
1114 * new_data_offset.
1115 */
1116 switch (info->new_level) {
1117 case 0:
1118 if ((info->array.layout & ~0xff) != 0x100)
1119 return "Cannot Grow RAID10 with far/offset layout";
1120 /* number of devices must be multiple of number of copies */
1121 if (info->array.raid_disks % (info->array.layout & 0xff))
1122 return "RAID10 layout too complex for Grow operation";
1123
1124 new_disks = (info->array.raid_disks
1125 / (info->array.layout & 0xff));
1126 if (info->delta_disks == UnSet)
1127 info->delta_disks = (new_disks
1128 - info->array.raid_disks);
1129
1130 if (info->delta_disks != new_disks - info->array.raid_disks)
1131 return "New number of raid-devices impossible for RAID10";
1132 if (info->new_chunk &&
1133 info->new_chunk != info->array.chunk_size)
1134 return "Cannot change chunk-size with RAID10 Grow";
1135
1136 /* looks good */
1137 re->level = 0;
1138 re->before.data_disks = new_disks;
1139 re->after.data_disks = re->before.data_disks;
1140 return NULL;
1141
1142 case 10:
1143 near = info->array.layout & 0xff;
1144 far = (info->array.layout >> 8) & 0xff;
1145 offset = info->array.layout & 0x10000;
1146 if (far > 1 && !offset)
1147 return "Cannot reshape RAID10 in far-mode";
1148 copies = near * far;
1149
1150 old_chunk = info->array.chunk_size * far;
1151
1152 if (info->new_layout == UnSet)
1153 info->new_layout = info->array.layout;
1154 else {
1155 near = info->new_layout & 0xff;
1156 far = (info->new_layout >> 8) & 0xff;
1157 offset = info->new_layout & 0x10000;
1158 if (far > 1 && !offset)
1159 return "Cannot reshape RAID10 to far-mode";
1160 if (near * far != copies)
1161 return "Cannot change number of copies when reshaping RAID10";
1162 }
1163 if (info->delta_disks == UnSet)
1164 info->delta_disks = 0;
1165 new_disks = (info->array.raid_disks +
1166 info->delta_disks);
1167
1168 new_chunk = info->new_chunk * far;
1169
1170 re->level = 10;
1171 re->before.layout = info->array.layout;
1172 re->before.data_disks = info->array.raid_disks;
1173 re->after.layout = info->new_layout;
1174 re->after.data_disks = new_disks;
1175 /* For RAID10 we don't do backup but do allow reshape,
1176 * so set backup_blocks to INVALID_SECTORS rather than
1177 * zero.
1178 * And there is no need to synchronise stripes on both
1179 * 'old' and 'new'. So the important
1180 * number is the minimum data_offset difference
1181 * which is the larger of the old and new (copies * chunk) values.
1182 */
1183 re->backup_blocks = INVALID_SECTORS;
1184 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1185 if (new_disks < re->before.data_disks &&
1186 info->space_after < re->min_offset_change)
1187 /* Reduce component size by one chunk */
1188 re->new_size = (info->component_size -
1189 re->min_offset_change);
1190 else
1191 re->new_size = info->component_size;
1192 re->new_size = re->new_size * new_disks / copies;
1193 return NULL;
1194
1195 default:
1196 return "RAID10 can only be changed to RAID0";
1197 }
1198 case 0:
1199 /* RAID0 can be converted to RAID10, or to RAID456 */
1200 if (info->new_level == 10) {
1201 if (info->new_layout == UnSet && info->delta_disks == UnSet) {
1202 /* Assume near=2 layout */
1203 info->new_layout = 0x102;
1204 info->delta_disks = info->array.raid_disks;
1205 }
1206 if (info->new_layout == UnSet) {
1207 int copies = 1 + (info->delta_disks
1208 / info->array.raid_disks);
1209 if (info->array.raid_disks * (copies-1)
1210 != info->delta_disks)
1211 return "Impossible number of devices for RAID0->RAID10";
1212 info->new_layout = 0x100 + copies;
1213 }
1214 if (info->delta_disks == UnSet) {
1215 int copies = info->new_layout & 0xff;
1216 if (info->new_layout != 0x100 + copies)
1217 return "New layout impossible for RAID0->RAID10";
1218 info->delta_disks = (copies - 1) *
1219 info->array.raid_disks;
1220 }
1221 if (info->new_chunk &&
1222 info->new_chunk != info->array.chunk_size)
1223 return "Cannot change chunk-size with RAID0->RAID10";
1224 /* looks good */
1225 re->level = 10;
1226 re->before.data_disks = (info->array.raid_disks +
1227 info->delta_disks);
1228 re->after.data_disks = re->before.data_disks;
1229 re->before.layout = info->new_layout;
1230 return NULL;
1231 }
1232
1233 /* RAID0 can also convert to RAID0/4/5/6 by first converting to
1234 * a raid4 style layout of the final level.
1235 */
1236 switch (info->new_level) {
1237 case 4:
1238 delta_parity = 1;
1239 case 0:
1240 re->level = 4;
1241 re->before.layout = 0;
1242 break;
1243 case 5:
1244 delta_parity = 1;
1245 re->level = 5;
1246 re->before.layout = ALGORITHM_PARITY_N;
1247 if (info->new_layout == UnSet)
1248 info->new_layout = map_name(r5layout, "default");
1249 break;
1250 case 6:
1251 delta_parity = 2;
1252 re->level = 6;
1253 re->before.layout = ALGORITHM_PARITY_N;
1254 if (info->new_layout == UnSet)
1255 info->new_layout = map_name(r6layout, "default");
1256 break;
1257 default:
1258 return "Impossible level change requested";
1259 }
1260 re->before.data_disks = info->array.raid_disks;
1261 /* determining 'after' layout happens outside this 'switch' */
1262 break;
1263
1264 case 4:
1265 info->array.layout = ALGORITHM_PARITY_N;
1266 case 5:
1267 switch (info->new_level) {
1268 case 0:
1269 delta_parity = -1;
1270 case 4:
1271 re->level = info->array.level;
1272 re->before.data_disks = info->array.raid_disks - 1;
1273 re->before.layout = info->array.layout;
1274 break;
1275 case 5:
1276 re->level = 5;
1277 re->before.data_disks = info->array.raid_disks - 1;
1278 re->before.layout = info->array.layout;
1279 break;
1280 case 6:
1281 delta_parity = 1;
1282 re->level = 6;
1283 re->before.data_disks = info->array.raid_disks - 1;
1284 switch (info->array.layout) {
1285 case ALGORITHM_LEFT_ASYMMETRIC:
1286 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1287 break;
1288 case ALGORITHM_RIGHT_ASYMMETRIC:
1289 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1290 break;
1291 case ALGORITHM_LEFT_SYMMETRIC:
1292 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1293 break;
1294 case ALGORITHM_RIGHT_SYMMETRIC:
1295 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1296 break;
1297 case ALGORITHM_PARITY_0:
1298 re->before.layout = ALGORITHM_PARITY_0_6;
1299 break;
1300 case ALGORITHM_PARITY_N:
1301 re->before.layout = ALGORITHM_PARITY_N_6;
1302 break;
1303 default:
1304 return "Cannot convert an array with this layout";
1305 }
1306 break;
1307 case 1:
1308 if (info->array.raid_disks != 2)
1309 return "Can only convert a 2-device array to RAID1";
1310 if (info->delta_disks != UnSet &&
1311 info->delta_disks != 0)
1312 return "Cannot set raid_disk when converting RAID5->RAID1";
1313 re->level = 1;
1314 info->new_chunk = 0;
1315 return NULL;
1316 default:
1317 return "Impossible level change requested";
1318 }
1319 break;
1320 case 6:
1321 switch (info->new_level) {
1322 case 4:
1323 case 5:
1324 delta_parity = -1;
1325 case 6:
1326 re->level = 6;
1327 re->before.data_disks = info->array.raid_disks - 2;
1328 re->before.layout = info->array.layout;
1329 break;
1330 default:
1331 return "Impossible level change requested";
1332 }
1333 break;
1334 }
1335
1336 /* If we reached here then it looks like a re-stripe is
1337 * happening. We have determined the intermediate level
1338 * and initial raid_disks/layout and stored these in 're'.
1339 *
1340 * We need to deduce the final layout that can be atomically
1341 * converted to the end state.
1342 */
1343 switch (info->new_level) {
1344 case 0:
1345 /* We can only get to RAID0 from RAID4 or RAID5
1346 * with appropriate layout and one extra device
1347 */
1348 if (re->level != 4 && re->level != 5)
1349 return "Cannot convert to RAID0 from this level";
1350
1351 switch (re->level) {
1352 case 4:
1353 re->before.layout = 0;
1354 re->after.layout = 0;
1355 break;
1356 case 5:
1357 re->after.layout = ALGORITHM_PARITY_N;
1358 break;
1359 }
1360 break;
1361
1362 case 4:
1363 /* We can only get to RAID4 from RAID5 */
1364 if (re->level != 4 && re->level != 5)
1365 return "Cannot convert to RAID4 from this level";
1366
1367 switch (re->level) {
1368 case 4:
1369 re->after.layout = 0;
1370 break;
1371 case 5:
1372 re->after.layout = ALGORITHM_PARITY_N;
1373 break;
1374 }
1375 break;
1376
1377 case 5:
1378 /* We get to RAID5 from RAID5 or RAID6 */
1379 if (re->level != 5 && re->level != 6)
1380 return "Cannot convert to RAID5 from this level";
1381
1382 switch (re->level) {
1383 case 5:
1384 if (info->new_layout == UnSet)
1385 re->after.layout = re->before.layout;
1386 else
1387 re->after.layout = info->new_layout;
1388 break;
1389 case 6:
1390 if (info->new_layout == UnSet)
1391 info->new_layout = re->before.layout;
1392
1393 /* after.layout needs to be raid6 version of new_layout */
1394 if (info->new_layout == ALGORITHM_PARITY_N)
1395 re->after.layout = ALGORITHM_PARITY_N;
1396 else {
1397 char layout[40];
1398 char *ls = map_num(r5layout, info->new_layout);
1399 int l;
1400 if (ls) {
1401 /* Current RAID6 layout has a RAID5
1402 * equivalent - good
1403 */
1404 strcat(strcpy(layout, ls), "-6");
1405 l = map_name(r6layout, layout);
1406 if (l == UnSet)
1407 return "Cannot find RAID6 layout to convert to";
1408 } else {
1409 /* Current RAID6 has no equivalent.
1410 * If it is already a '-6' layout we
1411 * can leave it unchanged, else we must
1412 * fail
1413 */
1414 ls = map_num(r6layout, info->new_layout);
1415 if (!ls ||
1416 strcmp(ls+strlen(ls)-2, "-6") != 0)
1417 return "Please specify new layout";
1418 l = info->new_layout;
1419 }
1420 re->after.layout = l;
1421 }
1422 }
1423 break;
1424
1425 case 6:
1426 /* We must already be at level 6 */
1427 if (re->level != 6)
1428 return "Impossible level change";
1429 if (info->new_layout == UnSet)
1430 re->after.layout = info->array.layout;
1431 else
1432 re->after.layout = info->new_layout;
1433 break;
1434 default:
1435 return "Impossible level change requested";
1436 }
1437 if (info->delta_disks == UnSet)
1438 info->delta_disks = delta_parity;
1439
1440 re->after.data_disks = (re->before.data_disks
1441 + info->delta_disks
1442 - delta_parity);
1443 switch (re->level) {
1444 case 6: re->parity = 2;
1445 break;
1446 case 4:
1447 case 5: re->parity = 1;
1448 break;
1449 default: re->parity = 0;
1450 break;
1451 }
1452 /* So we have a restripe operation, we need to calculate the number
1453 * of blocks per reshape operation.
1454 */
1455 re->new_size = info->component_size * re->before.data_disks;
1456 if (info->new_chunk == 0)
1457 info->new_chunk = info->array.chunk_size;
1458 if (re->after.data_disks == re->before.data_disks &&
1459 re->after.layout == re->before.layout &&
1460 info->new_chunk == info->array.chunk_size) {
1461 /* Nothing to change, can change level immediately. */
1462 re->level = info->new_level;
1463 re->backup_blocks = 0;
1464 return NULL;
1465 }
1466 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1467 /* chunk and layout changes make no difference */
1468 re->level = info->new_level;
1469 re->backup_blocks = 0;
1470 return NULL;
1471 }
1472
1473 if (re->after.data_disks == re->before.data_disks &&
1474 get_linux_version() < 2006032)
1475 return "in-place reshape is not safe before 2.6.32 - sorry.";
1476
1477 if (re->after.data_disks < re->before.data_disks &&
1478 get_linux_version() < 2006030)
1479 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1480
1481 re->backup_blocks = compute_backup_blocks(
1482 info->new_chunk, info->array.chunk_size,
1483 re->after.data_disks,
1484 re->before.data_disks);
1485 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1486
1487 re->new_size = info->component_size * re->after.data_disks;
1488 return NULL;
1489 }
1490
1491 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1492 char *text_version)
1493 {
1494 struct mdinfo *info;
1495 char *subarray;
1496 int ret_val = -1;
1497
1498 if ((st == NULL) || (sra == NULL))
1499 return ret_val;
1500
1501 if (text_version == NULL)
1502 text_version = sra->text_version;
1503 subarray = strchr(text_version+1, '/')+1;
1504 info = st->ss->container_content(st, subarray);
1505 if (info) {
1506 unsigned long long current_size = 0;
1507 unsigned long long new_size =
1508 info->custom_array_size/2;
1509
1510 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1511 new_size > current_size) {
1512 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1513 < 0)
1514 dprintf("Error: Cannot set array size");
1515 else {
1516 ret_val = 0;
1517 dprintf("Array size changed");
1518 }
1519 dprintf_cont(" from %llu to %llu.\n",
1520 current_size, new_size);
1521 }
1522 sysfs_free(info);
1523 } else
1524 dprintf("Error: set_array_size(): info pointer is NULL\n");
1525
1526 return ret_val;
1527 }
1528
1529 static int reshape_array(char *container, int fd, char *devname,
1530 struct supertype *st, struct mdinfo *info,
1531 int force, struct mddev_dev *devlist,
1532 unsigned long long data_offset,
1533 char *backup_file, int verbose, int forked,
1534 int restart, int freeze_reshape);
1535 static int reshape_container(char *container, char *devname,
1536 int mdfd,
1537 struct supertype *st,
1538 struct mdinfo *info,
1539 int force,
1540 char *backup_file, int verbose,
1541 int forked, int restart, int freeze_reshape);
1542
1543 int Grow_reshape(char *devname, int fd,
1544 struct mddev_dev *devlist,
1545 unsigned long long data_offset,
1546 struct context *c, struct shape *s)
1547 {
1548 /* Make some changes in the shape of an array.
1549 * The kernel must support the change.
1550 *
1551 * There are three different changes. Each can trigger
1552 * a resync or recovery so we freeze that until we have
1553 * requested everything (if kernel supports freezing - 2.6.30).
1554 * The steps are:
1555 * - change size (i.e. component_size)
1556 * - change level
1557 * - change layout/chunksize/ndisks
1558 *
1559 * The last can require a reshape. It is different on different
1560 * levels so we need to check the level before actioning it.
1561 * Sometimes the level change needs to be requested after the
1562 * reshape (e.g. raid6->raid5, raid5->raid0)
1563 *
1564 */
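/*
 * Illustrative command lines for the three kinds of change (device
 * names and values are examples only):
 *
 *     mdadm --grow /dev/md0 --size=max
 *     mdadm --grow /dev/md0 --level=6 --raid-devices=5
 *     mdadm --grow /dev/md0 --chunk=128 --backup-file=/root/md0.backup
 */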
1565 struct mdu_array_info_s array;
1566 int rv = 0;
1567 struct supertype *st;
1568 char *subarray = NULL;
1569
1570 int frozen;
1571 int changed = 0;
1572 char *container = NULL;
1573 int cfd = -1;
1574
1575 struct mddev_dev *dv;
1576 int added_disks;
1577
1578 struct mdinfo info;
1579 struct mdinfo *sra;
1580
1581 if (ioctl(fd, GET_ARRAY_INFO, &array) < 0) {
1582 pr_err("%s is not an active md array - aborting\n",
1583 devname);
1584 return 1;
1585 }
1586 if (data_offset != INVALID_SECTORS && array.level != 10
1587 && (array.level < 4 || array.level > 6)) {
1588 pr_err("--grow --data-offset not yet supported\n");
1589 return 1;
1590 }
1591
1592 if (s->size > 0 &&
1593 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1594 pr_err("cannot change component size at the same time as other changes.\n"
1595 " Change size first, then check data is intact before making other changes.\n");
1596 return 1;
1597 }
1598
1599 if (s->raiddisks && s->raiddisks < array.raid_disks && array.level > 1 &&
1600 get_linux_version() < 2006032 &&
1601 !check_env("MDADM_FORCE_FEWER")) {
1602 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1603 " Please use a newer kernel\n");
1604 return 1;
1605 }
1606
1607 st = super_by_fd(fd, &subarray);
1608 if (!st) {
1609 pr_err("Unable to determine metadata format for %s\n", devname);
1610 return 1;
1611 }
1612 if (s->raiddisks > st->max_devs) {
1613 pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs);
1614 return 1;
1615 }
1616 if (s->level == 0 &&
1617 (array.state & (1<<MD_SB_BITMAP_PRESENT)) &&
1618 !(array.state & (1<<MD_SB_CLUSTERED))) {
1619 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
1620 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
1621 pr_err("failed to remove internal bitmap.\n");
1622 return 1;
1623 }
1624 }
1625
1626 /* in the external case we need to check that the requested reshape is
1627 * supported, and perform an initial check that the container holds the
1628 * pre-requisite spare devices (mdmon owns final validation)
1629 */
1630 if (st->ss->external) {
1631 int rv;
1632
1633 if (subarray) {
1634 container = st->container_devnm;
1635 cfd = open_dev_excl(st->container_devnm);
1636 } else {
1637 container = st->devnm;
1638 close(fd);
1639 cfd = open_dev_excl(st->devnm);
1640 fd = cfd;
1641 }
1642 if (cfd < 0) {
1643 pr_err("Unable to open container for %s\n",
1644 devname);
1645 free(subarray);
1646 return 1;
1647 }
1648
1649 rv = st->ss->load_container(st, cfd, NULL);
1650
1651 if (rv) {
1652 pr_err("Cannot read superblock for %s\n",
1653 devname);
1654 free(subarray);
1655 return 1;
1656 }
1657
1658 /* check if operation is supported for metadata handler */
1659 if (st->ss->container_content) {
1660 struct mdinfo *cc = NULL;
1661 struct mdinfo *content = NULL;
1662
1663 cc = st->ss->container_content(st, subarray);
1664 for (content = cc; content ; content = content->next) {
1665 int allow_reshape = 1;
1666
1667 /* check if reshape is allowed based on metadata
1668 * indications stored in content.array.status
1669 */
1670 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
1671 allow_reshape = 0;
1672 if (content->array.state
1673 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE))
1674 allow_reshape = 0;
1675 if (!allow_reshape) {
1676 pr_err("cannot reshape arrays in container with unsupported metadata: %s(%s)\n",
1677 devname, container);
1678 sysfs_free(cc);
1679 free(subarray);
1680 return 1;
1681 }
1682 }
1683 sysfs_free(cc);
1684 }
1685 if (mdmon_running(container))
1686 st->update_tail = &st->updates;
1687 }
1688
1689 added_disks = 0;
1690 for (dv = devlist; dv; dv = dv->next)
1691 added_disks++;
1692 if (s->raiddisks > array.raid_disks &&
1693 array.spare_disks +added_disks < (s->raiddisks - array.raid_disks) &&
1694 !c->force) {
1695 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
1696 " Use --force to over-ride this check.\n",
1697 s->raiddisks - array.raid_disks,
1698 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1699 array.spare_disks + added_disks);
1700 return 1;
1701 }
1702
1703 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS
1704 | GET_STATE | GET_VERSION);
1705 if (sra) {
1706 if (st->ss->external && subarray == NULL) {
1707 array.level = LEVEL_CONTAINER;
1708 sra->array.level = LEVEL_CONTAINER;
1709 }
1710 } else {
1711 pr_err("failed to read sysfs parameters for %s\n",
1712 devname);
1713 return 1;
1714 }
1715 frozen = freeze(st);
1716 if (frozen < -1) {
1717 /* freeze() already spewed the reason */
1718 sysfs_free(sra);
1719 return 1;
1720 } else if (frozen < 0) {
1721 pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname);
1722 sysfs_free(sra);
1723 return 1;
1724 }
1725
1726 /* ========= set size =============== */
1727 if (s->size > 0 && (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1728 unsigned long long orig_size = get_component_size(fd)/2;
1729 unsigned long long min_csize;
1730 struct mdinfo *mdi;
1731 int raid0_takeover = 0;
1732
1733 if (orig_size == 0)
1734 orig_size = (unsigned) array.size;
1735
1736 if (orig_size == 0) {
1737 pr_err("Cannot set device size in this type of array.\n");
1738 rv = 1;
1739 goto release;
1740 }
1741
1742 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1743 devname, APPLY_METADATA_CHANGES, c->verbose > 0)) {
1744 rv = 1;
1745 goto release;
1746 }
1747 sync_metadata(st);
1748 if (st->ss->external) {
1749 /* metadata can impose a size limitation;
1750 * update the size value according to the metadata information
1751 */
1752 struct mdinfo *sizeinfo =
1753 st->ss->container_content(st, subarray);
1754 if (sizeinfo) {
1755 unsigned long long new_size =
1756 sizeinfo->custom_array_size/2;
1757 int data_disks = get_data_disks(
1758 sizeinfo->array.level,
1759 sizeinfo->array.layout,
1760 sizeinfo->array.raid_disks);
1761 new_size /= data_disks;
1762 dprintf("Metadata size correction from %llu to %llu (%llu)\n", orig_size, new_size,
1763 new_size * data_disks);
1764 s->size = new_size;
1765 sysfs_free(sizeinfo);
1766 }
1767 }
1768
1769 /* Update the size of each member device in case
1770 * they have been resized. This will never reduce
1771 * below the current used-size. The "size" attribute
1772 * understands '0' to mean 'max'.
1773 */
1774 min_csize = 0;
1775 rv = 0;
1776 for (mdi = sra->devs; mdi; mdi = mdi->next) {
1777 if (sysfs_set_num(sra, mdi, "size",
1778 s->size == MAX_SIZE ? 0 : s->size) < 0) {
1779 /* Probably kernel refusing to let us
1780 * reduce the size - not an error.
1781 */
1782 break;
1783 }
1784 if (array.not_persistent == 0 &&
1785 array.major_version == 0 &&
1786 get_linux_version() < 3001000) {
1787 /* Dangerous to allow size to exceed 2TB */
1788 unsigned long long csize;
1789 if (sysfs_get_ll(sra, mdi, "size", &csize) == 0) {
1790 if (csize >= 2ULL*1024*1024*1024)
1791 csize = 2ULL*1024*1024*1024;
1792 if ((min_csize == 0 || (min_csize
1793 > csize)))
1794 min_csize = csize;
1795 }
1796 }
1797 }
1798 if (rv) {
1799 pr_err("Cannot set size on array members.\n");
1800 goto size_change_error;
1801 }
1802 if (min_csize && s->size > min_csize) {
1803 pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n");
1804 rv = 1;
1805 goto size_change_error;
1806 }
1807 if (min_csize && s->size == MAX_SIZE) {
1808 /* Don't let the kernel choose a size - it will get
1809 * it wrong
1810 */
1811 pr_err("Limited v0.90 array to 2TB per device\n");
1812 s->size = min_csize;
1813 }
1814 if (st->ss->external) {
1815 if (sra->array.level == 0) {
1816 rv = sysfs_set_str(sra, NULL, "level",
1817 "raid5");
1818 if (!rv) {
1819 raid0_takeover = 1;
1820 /* get array parameters after takeover
1821 * to change one parameter at a time only
1822 */
1823 rv = ioctl(fd, GET_ARRAY_INFO, &array);
1824 }
1825 }
1826 /* make sure mdmon is
1827 * aware of the new level */
1828 if (!mdmon_running(st->container_devnm))
1829 start_mdmon(st->container_devnm);
1830 ping_monitor(container);
1831 if (mdmon_running(st->container_devnm) &&
1832 st->update_tail == NULL)
1833 st->update_tail = &st->updates;
1834 }
1835
1836 if (s->size == MAX_SIZE)
1837 s->size = 0;
1838 array.size = s->size;
1839 if (s->size & ~INT32_MAX) {
1840 /* got truncated to 32bit, write to
1841 * component_size instead
1842 */
1843 if (sra)
1844 rv = sysfs_set_num(sra, NULL,
1845 "component_size", s->size);
1846 else
1847 rv = -1;
1848 } else {
1849 rv = ioctl(fd, SET_ARRAY_INFO, &array);
1850
1851 /* manage array size when it is managed externally
1852 */
1853 if ((rv == 0) && st->ss->external)
1854 rv = set_array_size(st, sra, sra->text_version);
1855 }
1856
1857 if (raid0_takeover) {
1858 /* do not resync non-existing parity,
1859 * we will drop it anyway
1860 */
1861 sysfs_set_str(sra, NULL, "sync_action", "frozen");
1862 /* go back to raid0, drop parity disk
1863 */
1864 sysfs_set_str(sra, NULL, "level", "raid0");
1865 ioctl(fd, GET_ARRAY_INFO, &array);
1866 }
1867
1868 size_change_error:
1869 if (rv != 0) {
1870 int err = errno;
1871
1872 /* restore metadata */
1873 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
1874 UnSet, NULL, devname,
1875 ROLLBACK_METADATA_CHANGES,
1876 c->verbose) == 0)
1877 sync_metadata(st);
1878 pr_err("Cannot set device size for %s: %s\n",
1879 devname, strerror(err));
1880 if (err == EBUSY &&
1881 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
1882 cont_err("Bitmap must be removed before size can be changed\n");
1883 rv = 1;
1884 goto release;
1885 }
1886 if (s->assume_clean) {
1887 /* This will fail on kernels older than 3.0 unless
1888 * a backport has been arranged.
1889 */
1890 if (sra == NULL ||
1891 sysfs_set_str(sra, NULL, "resync_start", "none") < 0)
1892 pr_err("--assume-clean not supported with --grow on this kernel\n");
1893 }
1894 ioctl(fd, GET_ARRAY_INFO, &array);
1895 s->size = get_component_size(fd)/2;
1896 if (s->size == 0)
1897 s->size = array.size;
1898 if (c->verbose >= 0) {
1899 if (s->size == orig_size)
1900 pr_err("component size of %s unchanged at %lluK\n",
1901 devname, s->size);
1902 else
1903 pr_err("component size of %s has been set to %lluK\n",
1904 devname, s->size);
1905 }
1906 changed = 1;
1907 } else if (array.level != LEVEL_CONTAINER) {
1908 s->size = get_component_size(fd)/2;
1909 if (s->size == 0)
1910 s->size = array.size;
1911 }
1912
1913 /* See if there is anything else to do */
1914 if ((s->level == UnSet || s->level == array.level) &&
1915 (s->layout_str == NULL) &&
1916 (s->chunk == 0 || s->chunk == array.chunk_size) &&
1917 data_offset == INVALID_SECTORS &&
1918 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
1919 /* Nothing more to do */
1920 if (!changed && c->verbose >= 0)
1921 pr_err("%s: no change requested\n",
1922 devname);
1923 goto release;
1924 }
1925
1926 /* ========= check for Raid10/Raid1 -> Raid0 conversion ===============
1927 * the current implementation assumes that the following conditions are met:
1928 * - RAID10:
1929 * - far_copies == 1
1930 * - near_copies == 2
1931 */
1932 if ((s->level == 0 && array.level == 10 && sra &&
1933 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
1934 (s->level == 0 && array.level == 1 && sra)) {
1935 int err;
1936 err = remove_disks_for_takeover(st, sra, array.layout);
1937 if (err) {
1938 dprintf("Array cannot be reshaped\n");
1939 if (cfd > -1)
1940 close(cfd);
1941 rv = 1;
1942 goto release;
1943 }
1944 /* Make sure mdmon has seen the device removal
1945 * and updated metadata before we continue with
1946 * level change
1947 */
1948 if (container)
1949 ping_monitor(container);
1950 }
1951
1952 memset(&info, 0, sizeof(info));
1953 info.array = array;
1954 sysfs_init(&info, fd, NULL);
1955 strcpy(info.text_version, sra->text_version);
1956 info.component_size = s->size*2;
1957 info.new_level = s->level;
1958 info.new_chunk = s->chunk * 1024;
1959 if (info.array.level == LEVEL_CONTAINER) {
1960 info.delta_disks = UnSet;
1961 info.array.raid_disks = s->raiddisks;
1962 } else if (s->raiddisks)
1963 info.delta_disks = s->raiddisks - info.array.raid_disks;
1964 else
1965 info.delta_disks = UnSet;
1966 if (s->layout_str == NULL) {
1967 info.new_layout = UnSet;
1968 if (info.array.level == 6 &&
1969 (info.new_level == 6 || info.new_level == UnSet) &&
1970 info.array.layout >= 16) {
1971 pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname);
1972 cont_err("during the reshape, please specify --layout=preserve\n");
1973 cont_err("If you want to change it, specify a layout or use --layout=normalise\n");
1974 rv = 1;
1975 goto release;
1976 }
1977 } else if (strcmp(s->layout_str, "normalise") == 0 ||
1978 strcmp(s->layout_str, "normalize") == 0) {
1979 /* If we have a -6 RAID6 layout, remove the '-6'. */
1980 info.new_layout = UnSet;
1981 if (info.array.level == 6 && info.new_level == UnSet) {
1982 char l[40], *h;
1983 strcpy(l, map_num(r6layout, info.array.layout));
1984 h = strrchr(l, '-');
1985 if (h && strcmp(h, "-6") == 0) {
1986 *h = 0;
1987 info.new_layout = map_name(r6layout, l);
1988 }
1989 } else {
1990 pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str);
1991 rv = 1;
1992 goto release;
1993 }
1994 } else if (strcmp(s->layout_str, "preserve") == 0) {
1995 /* This means that a non-standard RAID6 layout
1996 * is OK.
1997 * In particular:
1998 * - When reshaping a RAID6 (e.g. adding a device)
1999 * which is in a non-standard layout, it is OK
2000 * to preserve that layout.
2001 * - When converting a RAID5 to RAID6, leave it in
2002 * the XXX-6 layout, don't re-layout.
2003 */
2004 if (info.array.level == 6 && info.new_level == UnSet)
2005 info.new_layout = info.array.layout;
2006 else if (info.array.level == 5 && info.new_level == 6) {
2007 char l[40];
2008 strcpy(l, map_num(r5layout, info.array.layout));
2009 strcat(l, "-6");
2010 info.new_layout = map_name(r6layout, l);
2011 } else {
2012 pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str);
2013 rv = 1;
2014 goto release;
2015 }
2016 } else {
2017 int l = info.new_level;
2018 if (l == UnSet)
2019 l = info.array.level;
2020 switch (l) {
2021 case 5:
2022 info.new_layout = map_name(r5layout, s->layout_str);
2023 break;
2024 case 6:
2025 info.new_layout = map_name(r6layout, s->layout_str);
2026 break;
2027 case 10:
2028 info.new_layout = parse_layout_10(s->layout_str);
2029 break;
2030 case LEVEL_FAULTY:
2031 info.new_layout = parse_layout_faulty(s->layout_str);
2032 break;
2033 default:
2034 pr_err("layout not meaningful with this level\n");
2035 rv = 1;
2036 goto release;
2037 }
2038 if (info.new_layout == UnSet) {
2039 pr_err("layout %s not understood for this level\n",
2040 s->layout_str);
2041 rv = 1;
2042 goto release;
2043 }
2044 }
2045
2046 if (array.level == LEVEL_FAULTY) {
2047 if (s->level != UnSet && s->level != array.level) {
2048 pr_err("cannot change level of Faulty device\n");
2049 rv = 1;
2050 }
2051 if (s->chunk) {
2052 pr_err("cannot set chunksize of Faulty device\n");
2053 rv = 1;
2054 }
2055 if (s->raiddisks && s->raiddisks != 1) {
2056 pr_err("cannot set raid_disks of Faulty device\n");
2057 rv = 1;
2058 }
2059 if (s->layout_str) {
2060 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2061 dprintf("Cannot get array information.\n");
2062 goto release;
2063 }
2064 array.layout = info.new_layout;
2065 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2066 pr_err("failed to set new layout\n");
2067 rv = 1;
2068 } else if (c->verbose >= 0)
2069 printf("layout for %s set to %d\n",
2070 devname, array.layout);
2071 }
2072 } else if (array.level == LEVEL_CONTAINER) {
2073 /* This change is to be applied to every array in the
2074 * container. This is only needed when the metadata imposes
2075 * restraints on the various arrays in the container.
2076 * Currently we only know that IMSM requires all arrays
2077 * to have the same number of devices so changing the
2078 * number of devices (On-Line Capacity Expansion) must be
2079 * performed at the level of the container
2080 */
2081 if (fd > 0) {
2082 close(fd);
2083 fd = -1;
2084 }
2085 rv = reshape_container(container, devname, -1, st, &info,
2086 c->force, c->backup_file, c->verbose, 0, 0, 0);
2087 frozen = 0;
2088 } else {
2089 /* get spare devices from external metadata
2090 */
2091 if (st->ss->external) {
2092 struct mdinfo *info2;
2093
2094 info2 = st->ss->container_content(st, subarray);
2095 if (info2) {
2096 info.array.spare_disks =
2097 info2->array.spare_disks;
2098 sysfs_free(info2);
2099 }
2100 }
2101
2102 /* Impose these changes on a single array. First
2103 * check that the metadata is OK with the change. */
2104
2105 if (reshape_super(st, 0, info.new_level,
2106 info.new_layout, info.new_chunk,
2107 info.array.raid_disks, info.delta_disks,
2108 c->backup_file, devname, APPLY_METADATA_CHANGES,
2109 c->verbose)) {
2110 rv = 1;
2111 goto release;
2112 }
2113 sync_metadata(st);
2114 rv = reshape_array(container, fd, devname, st, &info, c->force,
2115 devlist, data_offset, c->backup_file, c->verbose,
2116 0, 0, 0);
2117 frozen = 0;
2118 }
2119 release:
2120 sysfs_free(sra);
2121 if (frozen > 0)
2122 unfreeze(st);
2123 return rv;
2124 }
2125
2126 /* verify_reshape_position()
2127 * Function checks if reshape position in metadata is not farther
2128 * than position in md.
2129 * Return value:
2130 * 0 : not valid sysfs entry
2131 * it can be caused by a reshape that has not started yet (it should
2132 * then be started by reshape_array()), or by a raid0 array before takeover
2133 * -1 : error, reshape position is obviously wrong
2134 * 1 : success, reshape progress correct or updated
2135 */
2136 static int verify_reshape_position(struct mdinfo *info, int level)
2137 {
2138 int ret_val = 0;
2139 char buf[40];
2140 int rv;
2141
2142 /* read sync_max, failure can mean raid0 array */
2143 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2144
2145 if (rv > 0) {
2146 char *ep;
2147 unsigned long long position = strtoull(buf, &ep, 0);
2148
2149 dprintf("Read sync_max sysfs entry is: %s\n", buf);
2150 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2151 position *= get_data_disks(level,
2152 info->new_layout,
2153 info->array.raid_disks);
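/* sync_max is a per-device sector count; scaling it by the
 * number of data disks gives an array address that can be
 * compared with reshape_progress below.
 */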
2154 if (info->reshape_progress < position) {
2155 dprintf("Corrected reshape progress (%llu) to md position (%llu)\n",
2156 info->reshape_progress, position);
2157 info->reshape_progress = position;
2158 ret_val = 1;
2159 } else if (info->reshape_progress > position) {
2160 pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n",
2161 position, info->reshape_progress);
2162 ret_val = -1;
2163 } else {
2164 dprintf("Reshape position in md and metadata are the same\n");
2165 ret_val = 1;
2166 }
2167 }
2168 } else if (rv == 0) {
2169 /* for a valid sysfs entry, 0-length content
2170 * should be treated as an error
2171 */
2172 ret_val = -1;
2173 }
2174
2175 return ret_val;
2176 }
2177
2178 static unsigned long long choose_offset(unsigned long long lo,
2179 unsigned long long hi,
2180 unsigned long long min,
2181 unsigned long long max)
2182 {
2183 /* Choose a new offset between hi and lo.
2184 * It must be between min and max, but
2185 * we would prefer something near the middle of hi/lo, and also
2186 * prefer to be aligned to a big power of 2.
2187 *
2188 * So we start with the middle, then for each bit,
2189 * starting at '1' and increasing, if it is set, we either
2190 * add it or subtract it if possible, preferring the option
2191 * which is furthest from the boundary.
2192 *
2193 * We stop once we get a 1MB alignment. As units are in sectors,
2194 * 1MB = 2*1024 sectors.
2195 */
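/* Illustrative trace (hypothetical values, not from any real array):
 * with lo = min = 1000 and hi = max = 9000, choice starts at 5000.
 * Bit 8 is set: 5008 and 4992 are equally far from the limits, so
 * choice becomes 4992. Bit 128 is set: 5120 is further from the
 * limits than 4864, so choice becomes 5120. Bit 1024 is set: 4096 is
 * further from the limits than 6144, so choice becomes 4096, which
 * is 1MB-aligned and is returned.
 */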
2196 unsigned long long choice = (lo + hi) / 2;
2197 unsigned long long bit = 1;
2198
2199 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2200 unsigned long long bigger, smaller;
2201 if (! (bit & choice))
2202 continue;
2203 bigger = choice + bit;
2204 smaller = choice - bit;
2205 if (bigger > max && smaller < min)
2206 break;
2207 if (bigger > max)
2208 choice = smaller;
2209 else if (smaller < min)
2210 choice = bigger;
2211 else if (hi - bigger > smaller - lo)
2212 choice = bigger;
2213 else
2214 choice = smaller;
2215 }
2216 return choice;
2217 }
2218
2219 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2220 char *devname, int delta_disks,
2221 unsigned long long data_offset,
2222 unsigned long long min,
2223 int can_fallback)
2224 {
2225 struct mdinfo *sd;
2226 int dir = 0;
2227 int err = 0;
2228 unsigned long long before, after;
2229
2230 /* Need to find the min space before and after so the same is used
2231 * on all devices
2232 */
2233 before = UINT64_MAX;
2234 after = UINT64_MAX;
2235 for (sd = sra->devs; sd; sd = sd->next) {
2236 char *dn;
2237 int dfd;
2238 int rv;
2239 struct supertype *st2;
2240 struct mdinfo info2;
2241
2242 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2243 continue;
2244 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2245 dfd = dev_open(dn, O_RDONLY);
2246 if (dfd < 0) {
2247 pr_err("%s: cannot open component %s\n",
2248 devname, dn ? dn : "-unknown-");
2249 goto release;
2250 }
2251 st2 = dup_super(st);
2252 rv = st2->ss->load_super(st2, dfd, NULL);
2253 close(dfd);
2254 if (rv) {
2255 free(st2);
2256 pr_err("%s: cannot get superblock from %s\n",
2257 devname, dn);
2258 goto release;
2259 }
2260 st2->ss->getinfo_super(st2, &info2, NULL);
2261 st2->ss->free_super(st2);
2262 free(st2);
2263 if (info2.space_before == 0 &&
2264 info2.space_after == 0) {
2265 /* Metadata doesn't support data_offset changes */
2266 if (!can_fallback)
2267 pr_err("%s: Metadata version doesn't support data_offset changes\n",
2268 devname);
2269 goto fallback;
2270 }
2271 if (before > info2.space_before)
2272 before = info2.space_before;
2273 if (after > info2.space_after)
2274 after = info2.space_after;
2275
2276 if (data_offset != INVALID_SECTORS) {
2277 if (dir == 0) {
2278 if (info2.data_offset == data_offset) {
2279 pr_err("%s: already has that data_offset\n",
2280 dn);
2281 goto release;
2282 }
2283 if (data_offset < info2.data_offset)
2284 dir = -1;
2285 else
2286 dir = 1;
2287 } else if ((data_offset <= info2.data_offset && dir == 1) ||
2288 (data_offset >= info2.data_offset && dir == -1)) {
2289 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2290 dn);
2291 goto release;
2292 }
2293 }
2294 }
2295 if (before == UINT64_MAX)
2296 /* impossible really, there must be no devices */
2297 return 1;
2298
2299 for (sd = sra->devs; sd; sd = sd->next) {
2300 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2301 unsigned long long new_data_offset;
2302
2303 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2304 continue;
2305 if (delta_disks < 0) {
2306 /* Don't need any space as array is shrinking
2307 * just move data_offset up by min
2308 */
2309 if (data_offset == INVALID_SECTORS)
2310 new_data_offset = sd->data_offset + min;
2311 else {
2312 if (data_offset < sd->data_offset + min) {
2313 pr_err("--data-offset too small for %s\n",
2314 dn);
2315 goto release;
2316 }
2317 new_data_offset = data_offset;
2318 }
2319 } else if (delta_disks > 0) {
2320 /* need space before */
2321 if (before < min) {
2322 if (can_fallback)
2323 goto fallback;
2324 pr_err("Insufficient head-space for reshape on %s\n",
2325 dn);
2326 goto release;
2327 }
2328 if (data_offset == INVALID_SECTORS)
2329 new_data_offset = sd->data_offset - min;
2330 else {
2331 if (data_offset > sd->data_offset - min) {
2332 pr_err("--data-offset too large for %s\n",
2333 dn);
2334 goto release;
2335 }
2336 new_data_offset = data_offset;
2337 }
2338 } else {
2339 if (dir == 0) {
2340 /* can move up or down. If 'data_offset'
2341 * was set we would have already decided,
2342 * so just choose the direction with the most space.
2343 */
2344 if (before > after)
2345 dir = -1;
2346 else
2347 dir = 1;
2348 }
2349 sysfs_set_str(sra, NULL, "reshape_direction",
2350 dir == 1 ? "backwards" : "forwards");
2351 if (dir > 0) {
2352 /* Increase data offset */
2353 if (after < min) {
2354 if (can_fallback)
2355 goto fallback;
2356 pr_err("Insufficient tail-space for reshape on %s\n",
2357 dn);
2358 goto release;
2359 }
2360 if (data_offset != INVALID_SECTORS &&
2361 data_offset < sd->data_offset + min) {
2362 pr_err("--data-offset too small on %s\n",
2363 dn);
2364 goto release;
2365 }
2366 if (data_offset != INVALID_SECTORS)
2367 new_data_offset = data_offset;
2368 else
2369 new_data_offset = choose_offset(sd->data_offset,
2370 sd->data_offset + after,
2371 sd->data_offset + min,
2372 sd->data_offset + after);
2373 } else {
2374 /* Decrease data offset */
2375 if (before < min) {
2376 if (can_fallback)
2377 goto fallback;
2378 pr_err("insufficient head-room on %s\n",
2379 dn);
2380 goto release;
2381 }
2382 if (data_offset != INVALID_SECTORS &&
2383 data_offset < sd->data_offset - min) {
2384 pr_err("--data-offset too small on %s\n",
2385 dn);
2386 goto release;
2387 }
2388 if (data_offset != INVALID_SECTORS)
2389 new_data_offset = data_offset;
2390 else
2391 new_data_offset = choose_offset(sd->data_offset - before,
2392 sd->data_offset,
2393 sd->data_offset - before,
2394 sd->data_offset - min);
2395 }
2396 }
2397 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2398 if (err < 0 && errno == E2BIG) {
2399 /* try again after increasing data size to max */
2400 err = sysfs_set_num(sra, sd, "size", 0);
2401 if (err < 0 && errno == EINVAL &&
2402 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2403 /* some kernels have a bug where you cannot
2404 * use '0' on spare devices. */
2405 sysfs_set_num(sra, sd, "size",
2406 (sra->component_size + after)/2);
2407 }
2408 err = sysfs_set_num(sra, sd, "new_offset",
2409 new_data_offset);
2410 }
2411 if (err < 0) {
2412 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2413 pr_err("data-offset is too big for %s\n",
2414 dn);
2415 goto release;
2416 }
2417 if (sd == sra->devs &&
2418 (errno == ENOENT || errno == E2BIG))
2419 /* Early kernel, no 'new_offset' file,
2420 * or kernel doesn't like us.
2421 * For RAID5/6 this is not fatal
2422 */
2423 return 1;
2424 pr_err("Cannot set new_offset for %s\n",
2425 dn);
2426 break;
2427 }
2428 }
2429 return err;
2430 release:
2431 return -1;
2432 fallback:
2433 /* Just use a backup file */
2434 return 1;
2435 }
2436
2437 static int raid10_reshape(char *container, int fd, char *devname,
2438 struct supertype *st, struct mdinfo *info,
2439 struct reshape *reshape,
2440 unsigned long long data_offset,
2441 int force, int verbose)
2442 {
2443 /* Changing raid_disks, layout, chunksize or possibly
2444 * just data_offset for a RAID10.
2445 * We must always change data_offset. We change by at least
2446 * ->min_offset_change which is the largest of the old and new
2447 * chunk sizes.
2448 * If raid_disks is increasing, then data_offset must decrease
2449 * by at least this copy size.
2450 * If raid_disks is unchanged, data_offset must increase or
2451 * decrease by at least min_offset_change but preferably by much more.
2452 * We choose half of the available space.
2453 * If raid_disks is decreasing, data_offset must increase by
2454 * at least min_offset_change. To allow for this, component_size
2455 * must be decreased by the same amount.
2456 *
2457 * So we calculate the required minimum and direction, possibly
2458 * reduce the component_size, then iterate through the devices
2459 * and set the new_data_offset.
2460 * If that all works, we set chunk_size, layout, raid_disks, and start
2461 * 'reshape'
2462 */
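/* For example (hypothetical numbers): growing a RAID10 with a 512K
 * chunk adds devices, so min_offset_change is 1024 sectors and each
 * member's data_offset must move down by at least that much, which
 * in turn requires at least 1024 sectors of free space in front of
 * the data on every member.
 */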
2463 struct mdinfo *sra;
2464 unsigned long long min;
2465 int err = 0;
2466
2467 sra = sysfs_read(fd, NULL,
2468 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2469 );
2470 if (!sra) {
2471 pr_err("%s: Cannot get array details from sysfs\n",
2472 devname);
2473 goto release;
2474 }
2475 min = reshape->min_offset_change;
2476
2477 if (info->delta_disks)
2478 sysfs_set_str(sra, NULL, "reshape_direction",
2479 info->delta_disks < 0 ? "backwards" : "forwards");
2480 if (info->delta_disks < 0 &&
2481 info->space_after < min) {
2482 int rv = sysfs_set_num(sra, NULL, "component_size",
2483 (sra->component_size -
2484 min)/2);
2485 if (rv) {
2486 pr_err("cannot reduce component size\n");
2487 goto release;
2488 }
2489 }
2490 err = set_new_data_offset(sra, st, devname, info->delta_disks, data_offset,
2491 min, 0);
2492 if (err == 1) {
2493 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2494 cont_err("supported on this kernel\n");
2495 err = -1;
2496 }
2497 if (err < 0)
2498 goto release;
2499
2500 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2501 err = errno;
2502 if (!err && sysfs_set_num(sra, NULL, "layout", reshape->after.layout) < 0)
2503 err = errno;
2504 if (!err && sysfs_set_num(sra, NULL, "raid_disks",
2505 info->array.raid_disks + info->delta_disks) < 0)
2506 err = errno;
2507 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2508 err = errno;
2509 if (err) {
2510 pr_err("Cannot set array shape for %s\n",
2511 devname);
2512 if (err == EBUSY &&
2513 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2514 cont_err(" Bitmap must be removed before shape can be changed\n");
2515 goto release;
2516 }
2517 sysfs_free(sra);
2518 return 0;
2519 release:
2520 sysfs_free(sra);
2521 return 1;
2522 }
2523
2524 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2525 {
2526 struct mdinfo *sra, *sd;
2527 /* Initialisation to silence compiler warning */
2528 unsigned long long min_space_before = 0, min_space_after = 0;
2529 int first = 1;
2530
2531 sra = sysfs_read(fd, NULL, GET_DEVS);
2532 if (!sra)
2533 return;
2534 for (sd = sra->devs; sd; sd = sd->next) {
2535 char *dn;
2536 int dfd;
2537 struct supertype *st2;
2538 struct mdinfo info2;
2539
2540 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2541 continue;
2542 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2543 dfd = dev_open(dn, O_RDONLY);
2544 if (dfd < 0)
2545 break;
2546 st2 = dup_super(st);
2547 if (st2->ss->load_super(st2, dfd, NULL)) {
2548 close(dfd);
2549 free(st2);
2550 break;
2551 }
2552 close(dfd);
2553 st2->ss->getinfo_super(st2, &info2, NULL);
2554 st2->ss->free_super(st2);
2555 free(st2);
2556 if (first ||
2557 min_space_before > info2.space_before)
2558 min_space_before = info2.space_before;
2559 if (first ||
2560 min_space_after > info2.space_after)
2561 min_space_after = info2.space_after;
2562 first = 0;
2563 }
2564 if (sd == NULL && !first) {
2565 info->space_after = min_space_after;
2566 info->space_before = min_space_before;
2567 }
2568 sysfs_free(sra);
2569 }
2570
2571 static void update_cache_size(char *container, struct mdinfo *sra,
2572 struct mdinfo *info,
2573 int disks, unsigned long long blocks)
2574 {
2575 /* Check that the internal stripe cache is
2576 * large enough, or it won't work.
2577 * It must hold at least 4 stripes of the larger
2578 * chunk size
2579 */
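/* Worked example with hypothetical numbers: for a 512K old chunk and
 * a 1024K new chunk, cache starts as 1048576 bytes, becomes 8192
 * sectors after the "*4" and "/512" steps below, and ends up as 1024
 * pages, provided 16 + blocks/disks does not exceed 8192.
 */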
2580 unsigned long cache;
2581 cache = max(info->array.chunk_size, info->new_chunk);
2582 cache *= 4; /* 4 stripes minimum */
2583 cache /= 512; /* convert to sectors */
2584 /* make sure there is room for 'blocks' with a bit to spare */
2585 if (cache < 16 + blocks / disks)
2586 cache = 16 + blocks / disks;
2587 cache /= (4096/512); /* Convert from sectors to pages */
2588
2589 if (sra->cache_size < cache)
2590 subarray_set_num(container, sra, "stripe_cache_size",
2591 cache+1);
2592 }
2593
2594 static int impose_reshape(struct mdinfo *sra,
2595 struct mdinfo *info,
2596 struct supertype *st,
2597 int fd,
2598 int restart,
2599 char *devname, char *container,
2600 struct reshape *reshape)
2601 {
2602 struct mdu_array_info_s array;
2603
2604 sra->new_chunk = info->new_chunk;
2605
2606 if (restart) {
2607 /* For external metadata, the checkpoint saved by mdmon can be lost
2608 * or missed (e.g. due to a crash). Check whether md is, during
2609 * restart, farther along than the metadata points to.
2610 * If so, the metadata information is obsolete.
2611 */
2612 if (st->ss->external)
2613 verify_reshape_position(info, reshape->level);
2614 sra->reshape_progress = info->reshape_progress;
2615 } else {
2616 sra->reshape_progress = 0;
2617 if (reshape->after.data_disks < reshape->before.data_disks)
2618 /* start from the end of the new array */
2619 sra->reshape_progress = (sra->component_size
2620 * reshape->after.data_disks);
2621 }
2622
2623 ioctl(fd, GET_ARRAY_INFO, &array);
2624 if (info->array.chunk_size == info->new_chunk &&
2625 reshape->before.layout == reshape->after.layout &&
2626 st->ss->external == 0) {
2627 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2628 array.raid_disks = reshape->after.data_disks + reshape->parity;
2629 if (!restart &&
2630 ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2631 int err = errno;
2632
2633 pr_err("Cannot set device shape for %s: %s\n",
2634 devname, strerror(errno));
2635
2636 if (err == EBUSY &&
2637 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2638 cont_err("Bitmap must be removed before shape can be changed\n");
2639
2640 goto release;
2641 }
2642 } else if (!restart) {
2643 /* set them all just in case some old 'new_*' value
2644 * persists from some earlier problem.
2645 */
2646 int err = 0;
2647 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2648 err = errno;
2649 if (!err && sysfs_set_num(sra, NULL, "layout",
2650 reshape->after.layout) < 0)
2651 err = errno;
2652 if (!err && subarray_set_num(container, sra, "raid_disks",
2653 reshape->after.data_disks +
2654 reshape->parity) < 0)
2655 err = errno;
2656 if (err) {
2657 pr_err("Cannot set device shape for %s\n",
2658 devname);
2659
2660 if (err == EBUSY &&
2661 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2662 cont_err("Bitmap must be removed before shape can be changed\n");
2663 goto release;
2664 }
2665 }
2666 return 0;
2667 release:
2668 return -1;
2669 }
2670
2671 static int impose_level(int fd, int level, char *devname, int verbose)
2672 {
2673 char *c;
2674 struct mdu_array_info_s array;
2675 struct mdinfo info;
2676 sysfs_init(&info, fd, NULL);
2677
2678 ioctl(fd, GET_ARRAY_INFO, &array);
2679 if (level == 0 &&
2680 (array.level >= 4 && array.level <= 6)) {
2681 /* To convert to RAID0 we need to fail and
2682 * remove any non-data devices. */
2683 int found = 0;
2684 int d;
2685 int data_disks = array.raid_disks - 1;
2686 if (array.level == 6)
2687 data_disks -= 1;
2688 if (array.level == 5 &&
2689 array.layout != ALGORITHM_PARITY_N)
2690 return -1;
2691 if (array.level == 6 &&
2692 array.layout != ALGORITHM_PARITY_N_6)
2693 return -1;
2694 sysfs_set_str(&info, NULL, "sync_action", "idle");
2695 /* First remove any spares so no recovery starts */
2696 for (d = 0, found = 0;
2697 d < MAX_DISKS && found < array.nr_disks;
2698 d++) {
2699 mdu_disk_info_t disk;
2700 disk.number = d;
2701 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2702 continue;
2703 if (disk.major == 0 && disk.minor == 0)
2704 continue;
2705 found++;
2706 if ((disk.state & (1 << MD_DISK_ACTIVE))
2707 && disk.raid_disk < data_disks)
2708 /* keep this */
2709 continue;
2710 ioctl(fd, HOT_REMOVE_DISK,
2711 makedev(disk.major, disk.minor));
2712 }
2713 /* Now fail anything left */
2714 ioctl(fd, GET_ARRAY_INFO, &array);
2715 for (d = 0, found = 0;
2716 d < MAX_DISKS && found < array.nr_disks;
2717 d++) {
2718 int cnt;
2719 mdu_disk_info_t disk;
2720 disk.number = d;
2721 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2722 continue;
2723 if (disk.major == 0 && disk.minor == 0)
2724 continue;
2725 found++;
2726 if ((disk.state & (1 << MD_DISK_ACTIVE))
2727 && disk.raid_disk < data_disks)
2728 /* keep this */
2729 continue;
2730 ioctl(fd, SET_DISK_FAULTY,
2731 makedev(disk.major, disk.minor));
2732 cnt = 5;
2733 while (ioctl(fd, HOT_REMOVE_DISK,
2734 makedev(disk.major, disk.minor)) < 0
2735 && errno == EBUSY
2736 && cnt--) {
2737 usleep(10000);
2738 }
2739 }
2740 }
2741 c = map_num(pers, level);
2742 if (c) {
2743 int err = sysfs_set_str(&info, NULL, "level", c);
2744 if (err) {
2745 err = errno;
2746 pr_err("%s: could not set level to %s\n",
2747 devname, c);
2748 if (err == EBUSY &&
2749 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2750 cont_err("Bitmap must be removed before level can be changed\n");
2751 return err;
2752 }
2753 if (verbose >= 0)
2754 pr_err("level of %s changed to %s\n",
2755 devname, c);
2756 }
2757 return 0;
2758 }
2759
2760 int sigterm = 0;
2761 static void catch_term(int sig)
2762 {
2763 sigterm = 1;
2764 }
2765
2766 static int continue_via_systemd(char *devnm)
2767 {
2768 int skipped, i, pid, status;
2769 char pathbuf[1024];
2770 /* In a systemd/udev world, it is best to get systemd to
2771 * run "mdadm --grow --continue" rather than running in the
2772 * background.
2773 */
2774 switch(fork()) {
2775 case 0:
2776 /* FIXME yuk. CLOSE_EXEC?? */
2777 skipped = 0;
2778 for (i = 3; skipped < 20; i++)
2779 if (close(i) < 0)
2780 skipped++;
2781 else
2782 skipped = 0;
2783
2784 /* Don't want to see error messages from
2785 * systemctl. If the service doesn't exist,
2786 * we fork ourselves.
2787 */
2788 close(2);
2789 open("/dev/null", O_WRONLY);
2790 snprintf(pathbuf, sizeof(pathbuf), "mdadm-grow-continue@%s.service",
2791 devnm);
2792 status = execl("/usr/bin/systemctl", "systemctl",
2793 "start",
2794 pathbuf, NULL);
2795 status = execl("/bin/systemctl", "systemctl", "start",
2796 pathbuf, NULL);
2797 exit(1);
2798 case -1: /* Just do it ourselves. */
2799 break;
2800 default: /* parent - good */
2801 pid = wait(&status);
2802 if (pid >= 0 && status == 0)
2803 return 1;
2804 }
2805 return 0;
2806 }
2807
2808 static int reshape_array(char *container, int fd, char *devname,
2809 struct supertype *st, struct mdinfo *info,
2810 int force, struct mddev_dev *devlist,
2811 unsigned long long data_offset,
2812 char *backup_file, int verbose, int forked,
2813 int restart, int freeze_reshape)
2814 {
2815 struct reshape reshape;
2816 int spares_needed;
2817 char *msg;
2818 int orig_level = UnSet;
2819 int odisks;
2820 int delayed;
2821
2822 struct mdu_array_info_s array;
2823 char *c;
2824
2825 struct mddev_dev *dv;
2826 int added_disks;
2827
2828 int *fdlist = NULL;
2829 unsigned long long *offsets = NULL;
2830 int d;
2831 int nrdisks;
2832 int err;
2833 unsigned long blocks;
2834 unsigned long long array_size;
2835 int done;
2836 struct mdinfo *sra = NULL;
2837 char buf[20];
2838
2839 /* when reshaping a RAID0, the component_size might be zero.
2840 * So try to fix that up.
2841 */
2842 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2843 dprintf("Cannot get array information.\n");
2844 goto release;
2845 }
2846 if (array.level == 0 && info->component_size == 0) {
2847 get_dev_size(fd, NULL, &array_size);
2848 info->component_size = array_size / array.raid_disks;
2849 }
2850
2851 if (array.level == 10)
2852 /* Need space_after info */
2853 get_space_after(fd, st, info);
2854
2855 if (info->reshape_active) {
2856 int new_level = info->new_level;
2857 info->new_level = UnSet;
2858 if (info->delta_disks > 0)
2859 info->array.raid_disks -= info->delta_disks;
2860 msg = analyse_change(devname, info, &reshape);
2861 info->new_level = new_level;
2862 if (info->delta_disks > 0)
2863 info->array.raid_disks += info->delta_disks;
2864 if (!restart)
2865 /* Make sure the array isn't read-only */
2866 ioctl(fd, RESTART_ARRAY_RW, 0);
2867 } else
2868 msg = analyse_change(devname, info, &reshape);
2869 if (msg) {
2870 /* if msg == "", error has already been printed */
2871 if (msg[0])
2872 pr_err("%s\n", msg);
2873 goto release;
2874 }
2875 if (restart &&
2876 (reshape.level != info->array.level ||
2877 reshape.before.layout != info->array.layout ||
2878 reshape.before.data_disks + reshape.parity
2879 != info->array.raid_disks - max(0, info->delta_disks))) {
2880 pr_err("reshape info is not in native format - cannot continue.\n");
2881 goto release;
2882 }
2883
2884 if (st->ss->external && restart && (info->reshape_progress == 0) &&
2885 !((sysfs_get_str(info, NULL, "sync_action", buf, sizeof(buf)) > 0) &&
2886 (strncmp(buf, "reshape", 7) == 0))) {
2887 /* When the reshape is restarted from '0', the very beginning of the
2888 * array, it is possible that for external metadata the reshape and
2889 * array configuration have not happened yet.
2890 * Check if md has the same opinion, and the reshape is restarted
2891 * from 0. If so, this is a regular reshape start after the reshape
2892 * switch in metadata to the next array only.
2893 */
2894 if ((verify_reshape_position(info, reshape.level) >= 0) &&
2895 (info->reshape_progress == 0))
2896 restart = 0;
2897 }
2898 if (restart) {
2899 /* reshape already started. just skip to monitoring the reshape */
2900 if (reshape.backup_blocks == 0)
2901 return 0;
2902 if (restart & RESHAPE_NO_BACKUP)
2903 return 0;
2904
2905 /* Need 'sra' down at 'started:' */
2906 sra = sysfs_read(fd, NULL,
2907 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
2908 GET_CACHE);
2909 if (!sra) {
2910 pr_err("%s: Cannot get array details from sysfs\n",
2911 devname);
2912 goto release;
2913 }
2914
2915 if (!backup_file)
2916 backup_file = locate_backup(sra->sys_name);
2917
2918 goto started;
2919 }
2920 /* The container is frozen but the array may not be.
2921 * So freeze the array so spares don't get put to the wrong use
2922 * FIXME there should probably be a cleaner separation between
2923 * freeze_array and freeze_container.
2924 */
2925 sysfs_freeze_array(info);
2926 /* Check we have enough spares to not be degraded */
2927 added_disks = 0;
2928 for (dv = devlist; dv ; dv=dv->next)
2929 added_disks++;
2930 spares_needed = max(reshape.before.data_disks,
2931 reshape.after.data_disks)
2932 + reshape.parity - array.raid_disks;
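/* e.g. (hypothetical) growing a 4-device RAID5 to 6 devices:
 * max(3, 5) + 1 - 4 = 2 spare devices are needed to avoid a
 * degraded result.
 */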
2933
2934 if (!force &&
2935 info->new_level > 1 && info->array.level > 1 &&
2936 spares_needed > info->array.spare_disks + added_disks) {
2937 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
2938 " Use --force to over-ride this check.\n",
2939 spares_needed,
2940 spares_needed == 1 ? "" : "s",
2941 info->array.spare_disks + added_disks);
2942 goto release;
2943 }
2944 /* Check we have enough spares to not fail */
2945 spares_needed = max(reshape.before.data_disks,
2946 reshape.after.data_disks)
2947 - array.raid_disks;
2948 if ((info->new_level > 1 || info->new_level == 0) &&
2949 spares_needed > info->array.spare_disks +added_disks) {
2950 pr_err("Need %d spare%s to create working array, and only have %d.\n",
2951 spares_needed,
2952 spares_needed == 1 ? "" : "s",
2953 info->array.spare_disks + added_disks);
2954 goto release;
2955 }
2956
2957 if (reshape.level != array.level) {
2958 int err = impose_level(fd, reshape.level, devname, verbose);
2959 if (err)
2960 goto release;
2961 info->new_layout = UnSet; /* after level change,
2962 * layout is meaningless */
2963 orig_level = array.level;
2964 sysfs_freeze_array(info);
2965
2966 if (reshape.level > 0 && st->ss->external) {
2967 /* make sure mdmon is aware of the new level */
2968 if (mdmon_running(container))
2969 flush_mdmon(container);
2970
2971 if (!mdmon_running(container))
2972 start_mdmon(container);
2973 ping_monitor(container);
2974 if (mdmon_running(container) &&
2975 st->update_tail == NULL)
2976 st->update_tail = &st->updates;
2977 }
2978 }
2979 /* ->reshape_super might have chosen some spares from the
2980 * container that it wants to be part of the new array.
2981 * We can collect them with ->container_content and give
2982 * them to the kernel.
2983 */
2984 if (st->ss->reshape_super && st->ss->container_content) {
2985 char *subarray = strchr(info->text_version+1, '/')+1;
2986 struct mdinfo *info2 =
2987 st->ss->container_content(st, subarray);
2988 struct mdinfo *d;
2989
2990 if (info2) {
2991 sysfs_init(info2, fd, st->devnm);
2992 /* When increasing number of devices, we need to set
2993 * new raid_disks before adding these, or they might
2994 * be rejected.
2995 */
2996 if (reshape.backup_blocks &&
2997 reshape.after.data_disks > reshape.before.data_disks)
2998 subarray_set_num(container, info2, "raid_disks",
2999 reshape.after.data_disks +
3000 reshape.parity);
3001 for (d = info2->devs; d; d = d->next) {
3002 if (d->disk.state == 0 &&
3003 d->disk.raid_disk >= 0) {
3004 /* This is a spare that wants to
3005 * be part of the array.
3006 */
3007 add_disk(fd, st, info2, d);
3008 }
3009 }
3010 sysfs_free(info2);
3011 }
3012 }
3013 /* We might have been given some devices to add to the
3014 * array. Now that the array has been changed to the right
3015 * level and frozen, we can safely add them.
3016 */
3017 if (devlist) {
3018 if (Manage_subdevs(devname, fd, devlist, verbose,
3019 0, NULL, 0))
3020 goto release;
3021 }
3022
3023 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
3024 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
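/* e.g. (hypothetical) 3 data disks and a 512K chunk give
 * 3 * 1024 = 3072 sectors of backup per reshape unit.
 */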
3025 if (reshape.backup_blocks == 0) {
3026 /* No restriping needed, but we might need to impose
3027 * some more changes: layout, raid_disks, chunk_size
3028 */
3029 /* read current array info */
3030 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
3031 dprintf("Cannot get array information.\n");
3032 goto release;
3033 }
3034 /* compare current array info with new values and if
3035 * it is different update them to new */
3036 if (info->new_layout != UnSet &&
3037 info->new_layout != array.layout) {
3038 array.layout = info->new_layout;
3039 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
3040 pr_err("failed to set new layout\n");
3041 goto release;
3042 } else if (verbose >= 0)
3043 printf("layout for %s set to %d\n",
3044 devname, array.layout);
3045 }
3046 if (info->delta_disks != UnSet &&
3047 info->delta_disks != 0 &&
3048 array.raid_disks != (info->array.raid_disks + info->delta_disks)) {
3049 array.raid_disks += info->delta_disks;
3050 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
3051 pr_err("failed to set raid disks\n");
3052 goto release;
3053 } else if (verbose >= 0) {
3054 printf("raid_disks for %s set to %d\n",
3055 devname, array.raid_disks);
3056 }
3057 }
3058 if (info->new_chunk != 0 &&
3059 info->new_chunk != array.chunk_size) {
3060 if (sysfs_set_num(info, NULL,
3061 "chunk_size", info->new_chunk) != 0) {
3062 pr_err("failed to set chunk size\n");
3063 goto release;
3064 } else if (verbose >= 0)
3065 printf("chunk size for %s set to %d\n",
3066 devname, array.chunk_size);
3067 }
3068 unfreeze(st);
3069 return 0;
3070 }
3071
3072 /*
3073 * There are three possibilities.
3074 * 1/ The array will shrink.
3075 * We need to ensure the reshape will pause before reaching
3076 * the 'critical section'. We also need to fork and wait for
3077 * that to happen. When it does we
3078 * suspend/backup/complete/unfreeze
3079 *
3080 * 2/ The array will not change size.
3081 * This requires that we keep a backup of a sliding window
3082 * so that we can restore data after a crash. So we need
3083 * to fork and monitor progress.
3084 * In future we will allow the data_offset to change, so
3085 * a sliding backup becomes unnecessary.
3086 *
3087 * 3/ The array will grow. This is relatively easy.
3088 * However the kernel's restripe routines will cheerfully
3089 * overwrite some early data before it is safe. So we
3090 * need to make a backup of the early parts of the array
3091 * and be ready to restore it if rebuild aborts very early.
3092 * For externally managed metadata, we still need a forked
3093 * child to monitor the reshape and suspend IO over the region
3094 * that is being reshaped.
3095 *
3096 * We backup data by writing it to one spare, or to a
3097 * file which was given on command line.
3098 *
3099 * In each case, we first make sure that storage is available
3100 * for the required backup.
3101 * Then we:
3102 * - request the shape change.
3103 * - fork to handle backup etc.
3104 */
3105 /* Check that we can hold all the data */
3106 get_dev_size(fd, NULL, &array_size);
3107 if (reshape.new_size < (array_size/512)) {
3108 pr_err("this change will reduce the size of the array.\n"
3109 " use --grow --array-size first to truncate array.\n"
3110 " e.g. mdadm --grow %s --array-size %llu\n",
3111 devname, reshape.new_size/2);
3112 goto release;
3113 }
3114
3115 if (array.level == 10) {
3116 /* Reshaping RAID10 does not require any data backup by
3117 * user-space. Instead it requires that the data_offset
3118 * is changed to avoid the need for backup.
3119 * So this is handled very separately
3120 */
3121 if (restart)
3122 /* Nothing to do. */
3123 return 0;
3124 return raid10_reshape(container, fd, devname, st, info,
3125 &reshape, data_offset,
3126 force, verbose);
3127 }
3128 sra = sysfs_read(fd, NULL,
3129 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3130 GET_CACHE);
3131 if (!sra) {
3132 pr_err("%s: Cannot get array details from sysfs\n",
3133 devname);
3134 goto release;
3135 }
3136
3137 if (!backup_file)
3138 switch(set_new_data_offset(sra, st, devname,
3139 reshape.after.data_disks - reshape.before.data_disks,
3140 data_offset,
3141 reshape.min_offset_change, 1)) {
3142 case -1:
3143 goto release;
3144 case 0:
3145 /* Updated data_offset, so it's easy now */
3146 update_cache_size(container, sra, info,
3147 min(reshape.before.data_disks,
3148 reshape.after.data_disks),
3149 reshape.backup_blocks);
3150
3151 /* Right, everything seems fine. Let's kick things off.
3152 */
3153 sync_metadata(st);
3154
3155 if (impose_reshape(sra, info, st, fd, restart,
3156 devname, container, &reshape) < 0)
3157 goto release;
3158 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3159 struct mdinfo *sd;
3160 if (errno != EINVAL) {
3161 pr_err("Failed to initiate reshape!\n");
3162 goto release;
3163 }
3164 /* revert data_offset and try the old way */
3165 for (sd = sra->devs; sd; sd = sd->next) {
3166 sysfs_set_num(sra, sd, "new_offset",
3167 sd->data_offset);
3168 sysfs_set_str(sra, NULL, "reshape_direction",
3169 "forwards");
3170 }
3171 break;
3172 }
3173 if (info->new_level == reshape.level)
3174 return 0;
3175 /* need to adjust level when reshape completes */
3176 switch(fork()) {
3177 case -1: /* ignore error, but don't wait */
3178 return 0;
3179 default: /* parent */
3180 return 0;
3181 case 0:
3182 map_fork();
3183 break;
3184 }
3185 close(fd);
3186 wait_reshape(sra);
3187 fd = open_dev(sra->sys_name);
3188 if (fd >= 0)
3189 impose_level(fd, info->new_level, devname, verbose);
3190 return 0;
3191 case 1: /* Couldn't set data_offset, try the old way */
3192 if (data_offset != INVALID_SECTORS) {
3193 pr_err("Cannot update data_offset on this array\n");
3194 goto release;
3195 }
3196 break;
3197 }
3198
3199 started:
3200 /* Decide how many blocks (sectors) for a reshape
3201 * unit. The number we have so far is just a minimum
3202 */
3203 blocks = reshape.backup_blocks;
3204 if (reshape.before.data_disks ==
3205 reshape.after.data_disks) {
3206 /* Make 'blocks' bigger for better throughput, but
3207 * not so big that we reject it below.
3208 * Try for 16 megabytes
3209 */
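/* For example (hypothetical numbers): starting from blocks =
 * backup_blocks = 2048 sectors on a sufficiently large array, the
 * loop below doubles blocks up to 32768 sectors (16MB), or stops
 * sooner once 32 * blocks is no longer smaller than the component
 * size.
 */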
3210 while (blocks * 32 < sra->component_size &&
3211 blocks < 16*1024*2)
3212 blocks *= 2;
3213 } else
3214 pr_err("Need to backup %luK of critical section..\n", blocks/2);
3215
3216 if (blocks >= sra->component_size/2) {
3217 pr_err("%s: Something wrong - reshape aborted\n",
3218 devname);
3219 goto release;
3220 }
3221
3222 /* Now we need to open all these devices so we can read/write.
3223 */
3224 nrdisks = max(reshape.before.data_disks,
3225 reshape.after.data_disks) + reshape.parity
3226 + sra->array.spare_disks;
3227 fdlist = xcalloc((1+nrdisks), sizeof(int));
3228 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3229
3230 odisks = reshape.before.data_disks + reshape.parity;
3231 d = reshape_prepare_fdlist(devname, sra, odisks,
3232 nrdisks, blocks, backup_file,
3233 fdlist, offsets);
3234 if (d < odisks) {
3235 goto release;
3236 }
3237 if ((st->ss->manage_reshape == NULL) ||
3238 (st->ss->recover_backup == NULL)) {
3239 if (backup_file == NULL) {
3240 if (reshape.after.data_disks <=
3241 reshape.before.data_disks) {
3242 pr_err("%s: Cannot grow - need backup-file\n",
3243 devname);
3244 pr_err(" Please provide one with \"--backup=...\"\n");
3245 goto release;
3246 } else if (d == odisks) {
3247 pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname);
3248 goto release;
3249 }
3250 } else {
3251 if (!reshape_open_backup_file(backup_file, fd, devname,
3252 (signed)blocks,
3253 fdlist+d, offsets+d,
3254 sra->sys_name,
3255 restart)) {
3256 goto release;
3257 }
3258 d++;
3259 }
3260 }
3261
3262 update_cache_size(container, sra, info,
3263 min(reshape.before.data_disks, reshape.after.data_disks),
3264 blocks);
3265
3266 /* Right, everything seems fine. Let's kick things off.
3267 * If only changing raid_disks, use ioctl, else use
3268 * sysfs.
3269 */
3270 sync_metadata(st);
3271
3272 if (impose_reshape(sra, info, st, fd, restart,
3273 devname, container, &reshape) < 0)
3274 goto release;
3275
3276 err = start_reshape(sra, restart, reshape.before.data_disks,
3277 reshape.after.data_disks);
3278 if (err) {
3279 pr_err("Cannot %s reshape for %s\n",
3280 restart ? "continue" : "start",
3281 devname);
3282 goto release;
3283 }
3284 if (restart)
3285 sysfs_set_str(sra, NULL, "array_state", "active");
3286 if (freeze_reshape) {
3287 free(fdlist);
3288 free(offsets);
3289 sysfs_free(sra);
3290 pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n",
3291 sra->reshape_progress);
3292 return 1;
3293 }
3294
3295 if (!forked && !check_env("MDADM_NO_SYSTEMCTL"))
3296 if (continue_via_systemd(container ?: sra->sys_name)) {
3297 free(fdlist);
3298 free(offsets);
3299 sysfs_free(sra);
3300 return 0;
3301 }
3302
3303 /* Now we just need to kick off the reshape and watch, while
3304 * handling backups of the data...
3305 * This is all done by a forked background process.
3306 */
3307 switch(forked ? 0 : fork()) {
3308 case -1:
3309 pr_err("Cannot run child to monitor reshape: %s\n",
3310 strerror(errno));
3311 abort_reshape(sra);
3312 goto release;
3313 default:
3314 free(fdlist);
3315 free(offsets);
3316 sysfs_free(sra);
3317 return 0;
3318 case 0:
3319 map_fork();
3320 break;
3321 }
3322
3323 /* If another array on the same devices is busy, the
3324 * reshape will wait for it. This would mean that
3325 * the first section that we suspend will stay suspended
3326 * for a long time. So check on that possibility
3327 * by looking for "DELAYED" in /proc/mdstat, and if found,
3328 * wait a while
3329 */
3330 do {
3331 struct mdstat_ent *mds, *m;
3332 delayed = 0;
3333 mds = mdstat_read(1, 0);
3334 for (m = mds; m; m = m->next)
3335 if (strcmp(m->devnm, sra->sys_name) == 0) {
3336 if (m->resync &&
3337 m->percent == RESYNC_DELAYED)
3338 delayed = 1;
3339 if (m->resync == 0)
3340 /* Haven't started the reshape thread
3341 * yet, wait a bit
3342 */
3343 delayed = 2;
3344 break;
3345 }
3346 free_mdstat(mds);
3347 if (delayed == 1 && get_linux_version() < 3007000) {
3348 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3349 " You might experience problems until other reshapes complete.\n");
3350 delayed = 0;
3351 }
3352 if (delayed)
3353 mdstat_wait(30 - (delayed-1) * 25);
3354 } while (delayed);
3355 mdstat_close();
3356 close(fd);
3357 if (check_env("MDADM_GROW_VERIFY"))
3358 fd = open(devname, O_RDONLY | O_DIRECT);
3359 else
3360 fd = -1;
3361 mlockall(MCL_FUTURE);
3362
3363 signal(SIGTERM, catch_term);
3364
3365 if (st->ss->external) {
3366 /* metadata handler takes it from here */
3367 done = st->ss->manage_reshape(
3368 fd, sra, &reshape, st, blocks,
3369 fdlist, offsets,
3370 d - odisks, fdlist+odisks,
3371 offsets+odisks);
3372 } else
3373 done = child_monitor(
3374 fd, sra, &reshape, st, blocks,
3375 fdlist, offsets,
3376 d - odisks, fdlist+odisks,
3377 offsets+odisks);
3378
3379 free(fdlist);
3380 free(offsets);
3381
3382 if (backup_file && done) {
3383 char *bul;
3384 bul = make_backup(sra->sys_name);
3385 if (bul) {
3386 char buf[1024];
3387 int l = readlink(bul, buf, sizeof(buf) - 1);
3388 if (l > 0) {
3389 buf[l]=0;
3390 unlink(buf);
3391 }
3392 unlink(bul);
3393 free(bul);
3394 }
3395 unlink(backup_file);
3396 }
3397 if (!done) {
3398 abort_reshape(sra);
3399 goto out;
3400 }
3401
3402 if (!st->ss->external &&
3403 !(reshape.before.data_disks != reshape.after.data_disks
3404 && info->custom_array_size) &&
3405 info->new_level == reshape.level &&
3406 !forked) {
3407 /* no need to wait for the reshape to finish as
3408 * there is nothing more to do.
3409 */
3410 sysfs_free(sra);
3411 exit(0);
3412 }
3413 wait_reshape(sra);
3414
3415 if (st->ss->external) {
3416 /* Re-load the metadata as much could have changed */
3417 int cfd = open_dev(st->container_devnm);
3418 if (cfd >= 0) {
3419 flush_mdmon(container);
3420 st->ss->free_super(st);
3421 st->ss->load_container(st, cfd, container);
3422 close(cfd);
3423 }
3424 }
3425
3426 /* Set the new array size if required; custom_array_size is used
3427 * by this metadata.
3428 */
3429 if (reshape.before.data_disks !=
3430 reshape.after.data_disks &&
3431 info->custom_array_size)
3432 set_array_size(st, info, info->text_version);
3433
3434 if (info->new_level != reshape.level) {
3435 if (fd < 0)
3436 fd = open(devname, O_RDONLY);
3437 impose_level(fd, info->new_level, devname, verbose);
3438 close(fd);
3439 if (info->new_level == 0)
3440 st->update_tail = NULL;
3441 }
3442 out:
3443 sysfs_free(sra);
3444 if (forked)
3445 return 0;
3446 unfreeze(st);
3447 exit(0);
3448
3449 release:
3450 free(fdlist);
3451 free(offsets);
3452 if (orig_level != UnSet && sra) {
3453 c = map_num(pers, orig_level);
3454 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3455 pr_err("aborting level change\n");
3456 }
3457 sysfs_free(sra);
3458 if (!forked)
3459 unfreeze(st);
3460 return 1;
3461 }
3462
3463 /* mdfd handle is passed to be closed in child process (after fork).
3464 */
3465 int reshape_container(char *container, char *devname,
3466 int mdfd,
3467 struct supertype *st,
3468 struct mdinfo *info,
3469 int force,
3470 char *backup_file, int verbose,
3471 int forked, int restart, int freeze_reshape)
3472 {
3473 struct mdinfo *cc = NULL;
3474 int rv = restart;
3475 char last_devnm[32] = "";
3476
3477 /* component_size is not meaningful for a container,
3478 * so pass '0' meaning 'no change'
3479 */
3480 if (!restart &&
3481 reshape_super(st, 0, info->new_level,
3482 info->new_layout, info->new_chunk,
3483 info->array.raid_disks, info->delta_disks,
3484 backup_file, devname, APPLY_METADATA_CHANGES,
3485 verbose)) {
3486 unfreeze(st);
3487 return 1;
3488 }
3489
3490 sync_metadata(st);
3491
3492 /* ping monitor to be sure that update is on disk
3493 */
3494 ping_monitor(container);
3495
3496 if (!forked && !freeze_reshape && !check_env("MDADM_NO_SYSTEMCTL"))
3497 if (continue_via_systemd(container))
3498 return 0;
3499
3500 switch (forked ? 0 : fork()) {
3501 case -1: /* error */
3502 perror("Cannot fork to complete reshape\n");
3503 unfreeze(st);
3504 return 1;
3505 default: /* parent */
3506 if (!freeze_reshape)
3507 printf("%s: multi-array reshape continues in background\n", Name);
3508 return 0;
3509 case 0: /* child */
3510 map_fork();
3511 break;
3512 }
3513
3514 /* close unused handle in child process
3515 */
3516 if (mdfd > -1)
3517 close(mdfd);
3518
3519 while(1) {
3520 /* For each member array with reshape_active,
3521 * we need to perform the reshape.
3522 * We pick the first array that needs reshaping and
3523 * reshape it. reshape_array() will re-read the metadata
3524 * so the next time through a different array should be
3525 * ready for reshape.
3526 * It is possible that the 'different' array will not
3527 * be assembled yet. In that case we simply exit.
3528 * When it is assembled, the mdadm which assembles it
3529 * will take over the reshape.
3530 */
3531 struct mdinfo *content;
3532 int fd;
3533 struct mdstat_ent *mdstat;
3534 char *adev;
3535 int devid;
3536
3537 sysfs_free(cc);
3538
3539 cc = st->ss->container_content(st, NULL);
3540
3541 for (content = cc; content ; content = content->next) {
3542 char *subarray;
3543 if (!content->reshape_active)
3544 continue;
3545
3546 subarray = strchr(content->text_version+1, '/')+1;
3547 mdstat = mdstat_by_subdev(subarray, container);
3548 if (!mdstat)
3549 continue;
3550 if (mdstat->active == 0) {
3551 pr_err("Skipping inactive array %s.\n",
3552 mdstat->devnm);
3553 free_mdstat(mdstat);
3554 mdstat = NULL;
3555 continue;
3556 }
3557 break;
3558 }
3559 if (!content)
3560 break;
3561
3562 devid = devnm2devid(mdstat->devnm);
3563 adev = map_dev(major(devid), minor(devid), 0);
3564 if (!adev)
3565 adev = content->text_version;
3566
3567 fd = open_dev(mdstat->devnm);
3568 if (fd < 0) {
3569 pr_err("Device %s cannot be opened for reshape.\n", adev);
3570 break;
3571 }
3572
3573 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3574 /* Do not allow for multiple reshape_array() calls for
3575 * the same array.
3576 * It can happen when reshape_array() returns without
3577 * error, when reshape is not finished (wrong reshape
3578 * starting/continuation conditions). Mdmon doesn't
3579 * switch to next array in container and reentry
3580 * conditions for the same array occur.
3581 * This is possibly an interim measure until the behaviour of
3582 * reshape_array() is resolved.
3583 */
3584 printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev);
3585 close(fd);
3586 break;
3587 }
3588 strcpy(last_devnm, mdstat->devnm);
3589
3590 sysfs_init(content, fd, mdstat->devnm);
3591
3592 if (mdmon_running(container))
3593 flush_mdmon(container);
3594
3595 rv = reshape_array(container, fd, adev, st,
3596 content, force, NULL, INVALID_SECTORS,
3597 backup_file, verbose, 1, restart,
3598 freeze_reshape);
3599 close(fd);
3600
3601 if (freeze_reshape) {
3602 sysfs_free(cc);
3603 exit(0);
3604 }
3605
3606 restart = 0;
3607 if (rv)
3608 break;
3609
3610 if (mdmon_running(container))
3611 flush_mdmon(container);
3612 }
3613 if (!rv)
3614 unfreeze(st);
3615 sysfs_free(cc);
3616 exit(0);
3617 }
3618
3619 /*
3620 * We run a child process in the background which performs the following
3621 * steps:
3622 * - wait for resync to reach a certain point
3623 * - suspend io to the following section
3624 * - backup that section
3625 * - allow resync to proceed further
3626 * - resume io
3627 * - discard the backup.
3628 *
3629 * These are combined in slightly different ways in the three cases.
3630 * Grow:
3631 * - suspend/backup/allow/wait/resume/discard
3632 * Shrink:
3633 * - allow/wait/suspend/backup/allow/wait/resume/discard
3634 * same-size:
3635 * - wait/resume/discard/suspend/backup/allow
3636 *
3637 * suspend/backup/allow always come together
3638 * wait/resume/discard do too.
3639 * For the same-size case we have two backups to improve flow.
3640 *
3641 */
3642
3643 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3644 unsigned long long backup_point,
3645 unsigned long long wait_point,
3646 unsigned long long *suspend_point,
3647 unsigned long long *reshape_completed, int *frozen)
3648 {
3649 /* This function is called repeatedly by the reshape manager.
3650 * It determines how much progress can safely be made and allows
3651 * that progress.
3652 * - 'info' identifies the array and particularly records in
3653 * ->reshape_progress the metadata's knowledge of progress
3654 * This is a sector offset from the start of the array
3655 * of the next array block to be relocated. This number
3656 * may increase from 0 or decrease from array_size, depending
3657 * on the type of reshape that is happening.
3658 * Note that in contrast, 'sync_completed' is a block count of the
3659 * reshape so far. It gives the distance between the start point
3660 * (head or tail of device) and the next place that data will be
3661 * written. It always increases.
3662 * - 'reshape' is the structure created by analyse_change
3663 * - 'backup_point' shows how much the metadata manager has backed-up
3664 * data. For reshapes with increasing progress, it is the next address
3665 * to be backed up, previous addresses have been backed-up. For
3666 * decreasing progress, it is the earliest address that has been
3667 * backed up - later addresses are also backed up.
3668 * So addresses between reshape_progress and backup_point are
3669 * backed up providing those are in the 'correct' order.
3670 * - 'wait_point' is an array address. When reshape_completed
3671 * passes this point, progress_reshape should return. It might
3672 * return earlier if it determines that ->reshape_progress needs
3673 * to be updated or further backup is needed.
3674 * - suspend_point is maintained by progress_reshape and the caller
3675 * should not touch it except to initialise to zero.
3676 * It is an array address and it only increases in 2.6.37 and earlier.
3677 * This makes it difficult to handle reducing reshapes with
3678 * external metadata.
3679 * However: it is similar to backup_point in that it records the
3680 * other end of a suspended region from reshape_progress.
3681 * it is moved to extend the region that is safe to backup and/or
3682 * reshape
3683 * - reshape_completed is read from sysfs and returned. The caller
3684 * should copy this into ->reshape_progress when it has reason to
3685 * believe that the metadata knows this, and any backup outside this
3686 * has been erased.
3687 *
3688 * Return value is:
3689 * 1 if more data from backup_point - but only as far as suspend_point,
3690 * should be backed up
3691 * 0 if things are progressing smoothly
3692 * -1 if the reshape is finished because it is all done,
3693 * -2 if the reshape is finished due to an error.
3694 */
3695
3696 int advancing = (reshape->after.data_disks
3697 >= reshape->before.data_disks);
3698 unsigned long long need_backup; /* All data between start of array and
3699 * here will at some point need to
3700 * be backed up.
3701 */
3702 unsigned long long read_offset, write_offset;
3703 unsigned long long write_range;
3704 unsigned long long max_progress, target, completed;
3705 unsigned long long array_size = (info->component_size
3706 * reshape->before.data_disks);
3707 int fd;
3708 char buf[20];
3709
3710 /* First, we unsuspend any region that is now known to be safe.
3711 * If suspend_point is on the 'wrong' side of reshape_progress, then
3712 * we don't have or need suspension at the moment. This is true for
3713 * native metadata when we don't need to back-up.
3714 */
3715 if (advancing) {
3716 if (info->reshape_progress <= *suspend_point)
3717 sysfs_set_num(info, NULL, "suspend_lo",
3718 info->reshape_progress);
3719 } else {
3720 /* Note: this won't work in 2.6.37 and before.
3721 * Something somewhere should make sure we don't need it!
3722 */
3723 if (info->reshape_progress >= *suspend_point)
3724 sysfs_set_num(info, NULL, "suspend_hi",
3725 info->reshape_progress);
3726 }
3727
3728 /* Now work out how far it is safe to progress.
3729 * If the read_offset for ->reshape_progress is less than
3730 * 'blocks' beyond the write_offset, we can only progress as far
3731 * as a backup.
3732 * Otherwise we can progress until the write_offset for the new location
3733 * reaches (within 'blocks' of) the read_offset at the current location.
3734 * However that region must be suspended unless we are using native
3735 * metadata.
3736 * If we need to suspend more, we limit it to 128M per device, which is
3737 * rather arbitrary and should be some time-based calculation.
3738 */
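/* Example with hypothetical numbers: growing from 3 to 4 data disks
 * with reshape_progress = 120000 sectors gives read_offset = 40000
 * and write_offset = 30000; with a 512K chunk (write_range = 1024
 * sectors) the read point is more than a chunk ahead of the write
 * point, so progress is not limited to backup_point.
 */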
3739 read_offset = info->reshape_progress / reshape->before.data_disks;
3740 write_offset = info->reshape_progress / reshape->after.data_disks;
3741 write_range = info->new_chunk/512;
3742 if (reshape->before.data_disks == reshape->after.data_disks)
3743 need_backup = array_size;
3744 else
3745 need_backup = reshape->backup_blocks;
3746 if (advancing) {
3747 if (read_offset < write_offset + write_range)
3748 max_progress = backup_point;
3749 else
3750 max_progress =
3751 read_offset *
3752 reshape->after.data_disks;
3753 } else {
3754 if (read_offset > write_offset - write_range)
3755 /* Can only progress as far as has been backed up,
3756 * which must be suspended */
3757 max_progress = backup_point;
3758 else if (info->reshape_progress <= need_backup)
3759 max_progress = backup_point;
3760 else {
3761 if (info->array.major_version >= 0)
3762 /* Can progress until backup is needed */
3763 max_progress = need_backup;
3764 else {
3765 /* Can progress until metadata update is required */
3766 max_progress =
3767 read_offset *
3768 reshape->after.data_disks;
3769 /* but data must be suspended */
3770 if (max_progress < *suspend_point)
3771 max_progress = *suspend_point;
3772 }
3773 }
3774 }
3775
3776 /* We know it is safe to progress to 'max_progress' providing
3777 * it is suspended or we are using native metadata.
3778 * Consider extending suspend_point 128M per device if it
3779 * is less than 64M per device beyond reshape_progress.
3780 * But always do a multiple of 'blocks'
3781 * FIXME this is too big - it takes too long to complete
3782 * this much.
3783 */
3784 target = 64*1024*2 * min(reshape->before.data_disks,
3785 reshape->after.data_disks);
3786 target /= reshape->backup_blocks;
3787 if (target < 2)
3788 target = 2;
3789 target *= reshape->backup_blocks;
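/* For illustration (assumed figures): with min(before, after) = 4 data
 * disks, 64*1024*2 * 4 = 524288 sectors (256MiB); if backup_blocks is
 * 10240 sectors this rounds down to 51 whole backup_blocks, so
 * target = 522240 sectors.
 */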
3790
3791 /* For externally managed metadata we always need to suspend IO to
3792 * the area being reshaped so we regularly push suspend_point forward.
3793 * For native metadata we only need the suspend if we are going to do
3794 * a backup.
3795 */
3796 if (advancing) {
3797 if ((need_backup > info->reshape_progress
3798 || info->array.major_version < 0) &&
3799 *suspend_point < info->reshape_progress + target) {
3800 if (need_backup < *suspend_point + 2 * target)
3801 *suspend_point = need_backup;
3802 else if (*suspend_point + 2 * target < array_size)
3803 *suspend_point += 2 * target;
3804 else
3805 *suspend_point = array_size;
3806 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
3807 if (max_progress > *suspend_point)
3808 max_progress = *suspend_point;
3809 }
3810 } else {
3811 if (info->array.major_version >= 0) {
3812 /* Only need to suspend when about to backup */
3813 if (info->reshape_progress < need_backup * 2 &&
3814 *suspend_point > 0) {
3815 *suspend_point = 0;
3816 sysfs_set_num(info, NULL, "suspend_lo", 0);
3817 sysfs_set_num(info, NULL, "suspend_hi", need_backup);
3818 }
3819 } else {
3820 /* Need to suspend continually */
3821 if (info->reshape_progress < *suspend_point)
3822 *suspend_point = info->reshape_progress;
3823 if (*suspend_point + target < info->reshape_progress)
3824 /* No need to move suspend region yet */;
3825 else {
3826 if (*suspend_point >= 2 * target)
3827 *suspend_point -= 2 * target;
3828 else
3829 *suspend_point = 0;
3830 sysfs_set_num(info, NULL, "suspend_lo",
3831 *suspend_point);
3832 }
3833 if (max_progress < *suspend_point)
3834 max_progress = *suspend_point;
3835 }
3836 }
3837
3838 /* now set sync_max to allow that progress. sync_max, like
3839 * sync_completed is a count of sectors written per device, so
3840 * we find the difference between max_progress and the start point,
3841 * and divide that by after.data_disks to get a sync_max
3842 * number.
3843 * At the same time we convert wait_point to a similar number
3844 * for comparing against sync_completed.
3845 */
3846 /* scale down max_progress to per_disk */
3847 max_progress /= reshape->after.data_disks;
3848 /* Round to chunk size as some kernels give an erroneously high number */
3849 max_progress /= info->new_chunk/512;
3850 max_progress *= info->new_chunk/512;
3851 /* And round to old chunk size as the kernel wants that */
3852 max_progress /= info->array.chunk_size/512;
3853 max_progress *= info->array.chunk_size/512;
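/* e.g. (assumed figures) with after.data_disks = 5 and max_progress of
 * 128500 array sectors, the per-device value 25700 is rounded down to
 * 25600 by a 512K (1024-sector) new chunk, and 25600 is already a
 * multiple of a 256K (512-sector) old chunk, so sync_max would become
 * 25600.
 */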
3854 /* Limit progress to the whole device */
3855 if (max_progress > info->component_size)
3856 max_progress = info->component_size;
3857 wait_point /= reshape->after.data_disks;
3858 if (!advancing) {
3859 /* switch from 'device offset' to 'processed block count' */
3860 max_progress = info->component_size - max_progress;
3861 wait_point = info->component_size - wait_point;
3862 }
3863
3864 if (!*frozen)
3865 sysfs_set_num(info, NULL, "sync_max", max_progress);
3866
3867 /* Now wait. If we have already reached the point that we were
3868 * asked to wait to, don't wait at all, else wait for any change.
3869 * We need to select on 'sync_completed' as that is the place that
3870 * notifications happen, but we are really interested in
3871 * 'reshape_position'
3872 */
3873 fd = sysfs_get_fd(info, NULL, "sync_completed");
3874 if (fd < 0)
3875 goto check_progress;
3876
3877 if (sysfs_fd_get_ll(fd, &completed) < 0)
3878 goto check_progress;
3879
3880 while (completed < max_progress && completed < wait_point) {
3881 /* Check that sync_action is still 'reshape' to avoid
3882 * waiting forever on a dead array
3883 */
3884 char action[20];
3885 if (sysfs_get_str(info, NULL, "sync_action",
3886 action, 20) <= 0 ||
3887 strncmp(action, "reshape", 7) != 0)
3888 break;
3889 /* Some kernels reset 'sync_completed' to zero
3890 * before setting 'sync_action' to 'idle'.
3891 * So we need these extra tests.
3892 */
3893 if (completed == 0 && advancing
3894 && strncmp(action, "idle", 4) == 0
3895 && info->reshape_progress > 0)
3896 break;
3897 if (completed == 0 && !advancing
3898 && strncmp(action, "idle", 4) == 0
3899 && info->reshape_progress < (info->component_size
3900 * reshape->after.data_disks))
3901 break;
3902 sysfs_wait(fd, NULL);
3903 if (sysfs_fd_get_ll(fd, &completed) < 0)
3904 goto check_progress;
3905 }
3906 /* Some kernels reset 'sync_completed' to zero,
3907 * but we need to know the real point we have reached in md.
3908 * So in that case, read 'reshape_position' from sysfs.
3909 */
3910 if (completed == 0) {
3911 unsigned long long reshapep;
3912 char action[20];
3913 if (sysfs_get_str(info, NULL, "sync_action",
3914 action, 20) > 0 &&
3915 strncmp(action, "idle", 4) == 0 &&
3916 sysfs_get_ll(info, NULL,
3917 "reshape_position", &reshapep) == 0)
3918 *reshape_completed = reshapep;
3919 } else {
3920 /* some kernels can give an incorrectly high
3921 * 'completed' number, so round down */
3922 completed /= (info->new_chunk/512);
3923 completed *= (info->new_chunk/512);
3924 /* Convert 'completed' back in to a 'progress' number */
3925 completed *= reshape->after.data_disks;
3926 if (!advancing)
3927 completed = (info->component_size
3928 * reshape->after.data_disks
3929 - completed);
3930 *reshape_completed = completed;
3931 }
3932
3933 close(fd);
3934
3935 /* We return the need_backup flag. Caller will decide
3936 * how much - a multiple of ->backup_blocks up to *suspend_point
3937 */
3938 if (advancing)
3939 return need_backup > info->reshape_progress;
3940 else
3941 return need_backup >= info->reshape_progress;
3942
3943 check_progress:
3944 /* if we couldn't read a number from sync_completed, then
3945 * either the reshape did complete, or it aborted.
3946 * We can tell which by checking for 'none' in reshape_position.
3947 * If it did abort, then it might immediately restart if
3948 * it was just a device failure that leaves us degraded but
3949 * functioning.
3950 */
3951 if (sysfs_get_str(info, NULL, "reshape_position", buf, sizeof(buf)) < 0
3952 || strncmp(buf, "none", 4) != 0) {
3953 /* The abort might only be temporary. Wait up to 10
3954 * seconds for fd to contain a valid number again.
3955 */
3956 int wait = 10000;
3957 int rv = -2;
3958 unsigned long long new_sync_max;
3959 while (fd >= 0 && rv < 0 && wait > 0) {
3960 if (sysfs_wait(fd, &wait) != 1)
3961 break;
3962 switch (sysfs_fd_get_ll(fd, &completed)) {
3963 case 0:
3964 /* all good again */
3965 rv = 1;
3966 /* If "sync_max" is no longer max_progress
3967 * we need to freeze things
3968 */
3969 sysfs_get_ll(info, NULL, "sync_max", &new_sync_max);
3970 *frozen = (new_sync_max != max_progress);
3971 break;
3972 case -2: /* read error - abort */
3973 wait = 0;
3974 break;
3975 }
3976 }
3977 if (fd >= 0)
3978 close(fd);
3979 return rv; /* abort */
3980 } else {
3981 /* Maybe racing with array shutdown - check state */
3982 if (fd >= 0)
3983 close(fd);
3984 if (sysfs_get_str(info, NULL, "array_state", buf, sizeof(buf)) < 0
3985 || strncmp(buf, "inactive", 8) == 0
3986 || strncmp(buf, "clear",5) == 0)
3987 return -2; /* abort */
3988 return -1; /* complete */
3989 }
3990 }
3991
3992 /* FIXME return status is never checked */
3993 static int grow_backup(struct mdinfo *sra,
3994 unsigned long long offset, /* per device */
3995 unsigned long stripes, /* per device, in old chunks */
3996 int *sources, unsigned long long *offsets,
3997 int disks, int chunk, int level, int layout,
3998 int dests, int *destfd, unsigned long long *destoffsets,
3999 int part, int *degraded,
4000 char *buf)
4001 {
4002 /* Backup 'stripes' stripes, starting at 'offset' on each device of the array,
4003 * to storage 'destfd' (offset 'destoffsets'), after first
4004 * suspending IO. Then allow resync to continue
4005 * over the suspended section.
4006 * Use part 'part' of the backup-super-block.
4007 */
4008 int odata = disks;
4009 int rv = 0;
4010 int i;
4011 unsigned long long ll;
4012 int new_degraded;
4013 //printf("offset %llu\n", offset);
4014 if (level >= 4)
4015 odata--;
4016 if (level == 6)
4017 odata--;
4018
4019 /* Check that the array hasn't become degraded, else we might back up the wrong data */
4020 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
4021 return -1; /* FIXME this error is ignored */
4022 new_degraded = (int)ll;
4023 if (new_degraded != *degraded) {
4024 /* check each device to ensure it is still working */
4025 struct mdinfo *sd;
4026 for (sd = sra->devs ; sd ; sd = sd->next) {
4027 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4028 continue;
4029 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
4030 char sbuf[20];
4031 if (sysfs_get_str(sra, sd, "state", sbuf, 20) < 0 ||
4032 strstr(sbuf, "faulty") ||
4033 strstr(sbuf, "in_sync") == NULL) {
4034 /* this device is dead */
4035 sd->disk.state = (1<<MD_DISK_FAULTY);
4036 if (sd->disk.raid_disk >= 0 &&
4037 sources[sd->disk.raid_disk] >= 0) {
4038 close(sources[sd->disk.raid_disk]);
4039 sources[sd->disk.raid_disk] = -1;
4040 }
4041 }
4042 }
4043 }
4044 *degraded = new_degraded;
4045 }
4046 if (part) {
4047 bsb.arraystart2 = __cpu_to_le64(offset * odata);
4048 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
4049 } else {
4050 bsb.arraystart = __cpu_to_le64(offset * odata);
4051 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
4052 }
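/* Illustrative example (assumed figures): a RAID5 with 4 data disks
 * (odata = 4) and a 512K chunk, backing up 16 stripes at per-device
 * offset 0 into part 0, records arraystart = 0 and
 * length = 16 * 1024 * 4 = 65536 sectors.
 */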
4053 if (part)
4054 bsb.magic[15] = '2';
4055 for (i = 0; i < dests; i++)
4056 if (part)
4057 lseek64(destfd[i], destoffsets[i] + __le64_to_cpu(bsb.devstart2)*512, 0);
4058 else
4059 lseek64(destfd[i], destoffsets[i], 0);
4060
4061 rv = save_stripes(sources, offsets,
4062 disks, chunk, level, layout,
4063 dests, destfd,
4064 offset*512*odata, stripes * chunk * odata,
4065 buf);
4066
4067 if (rv)
4068 return rv;
4069 bsb.mtime = __cpu_to_le64(time(0));
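/* The superblock below is written 4096 bytes before the backed-up data
 * on each destination; when the data area does not start at the very
 * beginning of the destination (destoffsets[i] > 4096) an identical
 * copy is also written immediately after the data.
 */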
4070 for (i = 0; i < dests; i++) {
4071 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4072
4073 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4074 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4075 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4076 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4077
4078 rv = -1;
4079 if ((unsigned long long)lseek64(destfd[i], destoffsets[i] - 4096, 0)
4080 != destoffsets[i] - 4096)
4081 break;
4082 if (write(destfd[i], &bsb, 512) != 512)
4083 break;
4084 if (destoffsets[i] > 4096) {
4085 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
4086 destoffsets[i]+stripes*chunk*odata)
4087 break;
4088 if (write(destfd[i], &bsb, 512) != 512)
4089 break;
4090 }
4091 fsync(destfd[i]);
4092 rv = 0;
4093 }
4094
4095 return rv;
4096 }
4097
4098 /* in 2.6.30, the value reported by sync_completed can be
4099 * less than it should be by one stripe.
4100 * This only happens when reshape hits sync_max and pauses.
4101 * So allow wait_backup to either extend sync_max further
4102 * than strictly necessary, or return before the
4103 * sync has got quite as far as we would really like.
4104 * This is what 'blocks2' is for.
4105 * The various callers give appropriate values so that
4106 * everything works.
4107 */
4108 /* FIXME return value is often ignored */
4109 static int forget_backup(int dests, int *destfd,
4110 unsigned long long *destoffsets,
4111 int part)
4112 {
4113 /*
4114 * Erase backup 'part' (which is 0 or 1)
4115 */
4116 int i;
4117 int rv;
4118
4119 if (part) {
4120 bsb.arraystart2 = __cpu_to_le64(0);
4121 bsb.length2 = __cpu_to_le64(0);
4122 } else {
4123 bsb.arraystart = __cpu_to_le64(0);
4124 bsb.length = __cpu_to_le64(0);
4125 }
4126 bsb.mtime = __cpu_to_le64(time(0));
4127 rv = 0;
4128 for (i = 0; i < dests; i++) {
4129 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4130 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4131 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4132 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4133 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4134 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4135 destoffsets[i]-4096)
4136 rv = -1;
4137 if (rv == 0 &&
4138 write(destfd[i], &bsb, 512) != 512)
4139 rv = -1;
4140 fsync(destfd[i]);
4141 }
4142 return rv;
4143 }
4144
4145 static void fail(char *msg)
4146 {
4147 int rv;
4148 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4149 rv |= (write(2, "\n", 1) != 1);
4150 exit(rv ? 1 : 2);
4151 }
4152
4153 static char *abuf, *bbuf;
4154 static unsigned long long abuflen;
4155 static void validate(int afd, int bfd, unsigned long long offset)
4156 {
4157 /* check that the data in the backup against the array.
4158 * This is only used for regression testing and should not
4159 * be used while the array is active
4160 */
4161 if (afd < 0)
4162 return;
4163 lseek64(bfd, offset - 4096, 0);
4164 if (read(bfd, &bsb2, 512) != 512)
4165 fail("cannot read bsb");
4166 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4167 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4168 fail("first csum bad");
4169 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4170 fail("magic is bad");
4171 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4172 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4173 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4174 fail("second csum bad");
4175
4176 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4177 fail("devstart is wrong");
4178
4179 if (bsb2.length) {
4180 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4181
4182 if (abuflen < len) {
4183 free(abuf);
4184 free(bbuf);
4185 abuflen = len;
4186 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4187 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4188 abuflen = 0;
4189 /* just stop validating on mem-alloc failure */
4190 return;
4191 }
4192 }
4193
4194 lseek64(bfd, offset, 0);
4195 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4196 //printf("len %llu\n", len);
4197 fail("read first backup failed");
4198 }
4199 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4200 if ((unsigned long long)read(afd, abuf, len) != len)
4201 fail("read first from array failed");
4202 if (memcmp(bbuf, abuf, len) != 0) {
4203 #if 0
4204 int i;
4205 printf("offset=%llu len=%llu\n",
4206 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4207 for (i=0; i<len; i++)
4208 if (bbuf[i] != abuf[i]) {
4209 printf("first diff byte %d\n", i);
4210 break;
4211 }
4212 #endif
4213 fail("data1 compare failed");
4214 }
4215 }
4216 if (bsb2.length2) {
4217 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4218
4219 if (abuflen < len) {
4220 free(abuf);
4221 free(bbuf);
4222 abuflen = len;
4223 abuf = xmalloc(abuflen);
4224 bbuf = xmalloc(abuflen);
4225 }
4226
4227 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4228 if ((unsigned long long)read(bfd, bbuf, len) != len)
4229 fail("read second backup failed");
4230 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4231 if ((unsigned long long)read(afd, abuf, len) != len)
4232 fail("read second from array failed");
4233 if (memcmp(bbuf, abuf, len) != 0)
4234 fail("data2 compare failed");
4235 }
4236 }
4237
4238 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4239 struct supertype *st, unsigned long blocks,
4240 int *fds, unsigned long long *offsets,
4241 int dests, int *destfd, unsigned long long *destoffsets)
4242 {
4243 /* Monitor a reshape where backup is being performed using
4244 * 'native' mechanism - either to a backup file, or
4245 * to some space in a spare.
4246 */
4247 char *buf;
4248 int degraded = -1;
4249 unsigned long long speed;
4250 unsigned long long suspend_point, array_size;
4251 unsigned long long backup_point, wait_point;
4252 unsigned long long reshape_completed;
4253 int done = 0;
4254 int increasing = reshape->after.data_disks >= reshape->before.data_disks;
4255 int part = 0; /* The next part of the backup area to fill. It may already
4256 * be full, so we need to check */
4257 int level = reshape->level;
4258 int layout = reshape->before.layout;
4259 int data = reshape->before.data_disks;
4260 int disks = reshape->before.data_disks + reshape->parity;
4261 int chunk = sra->array.chunk_size;
4262 struct mdinfo *sd;
4263 unsigned long stripes;
4264 int uuid[4];
4265 int frozen = 0;
4266
4267 /* set up the backup-super-block. This requires the
4268 * uuid from the array.
4269 */
4270 /* Find a superblock */
4271 for (sd = sra->devs; sd; sd = sd->next) {
4272 char *dn;
4273 int devfd;
4274 int ok;
4275 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4276 continue;
4277 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4278 devfd = dev_open(dn, O_RDONLY);
4279 if (devfd < 0)
4280 continue;
4281 ok = st->ss->load_super(st, devfd, NULL);
4282 close(devfd);
4283 if (ok == 0)
4284 break;
4285 }
4286 if (!sd) {
4287 pr_err("Cannot find a superblock\n");
4288 return 0;
4289 }
4290
4291 memset(&bsb, 0, 512);
4292 memcpy(bsb.magic, "md_backup_data-1", 16);
4293 st->ss->uuid_from_super(st, uuid);
4294 memcpy(bsb.set_uuid, uuid, 16);
4295 bsb.mtime = __cpu_to_le64(time(0));
4296 bsb.devstart2 = blocks;
4297
4298 stripes = blocks / (sra->array.chunk_size/512) /
4299 reshape->before.data_disks;
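/* e.g. (assumed figures) blocks = 81920 sectors with a 512K
 * (1024-sector) chunk and 4 pre-reshape data disks gives
 * stripes = 81920 / 1024 / 4 = 20.
 */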
4300
4301 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4302 /* Don't start the 'reshape' */
4303 return 0;
4304 if (reshape->before.data_disks == reshape->after.data_disks) {
4305 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4306 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4307 }
4308
4309 if (increasing) {
4310 array_size = sra->component_size * reshape->after.data_disks;
4311 backup_point = sra->reshape_progress;
4312 suspend_point = 0;
4313 } else {
4314 array_size = sra->component_size * reshape->before.data_disks;
4315 backup_point = reshape->backup_blocks;
4316 suspend_point = array_size;
4317 }
4318
4319 while (!done) {
4320 int rv;
4321
4322 /* Want to return as soon as the oldest backup slot can
4323 * be released as that allows us to start backing up
4324 * some more, providing suspend_point has been
4325 * advanced, which it should have.
4326 */
4327 if (increasing) {
4328 wait_point = array_size;
4329 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4330 wait_point = (__le64_to_cpu(bsb.arraystart) +
4331 __le64_to_cpu(bsb.length));
4332 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4333 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4334 __le64_to_cpu(bsb.length2));
4335 } else {
4336 wait_point = 0;
4337 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4338 wait_point = __le64_to_cpu(bsb.arraystart);
4339 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4340 wait_point = __le64_to_cpu(bsb.arraystart2);
4341 }
4342
4343 reshape_completed = sra->reshape_progress;
4344 rv = progress_reshape(sra, reshape,
4345 backup_point, wait_point,
4346 &suspend_point, &reshape_completed,
4347 &frozen);
4348 /* external metadata would need to ping_monitor here */
4349 sra->reshape_progress = reshape_completed;
4350
4351 /* Clear any backup region that is before 'here' */
4352 if (increasing) {
4353 if (__le64_to_cpu(bsb.length) > 0 &&
4354 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4355 __le64_to_cpu(bsb.length)))
4356 forget_backup(dests, destfd,
4357 destoffsets, 0);
4358 if (__le64_to_cpu(bsb.length2) > 0 &&
4359 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4360 __le64_to_cpu(bsb.length2)))
4361 forget_backup(dests, destfd,
4362 destoffsets, 1);
4363 } else {
4364 if (__le64_to_cpu(bsb.length) > 0 &&
4365 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4366 forget_backup(dests, destfd,
4367 destoffsets, 0);
4368 if (__le64_to_cpu(bsb.length2) > 0 &&
4369 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4370 forget_backup(dests, destfd,
4371 destoffsets, 1);
4372 }
4373 if (sigterm)
4374 rv = -2;
4375 if (rv < 0) {
4376 if (rv == -1)
4377 done = 1;
4378 break;
4379 }
4380 if (rv == 0 && increasing && !st->ss->external) {
4381 /* No longer need to monitor this reshape */
4382 sysfs_set_str(sra, NULL, "sync_max", "max");
4383 done = 1;
4384 break;
4385 }
4386
4387 while (rv) {
4388 unsigned long long offset;
4389 unsigned long actual_stripes;
4390 /* Need to backup some data.
4391 * If 'part' is not used and the desired
4392 * backup size is suspended, do a backup,
4393 * then consider the next part.
4394 */
4395 /* Check that 'part' is unused */
4396 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4397 break;
4398 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4399 break;
4400
4401 offset = backup_point / data;
4402 actual_stripes = stripes;
4403 if (increasing) {
4404 if (offset + actual_stripes * (chunk/512) >
4405 sra->component_size)
4406 actual_stripes = ((sra->component_size - offset)
4407 / (chunk/512));
4408 if (offset + actual_stripes * (chunk/512) >
4409 suspend_point/data)
4410 break;
4411 } else {
4412 if (offset < actual_stripes * (chunk/512))
4413 actual_stripes = offset / (chunk/512);
4414 offset -= actual_stripes * (chunk/512);
4415 if (offset < suspend_point/data)
4416 break;
4417 }
4418 if (actual_stripes == 0)
4419 break;
4420 grow_backup(sra, offset, actual_stripes,
4421 fds, offsets,
4422 disks, chunk, level, layout,
4423 dests, destfd, destoffsets,
4424 part, &degraded, buf);
4425 validate(afd, destfd[0], destoffsets[0]);
4426 /* record where 'part' is up to */
4427 part = !part;
4428 if (increasing)
4429 backup_point += actual_stripes * (chunk/512) * data;
4430 else
4431 backup_point -= actual_stripes * (chunk/512) * data;
4432 }
4433 }
4434
4435 /* FIXME maybe call progress_reshape one more time instead */
4436 /* remove any remaining suspension */
4437 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4438 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4439 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4440 sysfs_set_num(sra, NULL, "sync_min", 0);
4441
4442 if (reshape->before.data_disks == reshape->after.data_disks)
4443 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4444 free(buf);
4445 return done;
4446 }
4447
4448 /*
4449 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4450 * write that data into the array and update the super blocks with
4451 * the new reshape_progress
4452 */
4453 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, int cnt,
4454 char *backup_file, int verbose)
4455 {
4456 int i, j;
4457 int old_disks;
4458 unsigned long long *offsets;
4459 unsigned long long nstripe, ostripe;
4460 int ndata, odata;
4461
4462 odata = info->array.raid_disks - info->delta_disks - 1;
4463 if (info->array.level == 6) odata--; /* number of data disks */
4464 ndata = info->array.raid_disks - 1;
4465 if (info->new_level == 6) ndata--;
4466
4467 old_disks = info->array.raid_disks - info->delta_disks;
4468
4469 if (info->delta_disks <= 0)
4470 /* Didn't grow, so the backup file must have
4471 * been used
4472 */
4473 old_disks = cnt;
4474 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4475 struct mdinfo dinfo;
4476 int fd;
4477 int bsbsize;
4478 char *devname, namebuf[20];
4479 unsigned long long lo, hi;
4480
4481 /* This was a spare and may have some saved data on it.
4482 * Load the superblock, find and load the
4483 * backup_super_block.
4484 * If either fails, go on to the next device.
4485 * If the backup contains no new info, just return
4486 * else restore data and update all superblocks
4487 */
4488 if (i == old_disks-1) {
4489 fd = open(backup_file, O_RDONLY);
4490 if (fd<0) {
4491 pr_err("backup file %s inaccessible: %s\n",
4492 backup_file, strerror(errno));
4493 continue;
4494 }
4495 devname = backup_file;
4496 } else {
4497 fd = fdlist[i];
4498 if (fd < 0)
4499 continue;
4500 if (st->ss->load_super(st, fd, NULL))
4501 continue;
4502
4503 st->ss->getinfo_super(st, &dinfo, NULL);
4504 st->ss->free_super(st);
4505
4506 if (lseek64(fd,
4507 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4508 0) < 0) {
4509 pr_err("Cannot seek on device %d\n", i);
4510 continue; /* Cannot seek */
4511 }
4512 sprintf(namebuf, "device-%d", i);
4513 devname = namebuf;
4514 }
4515 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4516 if (verbose)
4517 pr_err("Cannot read from %s\n", devname);
4518 continue; /* Cannot read */
4519 }
4520 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4521 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4522 if (verbose)
4523 pr_err("No backup metadata on %s\n", devname);
4524 continue;
4525 }
4526 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4527 if (verbose)
4528 pr_err("Bad backup-metadata checksum on %s\n", devname);
4529 continue; /* bad checksum */
4530 }
4531 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4532 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4533 if (verbose)
4534 pr_err("Bad backup-metadata checksum2 on %s\n", devname);
4535 continue; /* Bad second checksum */
4536 }
4537 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4538 if (verbose)
4539 pr_err("Wrong uuid on backup-metadata on %s\n", devname);
4540 continue; /* Wrong uuid */
4541 }
4542
4543 /* array utime and backup-mtime should be updated at much the same time, but it seems that
4544 * sometimes they aren't... So allow considerable flexibility in matching, and allow
4545 * this test to be overridden by an environment variable.
4546 */
4547 if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
4548 time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
4549 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4550 pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
4551 (unsigned long)__le64_to_cpu(bsb.mtime),
4552 (unsigned long)info->array.utime);
4553 } else {
4554 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4555 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4556 continue; /* time stamp is too bad */
4557 }
4558 }
4559
4560 if (bsb.magic[15] == '1') {
4561 if (bsb.length == 0)
4562 continue;
4563 if (info->delta_disks >= 0) {
4564 /* reshape_progress is increasing */
4565 if (__le64_to_cpu(bsb.arraystart)
4566 + __le64_to_cpu(bsb.length)
4567 < info->reshape_progress) {
4568 nonew:
4569 if (verbose)
4570 pr_err("backup-metadata found on %s but is not needed\n", devname);
4571 continue; /* No new data here */
4572 }
4573 } else {
4574 /* reshape_progress is decreasing */
4575 if (__le64_to_cpu(bsb.arraystart) >=
4576 info->reshape_progress)
4577 goto nonew; /* No new data here */
4578 }
4579 } else {
4580 if (bsb.length == 0 && bsb.length2 == 0)
4581 continue;
4582 if (info->delta_disks >= 0) {
4583 /* reshape_progress is increasing */
4584 if ((__le64_to_cpu(bsb.arraystart)
4585 + __le64_to_cpu(bsb.length)
4586 < info->reshape_progress)
4587 &&
4588 (__le64_to_cpu(bsb.arraystart2)
4589 + __le64_to_cpu(bsb.length2)
4590 < info->reshape_progress))
4591 goto nonew; /* No new data here */
4592 } else {
4593 /* reshape_progress is decreasing */
4594 if (__le64_to_cpu(bsb.arraystart) >=
4595 info->reshape_progress &&
4596 __le64_to_cpu(bsb.arraystart2) >=
4597 info->reshape_progress)
4598 goto nonew; /* No new data here */
4599 }
4600 }
4601 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4602 second_fail:
4603 if (verbose)
4604 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4605 devname);
4606 continue; /* Cannot seek */
4607 }
4608 /* There should be a duplicate backup superblock 4k before here */
4609 if (lseek64(fd, -4096, 1) < 0 ||
4610 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4611 goto second_fail; /* Cannot find leading superblock */
4612 if (bsb.magic[15] == '1')
4613 bsbsize = offsetof(struct mdp_backup_super, pad1);
4614 else
4615 bsbsize = offsetof(struct mdp_backup_super, pad);
4616 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4617 goto second_fail; /* Cannot find leading superblock */
4618
4619 /* Now need the data offsets for all devices. */
4620 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4621 for(j=0; j<info->array.raid_disks; j++) {
4622 if (fdlist[j] < 0)
4623 continue;
4624 if (st->ss->load_super(st, fdlist[j], NULL))
4625 /* FIXME should this be an error */
4626 continue;
4627 st->ss->getinfo_super(st, &dinfo, NULL);
4628 st->ss->free_super(st);
4629 offsets[j] = dinfo.data_offset * 512;
4630 }
4631 printf("%s: restoring critical section\n", Name);
4632
4633 if (restore_stripes(fdlist, offsets,
4634 info->array.raid_disks,
4635 info->new_chunk,
4636 info->new_level,
4637 info->new_layout,
4638 fd, __le64_to_cpu(bsb.devstart)*512,
4639 __le64_to_cpu(bsb.arraystart)*512,
4640 __le64_to_cpu(bsb.length)*512, NULL)) {
4641 /* didn't succeed, so give up */
4642 if (verbose)
4643 pr_err("Error restoring backup from %s\n",
4644 devname);
4645 free(offsets);
4646 return 1;
4647 }
4648
4649 if (bsb.magic[15] == '2' &&
4650 restore_stripes(fdlist, offsets,
4651 info->array.raid_disks,
4652 info->new_chunk,
4653 info->new_level,
4654 info->new_layout,
4655 fd, __le64_to_cpu(bsb.devstart)*512 +
4656 __le64_to_cpu(bsb.devstart2)*512,
4657 __le64_to_cpu(bsb.arraystart2)*512,
4658 __le64_to_cpu(bsb.length2)*512, NULL)) {
4659 /* didn't succeed, so give up */
4660 if (verbose)
4661 pr_err("Error restoring second backup from %s\n",
4662 devname);
4663 free(offsets);
4664 return 1;
4665 }
4666
4667 free(offsets);
4668
4669 /* Ok, so the data is restored. Let's update those superblocks. */
4670
4671 lo = hi = 0;
4672 if (bsb.length) {
4673 lo = __le64_to_cpu(bsb.arraystart);
4674 hi = lo + __le64_to_cpu(bsb.length);
4675 }
4676 if (bsb.magic[15] == '2' && bsb.length2) {
4677 unsigned long long lo1, hi1;
4678 lo1 = __le64_to_cpu(bsb.arraystart2);
4679 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4680 if (lo == hi) {
4681 lo = lo1;
4682 hi = hi1;
4683 } else if (lo < lo1)
4684 hi = hi1;
4685 else
4686 lo = lo1;
4687 }
4688 if (lo < hi &&
4689 (info->reshape_progress < lo ||
4690 info->reshape_progress > hi))
4691 /* backup does not affect reshape_progress */ ;
4692 else if (info->delta_disks >= 0) {
4693 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4694 __le64_to_cpu(bsb.length);
4695 if (bsb.magic[15] == '2') {
4696 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2) +
4697 __le64_to_cpu(bsb.length2);
4698 if (p2 > info->reshape_progress)
4699 info->reshape_progress = p2;
4700 }
4701 } else {
4702 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4703 if (bsb.magic[15] == '2') {
4704 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2);
4705 if (p2 < info->reshape_progress)
4706 info->reshape_progress = p2;
4707 }
4708 }
4709 for (j=0; j<info->array.raid_disks; j++) {
4710 if (fdlist[j] < 0)
4711 continue;
4712 if (st->ss->load_super(st, fdlist[j], NULL))
4713 continue;
4714 st->ss->getinfo_super(st, &dinfo, NULL);
4715 dinfo.reshape_progress = info->reshape_progress;
4716 st->ss->update_super(st, &dinfo,
4717 "_reshape_progress",
4718 NULL,0, 0, NULL);
4719 st->ss->store_super(st, fdlist[j]);
4720 st->ss->free_super(st);
4721 }
4722 return 0;
4723 }
4724 /* Didn't find any backup data, try to see if any
4725 * was needed.
4726 */
4727 if (info->delta_disks < 0) {
4728 /* When shrinking, the critical section is at the end.
4729 * So see if we are before the critical section.
4730 */
4731 unsigned long long first_block;
4732 nstripe = ostripe = 0;
4733 first_block = 0;
4734 while (ostripe >= nstripe) {
4735 ostripe += info->array.chunk_size / 512;
4736 first_block = ostripe * odata;
4737 nstripe = first_block / ndata / (info->new_chunk/512) *
4738 (info->new_chunk/512);
4739 }
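/* Worked example (assumed figures): shrinking a RAID5 from 5 to 4
 * devices (odata = 4, ndata = 3) with 512K chunks throughout, the
 * loop above exits with first_block = 12288 sectors; while
 * reshape_progress is still at or beyond that point the critical
 * section has not been reached and no backup was needed.
 */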
4740
4741 if (info->reshape_progress >= first_block)
4742 return 0;
4743 }
4744 if (info->delta_disks > 0) {
4745 /* See if we are beyond the critical section. */
4746 unsigned long long last_block;
4747 nstripe = ostripe = 0;
4748 last_block = 0;
4749 while (nstripe >= ostripe) {
4750 nstripe += info->new_chunk / 512;
4751 last_block = nstripe * ndata;
4752 ostripe = last_block / odata / (info->array.chunk_size/512) *
4753 (info->array.chunk_size/512);
4754 }
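/* Similarly (assumed figures): growing a RAID5 from 4 to 5 devices
 * (odata = 3, ndata = 4) with 512K chunks gives last_block = 12288
 * sectors; once reshape_progress has passed that point the critical
 * section is behind us and no backup was needed.
 */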
4755
4756 if (info->reshape_progress >= last_block)
4757 return 0;
4758 }
4759 /* needed to recover critical section! */
4760 if (verbose)
4761 pr_err("Failed to find backup of critical section\n");
4762 return 1;
4763 }
4764
4765 int Grow_continue_command(char *devname, int fd,
4766 char *backup_file, int verbose)
4767 {
4768 int ret_val = 0;
4769 struct supertype *st = NULL;
4770 struct mdinfo *content = NULL;
4771 struct mdinfo array;
4772 char *subarray = NULL;
4773 struct mdinfo *cc = NULL;
4774 struct mdstat_ent *mdstat = NULL;
4775 int cfd = -1;
4776 int fd2;
4777
4778 dprintf("Grow continue from command line called for %s\n",
4779 devname);
4780
4781 st = super_by_fd(fd, &subarray);
4782 if (!st || !st->ss) {
4783 pr_err("Unable to determine metadata format for %s\n",
4784 devname);
4785 return 1;
4786 }
4787 dprintf("Grow continue is run for ");
4788 if (st->ss->external == 0) {
4789 int d;
4790 dprintf_cont("native array (%s)\n", devname);
4791 if (ioctl(fd, GET_ARRAY_INFO, &array.array) < 0) {
4792 pr_err("%s is not an active md array - aborting\n", devname);
4793 ret_val = 1;
4794 goto Grow_continue_command_exit;
4795 }
4796 content = &array;
4797 /* Need to load a superblock.
4798 * FIXME we should really get what we need from
4799 * sysfs
4800 */
4801 for (d = 0; d < MAX_DISKS; d++) {
4802 mdu_disk_info_t disk;
4803 char *dv;
4804 int err;
4805 disk.number = d;
4806 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
4807 continue;
4808 if (disk.major == 0 && disk.minor == 0)
4809 continue;
4810 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
4811 continue;
4812 dv = map_dev(disk.major, disk.minor, 1);
4813 if (!dv)
4814 continue;
4815 fd2 = dev_open(dv, O_RDONLY);
4816 if (fd2 < 0)
4817 continue;
4818 err = st->ss->load_super(st, fd2, NULL);
4819 close(fd2);
4820 if (err)
4821 continue;
4822 break;
4823 }
4824 if (d == MAX_DISKS) {
4825 pr_err("Unable to load metadata for %s\n",
4826 devname);
4827 ret_val = 1;
4828 goto Grow_continue_command_exit;
4829 }
4830 st->ss->getinfo_super(st, content, NULL);
4831 } else {
4832 char *container;
4833
4834 if (subarray) {
4835 dprintf_cont("subarray (%s)\n", subarray);
4836 container = st->container_devnm;
4837 cfd = open_dev_excl(st->container_devnm);
4838 } else {
4839 container = st->devnm;
4840 close(fd);
4841 cfd = open_dev_excl(st->devnm);
4842 dprintf_cont("container (%s)\n", container);
4843 fd = cfd;
4844 }
4845 if (cfd < 0) {
4846 pr_err("Unable to open container for %s\n", devname);
4847 ret_val = 1;
4848 goto Grow_continue_command_exit;
4849 }
4850
4851 /* find the array under reshape in the container
4852 */
4853 ret_val = st->ss->load_container(st, cfd, NULL);
4854 if (ret_val) {
4855 pr_err("Cannot read superblock for %s\n",
4856 devname);
4857 ret_val = 1;
4858 goto Grow_continue_command_exit;
4859 }
4860
4861 cc = st->ss->container_content(st, subarray);
4862 for (content = cc; content ; content = content->next) {
4863 char *array;
4864 int allow_reshape = 1;
4865
4866 if (content->reshape_active == 0)
4867 continue;
4868 /* The decision about array or container wide
4869 * reshape is taken in Grow_continue based
4870 * on content->reshape_active state, therefore we
4871 * need to check_reshape based on
4872 * reshape_active and subarray name
4873 */
4874 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
4875 allow_reshape = 0;
4876 if (content->reshape_active == CONTAINER_RESHAPE &&
4877 (content->array.state
4878 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
4879 allow_reshape = 0;
4880
4881 if (!allow_reshape) {
4882 pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
4883 devname, container);
4884 ret_val = 1;
4885 goto Grow_continue_command_exit;
4886 }
4887
4888 array = strchr(content->text_version+1, '/')+1;
4889 mdstat = mdstat_by_subdev(array, container);
4890 if (!mdstat)
4891 continue;
4892 if (mdstat->active == 0) {
4893 pr_err("Skipping inactive array %s.\n",
4894 mdstat->devnm);
4895 free_mdstat(mdstat);
4896 mdstat = NULL;
4897 continue;
4898 }
4899 break;
4900 }
4901 if (!content) {
4902 pr_err("Unable to determine reshaped array for %s\n", devname);
4903 ret_val = 1;
4904 goto Grow_continue_command_exit;
4905 }
4906 fd2 = open_dev(mdstat->devnm);
4907 if (fd2 < 0) {
4908 pr_err("cannot open (%s)\n", mdstat->devnm);
4909 ret_val = 1;
4910 goto Grow_continue_command_exit;
4911 }
4912
4913 sysfs_init(content, fd2, mdstat->devnm);
4914
4915 close(fd2);
4916
4917 /* start mdmon in case it is not running
4918 */
4919 if (!mdmon_running(container))
4920 start_mdmon(container);
4921 ping_monitor(container);
4922
4923 if (mdmon_running(container))
4924 st->update_tail = &st->updates;
4925 else {
4926 pr_err("No mdmon found. Grow cannot continue.\n");
4927 ret_val = 1;
4928 goto Grow_continue_command_exit;
4929 }
4930 }
4931
4932 /* verify that array under reshape is started from
4933 * correct position
4934 */
4935 if (verify_reshape_position(content, content->array.level) < 0) {
4936 ret_val = 1;
4937 goto Grow_continue_command_exit;
4938 }
4939
4940 /* continue reshape
4941 */
4942 ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);
4943
4944 Grow_continue_command_exit:
4945 if (cfd > -1)
4946 close(cfd);
4947 st->ss->free_super(st);
4948 free_mdstat(mdstat);
4949 sysfs_free(cc);
4950 free(subarray);
4951
4952 return ret_val;
4953 }
4954
4955 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
4956 char *backup_file, int forked, int freeze_reshape)
4957 {
4958 int ret_val = 2;
4959
4960 if (!info->reshape_active)
4961 return ret_val;
4962
4963 if (st->ss->external) {
4964 int cfd = open_dev(st->container_devnm);
4965
4966 if (cfd < 0)
4967 return 1;
4968
4969 st->ss->load_container(st, cfd, st->container_devnm);
4970 close(cfd);
4971 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
4972 st, info, 0, backup_file,
4973 0, forked,
4974 1 | info->reshape_active,
4975 freeze_reshape);
4976 } else
4977 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
4978 NULL, INVALID_SECTORS,
4979 backup_file, 0, forked,
4980 1 | info->reshape_active,
4981 freeze_reshape);
4982
4983 return ret_val;
4984 }
4985
4986 char *make_backup(char *name)
4987 {
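/* Build the name of the auto-created backup file for array 'name';
 * e.g. make_backup("md127") yields MAP_DIR "/backup_file-md127"
 * (commonly /run/mdadm/backup_file-md127, depending on how MAP_DIR is
 * configured).
 */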
4988 char *base = "backup_file-";
4989 int len;
4990 char *fname;
4991
4992 len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1;
4993 fname = xmalloc(len);
4994 sprintf(fname, "%s/%s%s", MAP_DIR, base, name);
4995 return fname;
4996 }
4997
4998 char *locate_backup(char *name)
4999 {
5000 char *fl = make_backup(name);
5001 struct stat stb;
5002
5003 if (stat(fl, &stb) == 0 &&
5004 S_ISREG(stb.st_mode))
5005 return fl;
5006
5007 free(fl);
5008 return NULL;
5009 }