Grow.c
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stdint.h>
28 #include <signal.h>
29
30 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
31 #error no endian defined
32 #endif
33 #include "md_u.h"
34 #include "md_p.h"
35
36 #ifndef offsetof
37 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
38 #endif
39
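/*
 * Re-apply a saved reshape 'critical section': open every member device
 * listed in 'content', then let the metadata handler recover the backup
 * (external metadata) or call Grow_restart() for native metadata.
 * Typically used when assembling an array whose reshape was interrupted.
 */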
40 int restore_backup(struct supertype *st,
41 struct mdinfo *content,
42 int working_disks,
43 int next_spare,
44 char *backup_file,
45 int verbose)
46 {
47 int i;
48 int *fdlist;
49 struct mdinfo *dev;
50 int err;
51 int disk_count = next_spare + working_disks;
52
53 dprintf("Called restore_backup()\n");
54 fdlist = xmalloc(sizeof(int) * disk_count);
55
56 enable_fds(next_spare);
57 for (i = 0; i < next_spare; i++)
58 fdlist[i] = -1;
59 for (dev = content->devs; dev; dev = dev->next) {
60 char buf[22];
61 int fd;
62 sprintf(buf, "%d:%d",
63 dev->disk.major,
64 dev->disk.minor);
65 fd = dev_open(buf, O_RDWR);
66
67 if (dev->disk.raid_disk >= 0)
68 fdlist[dev->disk.raid_disk] = fd;
69 else
70 fdlist[next_spare++] = fd;
71 }
72
73 if (st->ss->external && st->ss->recover_backup)
74 err = st->ss->recover_backup(st, content);
75 else
76 err = Grow_restart(st, content, fdlist, next_spare,
77 backup_file, verbose > 0);
78
79 while (next_spare > 0) {
80 next_spare--;
81 if (fdlist[next_spare] >= 0)
82 close(fdlist[next_spare]);
83 }
84 free(fdlist);
85 if (err) {
86 pr_err("Failed to restore critical"
87 " section for reshape - sorry.\n");
88 if (!backup_file)
89 pr_err("Possibly you need"
90 " to specify a --backup-file\n");
91 return 1;
92 }
93
94 dprintf("restore_backup() returns status OK.\n");
95 return 0;
96 }
97
98 int Grow_Add_device(char *devname, int fd, char *newdev)
99 {
100 /* Add a device to an active array.
101 * Currently, just extend a linear array.
102 * This requires writing a new superblock on the
103 * new device, calling the kernel to add the device,
104 * and if that succeeds, update the superblock on
105 * all other devices.
106 * This means that we need to *find* all other devices.
107 */
108 struct mdinfo info;
109
110 struct stat stb;
111 int nfd, fd2;
112 int d, nd;
113 struct supertype *st = NULL;
114 char *subarray = NULL;
115
116 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
117 pr_err("cannot get array info for %s\n", devname);
118 return 1;
119 }
120
121 if (info.array.level != -1) {
122 pr_err("can only add devices to linear arrays\n");
123 return 1;
124 }
125
126 st = super_by_fd(fd, &subarray);
127 if (!st) {
128 pr_err("cannot handle arrays with superblock version %d\n",
129 info.array.major_version);
130 return 1;
131 }
132
133 if (subarray) {
134 pr_err("Cannot grow linear sub-arrays yet\n");
135 free(subarray);
136 free(st);
137 return 1;
138 }
139
140 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
141 if (nfd < 0) {
142 pr_err("cannot open %s\n", newdev);
143 free(st);
144 return 1;
145 }
146 fstat(nfd, &stb);
147 if ((stb.st_mode & S_IFMT) != S_IFBLK) {
148 pr_err("%s is not a block device!\n", newdev);
149 close(nfd);
150 free(st);
151 return 1;
152 }
153 /* now check out all the devices and make sure we can read the
154 * superblock */
155 for (d=0 ; d < info.array.raid_disks ; d++) {
156 mdu_disk_info_t disk;
157 char *dv;
158
159 st->ss->free_super(st);
160
161 disk.number = d;
162 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
163 pr_err("cannot get device detail for device %d\n",
164 d);
165 close(nfd);
166 free(st);
167 return 1;
168 }
169 dv = map_dev(disk.major, disk.minor, 1);
170 if (!dv) {
171 pr_err("cannot find device file for device %d\n",
172 d);
173 close(nfd);
174 free(st);
175 return 1;
176 }
177 fd2 = dev_open(dv, O_RDWR);
178 if (fd2 < 0) {
179 pr_err("cannot open device file %s\n", dv);
180 close(nfd);
181 free(st);
182 return 1;
183 }
184
185 if (st->ss->load_super(st, fd2, NULL)) {
186 pr_err("cannot find super block on %s\n", dv);
187 close(nfd);
188 close(fd2);
189 free(st);
190 return 1;
191 }
192 close(fd2);
193 }
194 /* Ok, looks good. Let's update the superblock and write it out to
195 * newdev.
196 */
197
198 info.disk.number = d;
199 info.disk.major = major(stb.st_rdev);
200 info.disk.minor = minor(stb.st_rdev);
201 info.disk.raid_disk = d;
202 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
203 st->ss->update_super(st, &info, "linear-grow-new", newdev,
204 0, 0, NULL);
205
206 if (st->ss->store_super(st, nfd)) {
207 pr_err("Cannot store new superblock on %s\n",
208 newdev);
209 close(nfd);
210 return 1;
211 }
212 close(nfd);
213
214 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
215 pr_err("Cannot add new disk to this array\n");
216 return 1;
217 }
218 /* Well, that seems to have worked.
219 * Now go through and update all superblocks
220 */
221
222 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
223 pr_err("cannot get array info for %s\n", devname);
224 return 1;
225 }
226
227 nd = d;
228 for (d=0 ; d < info.array.raid_disks ; d++) {
229 mdu_disk_info_t disk;
230 char *dv;
231
232 disk.number = d;
233 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
234 pr_err("cannot get device detail for device %d\n",
235 d);
236 return 1;
237 }
238 dv = map_dev(disk.major, disk.minor, 1);
239 if (!dv) {
240 pr_err("cannot find device file for device %d\n",
241 d);
242 return 1;
243 }
244 fd2 = dev_open(dv, O_RDWR);
245 if (fd2 < 0) {
246 pr_err("cannot open device file %s\n", dv);
247 return 1;
248 }
249 if (st->ss->load_super(st, fd2, NULL)) {
250 pr_err("cannot find super block on %s\n", dv);
251 close(fd2);
252 return 1;
253 }
254 info.array.raid_disks = nd+1;
255 info.array.nr_disks = nd+1;
256 info.array.active_disks = nd+1;
257 info.array.working_disks = nd+1;
258
259 st->ss->update_super(st, &info, "linear-grow-update", dv,
260 0, 0, NULL);
261
262 if (st->ss->store_super(st, fd2)) {
263 pr_err("Cannot store new superblock on %s\n", dv);
264 close(fd2);
265 return 1;
266 }
267 close(fd2);
268 }
269
270 return 0;
271 }
272
273 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
274 {
275 /*
276 * First check that the array doesn't have a bitmap
277 * Then create the bitmap
278 * Then add it
279 *
280 * For internal bitmaps, we need to check the version,
281 * find all the active devices, and write the bitmap block
282 * to all devices
283 */
284 mdu_bitmap_file_t bmf;
285 mdu_array_info_t array;
286 struct supertype *st;
287 char *subarray = NULL;
288 int major = BITMAP_MAJOR_HI;
289 int vers = md_get_version(fd);
290 unsigned long long bitmapsize, array_size;
291
292 if (vers < 9003) {
293 major = BITMAP_MAJOR_HOSTENDIAN;
294 pr_err("Warning - bitmaps created on this kernel"
295 " are not portable\n"
296 " between different architectures. Consider upgrading"
297 " the Linux kernel.\n");
298 }
299
300 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
301 if (errno == ENOMEM)
302 pr_err("Memory allocation failure.\n");
303 else
304 pr_err("bitmaps not supported by this kernel.\n");
305 return 1;
306 }
307 if (bmf.pathname[0]) {
308 if (strcmp(s->bitmap_file,"none")==0) {
309 if (ioctl(fd, SET_BITMAP_FILE, -1)!= 0) {
310 pr_err("failed to remove bitmap %s\n",
311 bmf.pathname);
312 return 1;
313 }
314 return 0;
315 }
316 pr_err("%s already has a bitmap (%s)\n",
317 devname, bmf.pathname);
318 return 1;
319 }
320 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
321 pr_err("cannot get array status for %s\n", devname);
322 return 1;
323 }
324 if (array.state & (1<<MD_SB_BITMAP_PRESENT)) {
325 if (strcmp(s->bitmap_file, "none")==0) {
326 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
327 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
328 pr_err("failed to remove internal bitmap.\n");
329 return 1;
330 }
331 return 0;
332 }
333 pr_err("Internal bitmap already present on %s\n",
334 devname);
335 return 1;
336 }
337
338 if (strcmp(s->bitmap_file, "none") == 0) {
339 pr_err("no bitmap found on %s\n", devname);
340 return 1;
341 }
342 if (array.level <= 0) {
343 pr_err("Bitmaps not meaningful with level %s\n",
344 map_num(pers, array.level)?:"of this array");
345 return 1;
346 }
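	/* array.size is in KiB; shift left to get 512-byte sectors. */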
347 bitmapsize = array.size;
348 bitmapsize <<= 1;
349 if (get_dev_size(fd, NULL, &array_size) &&
350 array_size > (0x7fffffffULL<<9)) {
351 /* Array is big enough that we cannot trust array.size;
352 * try other approaches.
353 */
354 bitmapsize = get_component_size(fd);
355 }
356 if (bitmapsize == 0) {
357 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
358 return 1;
359 }
360
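	/* For RAID10, size the bitmap for the whole array rather than one
	 * device: total data is the per-device size times raid_disks divided
	 * by (near copies * far copies).  The layout word keeps near copies
	 * in bits 0-7 and far copies in bits 8-15.
	 */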
361 if (array.level == 10) {
362 int ncopies = (array.layout&255)*((array.layout>>8)&255);
363 bitmapsize = bitmapsize * array.raid_disks / ncopies;
364 }
365
366 st = super_by_fd(fd, &subarray);
367 if (!st) {
368 pr_err("Cannot understand version %d.%d\n",
369 array.major_version, array.minor_version);
370 return 1;
371 }
372 if (subarray) {
373 pr_err("Cannot add bitmaps to sub-arrays yet\n");
374 free(subarray);
375 free(st);
376 return 1;
377 }
378 if (strcmp(s->bitmap_file, "internal") == 0) {
379 int rv;
380 int d;
381 int offset_setable = 0;
382 struct mdinfo *mdi;
383 if (st->ss->add_internal_bitmap == NULL) {
384 pr_err("Internal bitmaps not supported "
385 "with %s metadata\n", st->ss->name);
386 return 1;
387 }
388 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
389 if (mdi)
390 offset_setable = 1;
391 for (d=0; d< st->max_devs; d++) {
392 mdu_disk_info_t disk;
393 char *dv;
394 disk.number = d;
395 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
396 continue;
397 if (disk.major == 0 &&
398 disk.minor == 0)
399 continue;
400 if ((disk.state & (1<<MD_DISK_SYNC))==0)
401 continue;
402 dv = map_dev(disk.major, disk.minor, 1);
403 if (dv) {
404 int fd2 = dev_open(dv, O_RDWR);
405 if (fd2 < 0)
406 continue;
407 if (st->ss->load_super(st, fd2, NULL)==0) {
408 if (st->ss->add_internal_bitmap(
409 st,
410 &s->bitmap_chunk, c->delay, s->write_behind,
411 bitmapsize, offset_setable,
412 major)
413 )
414 st->ss->write_bitmap(st, fd2);
415 else {
416 pr_err("failed to create internal bitmap"
417 " - chunksize problem.\n");
418 close(fd2);
419 return 1;
420 }
421 }
422 close(fd2);
423 }
424 }
425 if (offset_setable) {
426 st->ss->getinfo_super(st, mdi, NULL);
427 sysfs_init(mdi, fd, NULL);
428 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
429 mdi->bitmap_offset);
430 } else {
431 array.state |= (1<<MD_SB_BITMAP_PRESENT);
432 rv = ioctl(fd, SET_ARRAY_INFO, &array);
433 }
434 if (rv < 0) {
435 if (errno == EBUSY)
436 pr_err("Cannot add bitmap while array is"
437 " resyncing or reshaping etc.\n");
438 pr_err("failed to set internal bitmap.\n");
439 return 1;
440 }
441 } else {
442 int uuid[4];
443 int bitmap_fd;
444 int d;
445 int max_devs = st->max_devs;
446
447 /* try to load a superblock */
448 for (d = 0; d < max_devs; d++) {
449 mdu_disk_info_t disk;
450 char *dv;
451 int fd2;
452 disk.number = d;
453 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
454 continue;
455 if ((disk.major==0 && disk.minor==0) ||
456 (disk.state & (1<<MD_DISK_REMOVED)))
457 continue;
458 dv = map_dev(disk.major, disk.minor, 1);
459 if (!dv)
460 continue;
461 fd2 = dev_open(dv, O_RDONLY);
462 if (fd2 >= 0) {
463 if (st->ss->load_super(st, fd2, NULL) == 0) {
464 close(fd2);
465 st->ss->uuid_from_super(st, uuid);
466 break;
467 }
468 close(fd2);
469 }
470 }
471 if (d == max_devs) {
472 pr_err("cannot find UUID for array!\n");
473 return 1;
474 }
475 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid, s->bitmap_chunk,
476 c->delay, s->write_behind, bitmapsize, major)) {
477 return 1;
478 }
479 bitmap_fd = open(s->bitmap_file, O_RDWR);
480 if (bitmap_fd < 0) {
481 pr_err("weird: %s cannot be opened\n",
482 s->bitmap_file);
483 return 1;
484 }
485 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
486 int err = errno;
487 if (errno == EBUSY)
488 pr_err("Cannot add bitmap while array is"
489 " resyncing or reshaping etc.\n");
490 pr_err("Cannot set bitmap file for %s: %s\n",
491 devname, strerror(err));
492 return 1;
493 }
494 }
495
496 return 0;
497 }
498
499 /*
500 * When reshaping an array we might need to back up some data.
501 * This is written to all spares with a 'super_block' describing it.
502 * The superblock goes 4K from the end of the used space on the
503 * device.
504 * It is written after the backup is complete.
505 * It has the following structure.
506 */
507
508 static struct mdp_backup_super {
509 char magic[16]; /* md_backup_data-1 or -2 */
510 __u8 set_uuid[16];
511 __u64 mtime;
512 /* start/sizes in 512byte sectors */
513 __u64 devstart; /* address on backup device/file of data */
514 __u64 arraystart;
515 __u64 length;
516 __u32 sb_csum; /* csum of preceding bytes. */
517 __u32 pad1;
518 __u64 devstart2; /* offset in to data of second section */
519 __u64 arraystart2;
520 __u64 length2;
521 __u32 sb_csum2; /* csum of preceding bytes. */
522 __u8 pad[512-68-32];
523 } __attribute__((aligned(512))) bsb, bsb2;
524
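/*
 * Checksum over the backup superblock header.  Note that the loop adds
 * buf[0] on every iteration rather than buf[i]; existing backup files
 * were written with this sum, so it is presumably left unchanged for
 * compatibility.
 */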
525 static __u32 bsb_csum(char *buf, int len)
526 {
527 int i;
528 int csum = 0;
529 for (i = 0; i < len; i++)
530 csum = (csum<<3) + buf[0];
531 return __cpu_to_le32(csum);
532 }
533
534 static int check_idle(struct supertype *st)
535 {
536 /* Check that all member arrays for this container, or the
537 * container of this array, are idle
538 */
539 char *container = (st->container_devnm[0]
540 ? st->container_devnm : st->devnm);
541 struct mdstat_ent *ent, *e;
542 int is_idle = 1;
543
544 ent = mdstat_read(0, 0);
545 for (e = ent ; e; e = e->next) {
546 if (!is_container_member(e, container))
547 continue;
548 if (e->percent >= 0) {
549 is_idle = 0;
550 break;
551 }
552 }
553 free_mdstat(ent);
554 return is_idle;
555 }
556
557 static int freeze_container(struct supertype *st)
558 {
559 char *container = (st->container_devnm[0]
560 ? st->container_devnm : st->devnm);
561
562 if (!check_idle(st))
563 return -1;
564
565 if (block_monitor(container, 1)) {
566 pr_err("failed to freeze container\n");
567 return -2;
568 }
569
570 return 1;
571 }
572
573 static void unfreeze_container(struct supertype *st)
574 {
575 char *container = (st->container_devnm[0]
576 ? st->container_devnm : st->devnm);
577
578 unblock_monitor(container, 1);
579 }
580
581 static int freeze(struct supertype *st)
582 {
583 /* Try to freeze resync/rebuild on this array/container.
584 * Return -1 if the array is busy,
585 * return -2 if the container cannot be frozen,
586 * return 0 if this kernel doesn't support 'frozen'
587 * return 1 if it worked.
588 */
589 if (st->ss->external)
590 return freeze_container(st);
591 else {
592 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
593 int err;
594 char buf[20];
595
596 if (!sra)
597 return -1;
598 /* Need to clear any 'read-auto' status */
599 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
600 strncmp(buf, "read-auto", 9) == 0)
601 sysfs_set_str(sra, NULL, "array_state", "clean");
602
603 err = sysfs_freeze_array(sra);
604 sysfs_free(sra);
605 return err;
606 }
607 }
608
609 static void unfreeze(struct supertype *st)
610 {
611 if (st->ss->external)
612 return unfreeze_container(st);
613 else {
614 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
615 char buf[20];
616
617 if (sra &&
618 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0
619 && strcmp(buf, "frozen\n") == 0) {
620 printf("unfreeze\n");
621 sysfs_set_str(sra, NULL, "sync_action", "idle");
622 }
623 sysfs_free(sra);
624 }
625 }
626
627 static void wait_reshape(struct mdinfo *sra)
628 {
629 int fd = sysfs_get_fd(sra, NULL, "sync_action");
630 char action[20];
631
632 if (fd < 0)
633 return;
634
635 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
636 strncmp(action, "reshape", 7) == 0)
637 sysfs_wait(fd, NULL);
638 close(fd);
639 }
640
641 static int reshape_super(struct supertype *st, unsigned long long size,
642 int level, int layout, int chunksize, int raid_disks,
643 int delta_disks, char *backup_file, char *dev,
644 int direction, int verbose)
645 {
646 /* nothing extra to check in the native case */
647 if (!st->ss->external)
648 return 0;
649 if (!st->ss->reshape_super ||
650 !st->ss->manage_reshape) {
651 pr_err("%s metadata does not support reshape\n",
652 st->ss->name);
653 return 1;
654 }
655
656 return st->ss->reshape_super(st, size, level, layout, chunksize,
657 raid_disks, delta_disks, backup_file, dev,
658 direction, verbose);
659 }
660
661 static void sync_metadata(struct supertype *st)
662 {
663 if (st->ss->external) {
664 if (st->update_tail) {
665 flush_metadata_updates(st);
666 st->update_tail = &st->updates;
667 } else
668 st->ss->sync_metadata(st);
669 }
670 }
671
672 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
673 {
674 /* when dealing with external metadata subarrays we need to be
675 * prepared to handle EAGAIN. The kernel may need to wait for
676 * mdmon to mark the array active so the kernel can handle
677 * allocations/writeback when preparing the reshape action
678 * (md_allow_write()). We temporarily disable safe_mode_delay
679 * to close a race with the array_state going clean before the
680 * next write to raid_disks / stripe_cache_size
681 */
682 char safe[50];
683 int rc;
684
685 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
686 if (!container ||
687 (strcmp(name, "raid_disks") != 0 &&
688 strcmp(name, "stripe_cache_size") != 0))
689 return sysfs_set_num(sra, NULL, name, n);
690
691 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
692 if (rc <= 0)
693 return -1;
694 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
695 rc = sysfs_set_num(sra, NULL, name, n);
696 if (rc < 0 && errno == EAGAIN) {
697 ping_monitor(container);
698 /* if we get EAGAIN here then the monitor is not active
699 * so stop trying
700 */
701 rc = sysfs_set_num(sra, NULL, name, n);
702 }
703 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
704 return rc;
705 }
706
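/*
 * Start (or resume) a reshape: suspend the region about to be relocated
 * via suspend_lo/suspend_hi, bound the kernel's progress with
 * sync_min/sync_max, and, when not already running, kick it off by
 * writing 'reshape' to sync_action.
 */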
707 int start_reshape(struct mdinfo *sra, int already_running,
708 int before_data_disks, int data_disks)
709 {
710 int err;
711 unsigned long long sync_max_to_set;
712
713 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
714 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
715 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
716 sra->reshape_progress);
717 if (before_data_disks <= data_disks)
718 sync_max_to_set = sra->reshape_progress / data_disks;
719 else
720 sync_max_to_set = (sra->component_size * data_disks
721 - sra->reshape_progress) / data_disks;
722 if (!already_running)
723 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
724 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
725 if (!already_running)
726 err = err ?: sysfs_set_str(sra, NULL, "sync_action", "reshape");
727
728 return err;
729 }
730
731 void abort_reshape(struct mdinfo *sra)
732 {
733 sysfs_set_str(sra, NULL, "sync_action", "idle");
734 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
735 sysfs_set_num(sra, NULL, "suspend_hi", 0);
736 sysfs_set_num(sra, NULL, "suspend_lo", 0);
737 sysfs_set_num(sra, NULL, "sync_min", 0);
738 // It isn't safe to reset sync_max as we aren't monitoring.
739 // Array really should be stopped at this point.
740 }
741
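/*
 * Prepare a RAID10(near) or RAID1 array for takeover to RAID0: keep one
 * working, in-sync device per copy-group on sra->devs and mark the rest
 * faulty/removed (for external metadata the actual removal is left to
 * mdmon).  Returns 0 on success, 1 if the level is unsuitable or some
 * copy-group has no usable member.
 */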
742 int remove_disks_for_takeover(struct supertype *st,
743 struct mdinfo *sra,
744 int layout)
745 {
746 int nr_of_copies;
747 struct mdinfo *remaining;
748 int slot;
749
750 if (sra->array.level == 10)
751 nr_of_copies = layout & 0xff;
752 else if (sra->array.level == 1)
753 nr_of_copies = sra->array.raid_disks;
754 else
755 return 1;
756
757 remaining = sra->devs;
758 sra->devs = NULL;
759 /* for each copy-group, pick one device to keep and move it off the 'remaining' list. */
760 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
761 struct mdinfo **diskp;
762 int found = 0;
763
764 /* Find a working device to keep */
765 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
766 struct mdinfo *disk = *diskp;
767
768 if (disk->disk.raid_disk < slot)
769 continue;
770 if (disk->disk.raid_disk >= slot + nr_of_copies)
771 continue;
772 if (disk->disk.state & (1<<MD_DISK_REMOVED))
773 continue;
774 if (disk->disk.state & (1<<MD_DISK_FAULTY))
775 continue;
776 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
777 continue;
778
779 /* We have found a good disk to use! */
780 *diskp = disk->next;
781 disk->next = sra->devs;
782 sra->devs = disk;
783 found = 1;
784 break;
785 }
786 if (!found)
787 break;
788 }
789
790 if (slot < sra->array.raid_disks) {
791 /* didn't find all slots */
792 struct mdinfo **e;
793 e = &remaining;
794 while (*e)
795 e = &(*e)->next;
796 *e = sra->devs;
797 sra->devs = remaining;
798 return 1;
799 }
800
801 /* Remove all 'remaining' devices from the array */
802 while (remaining) {
803 struct mdinfo *sd = remaining;
804 remaining = sd->next;
805
806 sysfs_set_str(sra, sd, "state", "faulty");
807 sysfs_set_str(sra, sd, "slot", "none");
808 /* for external metadata, disks should be removed by mdmon */
809 if (!st->ss->external)
810 sysfs_set_str(sra, sd, "state", "remove");
811 sd->disk.state |= (1<<MD_DISK_REMOVED);
812 sd->disk.state &= ~(1<<MD_DISK_SYNC);
813 sd->next = sra->devs;
814 sra->devs = sd;
815 }
816 return 0;
817 }
818
819 void reshape_free_fdlist(int *fdlist,
820 unsigned long long *offsets,
821 int size)
822 {
823 int i;
824
825 for (i = 0; i < size; i++)
826 if (fdlist[i] >= 0)
827 close(fdlist[i]);
828
829 free(fdlist);
830 free(offsets);
831 }
832
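/*
 * Build the fd/offset lists used while saving the critical section:
 * in-sync members are opened read-only at their raid_disk slot, and
 * (when no backup file is given) spares are opened read-write and
 * appended after raid_disks to serve as backup targets.  Returns the
 * next free slot index, or -1 on failure.
 */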
833 int reshape_prepare_fdlist(char *devname,
834 struct mdinfo *sra,
835 int raid_disks,
836 int nrdisks,
837 unsigned long blocks,
838 char *backup_file,
839 int *fdlist,
840 unsigned long long *offsets)
841 {
842 int d = 0;
843 struct mdinfo *sd;
844
845 enable_fds(nrdisks);
846 for (d = 0; d <= nrdisks; d++)
847 fdlist[d] = -1;
848 d = raid_disks;
849 for (sd = sra->devs; sd; sd = sd->next) {
850 if (sd->disk.state & (1<<MD_DISK_FAULTY))
851 continue;
852 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
853 char *dn = map_dev(sd->disk.major,
854 sd->disk.minor, 1);
855 fdlist[sd->disk.raid_disk]
856 = dev_open(dn, O_RDONLY);
857 offsets[sd->disk.raid_disk] = sd->data_offset*512;
858 if (fdlist[sd->disk.raid_disk] < 0) {
859 pr_err("%s: cannot open component %s\n",
860 devname, dn ? dn : "-unknown-");
861 d = -1;
862 goto release;
863 }
864 } else if (backup_file == NULL) {
865 /* spare */
866 char *dn = map_dev(sd->disk.major,
867 sd->disk.minor, 1);
868 fdlist[d] = dev_open(dn, O_RDWR);
869 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
870 if (fdlist[d] < 0) {
871 pr_err("%s: cannot open component %s\n",
872 devname, dn ? dn : "-unknown-");
873 d = -1;
874 goto release;
875 }
876 d++;
877 }
878 }
879 release:
880 return d;
881 }
882
883 int reshape_open_backup_file(char *backup_file,
884 int fd,
885 char *devname,
886 long blocks,
887 int *fdlist,
888 unsigned long long *offsets,
889 int restart)
890 {
891 /* Return 1 on success, 0 on any form of failure */
892 /* need to check backup file is large enough */
893 char buf[512];
894 struct stat stb;
895 dev_t dev;
896 int i;
897
898 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
899 S_IRUSR | S_IWUSR);
900 *offsets = 8 * 512;
901 if (*fdlist < 0) {
902 pr_err("%s: cannot create backup file %s: %s\n",
903 devname, backup_file, strerror(errno));
904 return 0;
905 }
906 /* Guard against backup file being on array device.
907 * If array is partitioned or if LVM etc is in the
908 * way this will not notice, but it is better than
909 * nothing.
910 */
911 fstat(*fdlist, &stb);
912 dev = stb.st_dev;
913 fstat(fd, &stb);
914 if (stb.st_rdev == dev) {
915 pr_err("backup file must NOT be"
916 " on the array being reshaped.\n");
917 close(*fdlist);
918 return 0;
919 }
920
921 memset(buf, 0, 512);
922 for (i=0; i < blocks + 8 ; i++) {
923 if (write(*fdlist, buf, 512) != 512) {
924 pr_err("%s: cannot create"
925 " backup file %s: %s\n",
926 devname, backup_file, strerror(errno));
927 return 0;
928 }
929 }
930 if (fsync(*fdlist) != 0) {
931 pr_err("%s: cannot create backup file %s: %s\n",
932 devname, backup_file, strerror(errno));
933 return 0;
934 }
935
936 return 1;
937 }
938
939 unsigned long compute_backup_blocks(int nchunk, int ochunk,
940 unsigned int ndata, unsigned int odata)
941 {
942 unsigned long a, b, blocks;
943 /* So how much do we need to back up?
944 * We need an amount of data which is both a whole number of
945 * old stripes and a whole number of new stripes.
946 * So we use the LCM of the old and new (chunksize*datadisks).
947 */
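	/* Worked example (hypothetical numbers): growing from 3 to 4 data
	 * disks with 64K chunks gives a = 128*3 = 384, b = 128*4 = 512,
	 * GCD = 128, so blocks = 128*128*3*4/128 = 1536 sectors (768K).
	 */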
948 a = (ochunk/512) * odata;
949 b = (nchunk/512) * ndata;
950 /* Find GCD */
951 a = GCD(a, b);
952 /* LCM == product / GCD */
953 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
954
955 return blocks;
956 }
957
958 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
959 {
960 /* Based on the current array state in info->array and
961 * the changes in info->new_* etc, determine:
962 * - whether the change is possible
963 * - Intermediate level/raid_disks/layout
964 * - whether a restriping reshape is needed
965 * - number of sectors in minimum change unit. This
966 * will cover a whole number of stripes in 'before' and
967 * 'after'.
968 *
969 * Return message if the change should be rejected
970 * NULL if the change can be achieved
971 *
972 * This can be called as part of starting a reshape, or
973 * when assembling an array that is undergoing reshape.
974 */
975 int near, far, offset, copies;
976 int new_disks;
977 int old_chunk, new_chunk;
978 /* delta_parity records change in number of devices
979 * caused by level change
980 */
981 int delta_parity = 0;
982
983 memset(re, 0, sizeof(*re));
984
985 /* If a new level is not explicitly given, we assume no change */
986 if (info->new_level == UnSet)
987 info->new_level = info->array.level;
988
989 if (info->new_chunk)
990 switch (info->new_level) {
991 case 0:
992 case 4:
993 case 5:
994 case 6:
995 case 10:
996 /* chunk size is meaningful, must divide component_size
997 * evenly
998 */
999 if (info->component_size % (info->new_chunk/512)) {
1000 unsigned long long shrink = info->component_size;
1001 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1002 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1003 info->new_chunk/1024, info->component_size/2);
1004 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1005 devname, shrink/2);
1006 pr_err("will shrink the array so the given chunk size would work.\n");
1007 return "";
1008 }
1009 break;
1010 default:
1011 return "chunk size not meaningful for this level";
1012 }
1013 else
1014 info->new_chunk = info->array.chunk_size;
1015
1016 switch (info->array.level) {
1017 default:
1018 return "Cannot understand this RAID level";
1019 case 1:
1020 /* RAID1 can convert to RAID1 with different disks, or
1021 * raid5 with 2 disks, or
1022 * raid0 with 1 disk
1023 */
1024 if (info->new_level > 1 &&
1025 (info->component_size & 7))
1026 return "Cannot convert RAID1 of this size - "
1027 "reduce size to multiple of 4K first.";
1028 if (info->new_level == 0) {
1029 if (info->delta_disks != UnSet &&
1030 info->delta_disks != 0)
1031 return "Cannot change number of disks "
1032 "with RAID1->RAID0 conversion";
1033 re->level = 0;
1034 re->before.data_disks = 1;
1035 re->after.data_disks = 1;
1036 return NULL;
1037 }
1038 if (info->new_level == 1) {
1039 if (info->delta_disks == UnSet)
1040 /* Don't know what to do */
1041 return "no change requested for Growing RAID1";
1042 re->level = 1;
1043 return NULL;
1044 }
1045 if (info->array.raid_disks == 2 &&
1046 info->new_level == 5) {
1047
1048 re->level = 5;
1049 re->before.data_disks = 1;
1050 if (info->delta_disks != UnSet &&
1051 info->delta_disks != 0)
1052 re->after.data_disks = 1 + info->delta_disks;
1053 else
1054 re->after.data_disks = 1;
1055 if (re->after.data_disks < 1)
1056 return "Number of disks too small for RAID5";
1057
1058 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1059 info->array.chunk_size = 65536;
1060 break;
1061 }
1062 /* Could do some multi-stage conversions, but leave that to
1063 * later.
1064 */
1065 return "Impossibly level change request for RAID1";
1066
1067 case 10:
1068 /* RAID10 can be converted from near mode to
1069 * RAID0 by removing some devices.
1070 * It can also be reshaped if the kernel supports
1071 * new_data_offset.
1072 */
1073 switch (info->new_level) {
1074 case 0:
1075 if ((info->array.layout & ~0xff) != 0x100)
1076 return "Cannot Grow RAID10 with far/offset layout";
1077 /* number of devices must be multiple of number of copies */
1078 if (info->array.raid_disks % (info->array.layout & 0xff))
1079 return "RAID10 layout too complex for Grow operation";
1080
1081 new_disks = (info->array.raid_disks
1082 / (info->array.layout & 0xff));
1083 if (info->delta_disks == UnSet)
1084 info->delta_disks = (new_disks
1085 - info->array.raid_disks);
1086
1087 if (info->delta_disks != new_disks - info->array.raid_disks)
1088 return "New number of raid-devices impossible for RAID10";
1089 if (info->new_chunk &&
1090 info->new_chunk != info->array.chunk_size)
1091 return "Cannot change chunk-size with RAID10 Grow";
1092
1093 /* looks good */
1094 re->level = 0;
1095 re->before.data_disks = new_disks;
1096 re->after.data_disks = re->before.data_disks;
1097 return NULL;
1098
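		/* RAID10 layout word: bits 0-7 near copies, bits 8-15 far
		 * copies, bit 16 set for 'offset' mode.
		 */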
1099 case 10:
1100 near = info->array.layout & 0xff;
1101 far = (info->array.layout >> 8) & 0xff;
1102 offset = info->array.layout & 0x10000;
1103 if (far > 1 && !offset)
1104 return "Cannot reshape RAID10 in far-mode";
1105 copies = near * far;
1106
1107 old_chunk = info->array.chunk_size * far;
1108
1109 if (info->new_layout == UnSet)
1110 info->new_layout = info->array.layout;
1111 else {
1112 near = info->new_layout & 0xff;
1113 far = (info->new_layout >> 8) & 0xff;
1114 offset = info->new_layout & 0x10000;
1115 if (far > 1 && !offset)
1116 return "Cannot reshape RAID10 to far-mode";
1117 if (near * far != copies)
1118 return "Cannot change number of copies"
1119 " when reshaping RAID10";
1120 }
1121 if (info->delta_disks == UnSet)
1122 info->delta_disks = 0;
1123 new_disks = (info->array.raid_disks +
1124 info->delta_disks);
1125
1126 new_chunk = info->new_chunk * far;
1127
1128 re->level = 10;
1129 re->before.layout = info->array.layout;
1130 re->before.data_disks = info->array.raid_disks;
1131 re->after.layout = info->new_layout;
1132 re->after.data_disks = new_disks;
1133 /* For RAID10 we don't do backup but do allow reshape,
1134 * so set backup_blocks to INVALID_SECTORS rather than
1135 * zero.
1136 * And there is no need to synchronise stripes on both
1137 * 'old' and 'new'. So the important
1138 * number is the minimum data_offset difference
1139 * which is the larger of the old and new (far copies * chunk).
1140 */
1141 re->backup_blocks = INVALID_SECTORS;
1142 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1143 if (new_disks < re->before.data_disks &&
1144 info->space_after < re->min_offset_change)
1145 /* Reduce component size by one chunk */
1146 re->new_size = (info->component_size -
1147 re->min_offset_change);
1148 else
1149 re->new_size = info->component_size;
1150 re->new_size = re->new_size * new_disks / copies;
1151 return NULL;
1152
1153 default:
1154 return "RAID10 can only be changed to RAID0";
1155 }
1156 case 0:
1157 /* RAID0 can be converted to RAID10, or to RAID456 */
1158 if (info->new_level == 10) {
1159 if (info->new_layout == UnSet && info->delta_disks == UnSet) {
1160 /* Assume near=2 layout */
1161 info->new_layout = 0x102;
1162 info->delta_disks = info->array.raid_disks;
1163 }
1164 if (info->new_layout == UnSet) {
1165 int copies = 1 + (info->delta_disks
1166 / info->array.raid_disks);
1167 if (info->array.raid_disks * (copies-1)
1168 != info->delta_disks)
1169 return "Impossible number of devices"
1170 " for RAID0->RAID10";
1171 info->new_layout = 0x100 + copies;
1172 }
1173 if (info->delta_disks == UnSet) {
1174 int copies = info->new_layout & 0xff;
1175 if (info->new_layout != 0x100 + copies)
1176 return "New layout impossible"
1177 " for RAID0->RAID10";;
1178 info->delta_disks = (copies - 1) *
1179 info->array.raid_disks;
1180 }
1181 if (info->new_chunk &&
1182 info->new_chunk != info->array.chunk_size)
1183 return "Cannot change chunk-size with RAID0->RAID10";
1184 /* looks good */
1185 re->level = 10;
1186 re->before.data_disks = (info->array.raid_disks +
1187 info->delta_disks);
1188 re->after.data_disks = re->before.data_disks;
1189 re->before.layout = info->new_layout;
1190 return NULL;
1191 }
1192
1193 /* RAID0 can also convert to RAID0/4/5/6 by first converting to
1194 * a raid4 style layout of the final level.
1195 */
1196 switch (info->new_level) {
1197 case 4:
1198 delta_parity = 1;
1199 case 0:
1200 re->level = 4;
1201 re->before.layout = 0;
1202 break;
1203 case 5:
1204 delta_parity = 1;
1205 re->level = 5;
1206 re->before.layout = ALGORITHM_PARITY_N;
1207 if (info->new_layout == UnSet)
1208 info->new_layout = map_name(r5layout, "default");
1209 break;
1210 case 6:
1211 delta_parity = 2;
1212 re->level = 6;
1213 re->before.layout = ALGORITHM_PARITY_N;
1214 if (info->new_layout == UnSet)
1215 info->new_layout = map_name(r6layout, "default");
1216 break;
1217 default:
1218 return "Impossible level change requested";
1219 }
1220 re->before.data_disks = info->array.raid_disks;
1221 /* determining 'after' layout happens outside this 'switch' */
1222 break;
1223
1224 case 4:
1225 info->array.layout = ALGORITHM_PARITY_N;
1226 case 5:
1227 switch (info->new_level) {
1228 case 0:
1229 delta_parity = -1;
1230 case 4:
1231 re->level = info->array.level;
1232 re->before.data_disks = info->array.raid_disks - 1;
1233 re->before.layout = info->array.layout;
1234 break;
1235 case 5:
1236 re->level = 5;
1237 re->before.data_disks = info->array.raid_disks - 1;
1238 re->before.layout = info->array.layout;
1239 break;
1240 case 6:
1241 delta_parity = 1;
1242 re->level = 6;
1243 re->before.data_disks = info->array.raid_disks - 1;
1244 switch (info->array.layout) {
1245 case ALGORITHM_LEFT_ASYMMETRIC:
1246 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1247 break;
1248 case ALGORITHM_RIGHT_ASYMMETRIC:
1249 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1250 break;
1251 case ALGORITHM_LEFT_SYMMETRIC:
1252 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1253 break;
1254 case ALGORITHM_RIGHT_SYMMETRIC:
1255 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1256 break;
1257 case ALGORITHM_PARITY_0:
1258 re->before.layout = ALGORITHM_PARITY_0_6;
1259 break;
1260 case ALGORITHM_PARITY_N:
1261 re->before.layout = ALGORITHM_PARITY_N_6;
1262 break;
1263 default:
1264 return "Cannot convert an array with this layout";
1265 }
1266 break;
1267 case 1:
1268 if (info->array.raid_disks != 2)
1269 return "Can only convert a 2-device array to RAID1";
1270 if (info->delta_disks != UnSet &&
1271 info->delta_disks != 0)
1272 return "Cannot set raid_disk when "
1273 "converting RAID5->RAID1";
1274 re->level = 1;
1275 info->new_chunk = 0;
1276 return NULL;
1277 default:
1278 return "Impossible level change requested";
1279 }
1280 break;
1281 case 6:
1282 switch (info->new_level) {
1283 case 4:
1284 case 5:
1285 delta_parity = -1;
1286 case 6:
1287 re->level = 6;
1288 re->before.data_disks = info->array.raid_disks - 2;
1289 re->before.layout = info->array.layout;
1290 break;
1291 default:
1292 return "Impossible level change requested";
1293 }
1294 break;
1295 }
1296
1297 /* If we reached here then it looks like a re-stripe is
1298 * happening. We have determined the intermediate level
1299 * and initial raid_disks/layout and stored these in 're'.
1300 *
1301 * We need to deduce the final layout that can be atomically
1302 * converted to the end state.
1303 */
1304 switch (info->new_level) {
1305 case 0:
1306 /* We can only get to RAID0 from RAID4 or RAID5
1307 * with appropriate layout and one extra device
1308 */
1309 if (re->level != 4 && re->level != 5)
1310 return "Cannot covert to RAID0 from this level";
1311
1312 switch (re->level) {
1313 case 4:
1314 re->before.layout = 0;
1315 re->after.layout = 0;
1316 break;
1317 case 5:
1318 re->after.layout = ALGORITHM_PARITY_N;
1319 break;
1320 }
1321 break;
1322
1323 case 4:
1324 /* We can only get to RAID4 from RAID5 */
1325 if (re->level != 4 && re->level != 5)
1326 return "Cannot convert to RAID4 from this level";
1327
1328 switch (re->level) {
1329 case 4:
1330 re->before.layout = 0;
1331 re->after.layout = 0;
1332 break;
1333 case 5:
1334 re->after.layout = ALGORITHM_PARITY_N;
1335 break;
1336 }
1337 break;
1338
1339 case 5:
1340 /* We get to RAID5 from RAID5 or RAID6 */
1341 if (re->level != 5 && re->level != 6)
1342 return "Cannot convert to RAID5 from this level";
1343
1344 switch (re->level) {
1345 case 5:
1346 if (info->new_layout == UnSet)
1347 re->after.layout = re->before.layout;
1348 else
1349 re->after.layout = info->new_layout;
1350 break;
1351 case 6:
1352 if (info->new_layout == UnSet)
1353 info->new_layout = re->before.layout;
1354
1355 /* after.layout needs to be raid6 version of new_layout */
1356 if (info->new_layout == ALGORITHM_PARITY_N)
1357 re->after.layout = ALGORITHM_PARITY_N;
1358 else {
1359 char layout[40];
1360 char *ls = map_num(r5layout, info->new_layout);
1361 int l;
1362 if (ls) {
1363 /* Current RAID6 layout has a RAID5
1364 * equivalent - good
1365 */
1366 strcat(strcpy(layout, ls), "-6");
1367 l = map_name(r6layout, layout);
1368 if (l == UnSet)
1369 return "Cannot find RAID6 layout"
1370 " to convert to";
1371 } else {
1372 /* Current RAID6 has no equivalent.
1373 * If it is already a '-6' layout we
1374 * can leave it unchanged, else we must
1375 * fail
1376 */
1377 ls = map_num(r6layout, info->new_layout);
1378 if (!ls ||
1379 strcmp(ls+strlen(ls)-2, "-6") != 0)
1380 return "Please specify new layout";
1381 l = info->new_layout;
1382 }
1383 re->after.layout = l;
1384 }
1385 }
1386 break;
1387
1388 case 6:
1389 /* We must already be at level 6 */
1390 if (re->level != 6)
1391 return "Impossible level change";
1392 if (info->new_layout == UnSet)
1393 re->after.layout = info->array.layout;
1394 else
1395 re->after.layout = info->new_layout;
1396 break;
1397 default:
1398 return "Impossible level change requested";
1399 }
1400 if (info->delta_disks == UnSet)
1401 info->delta_disks = delta_parity;
1402
1403 re->after.data_disks = (re->before.data_disks
1404 + info->delta_disks
1405 - delta_parity);
1406 switch (re->level) {
1407 case 6: re->parity = 2;
1408 break;
1409 case 4:
1410 case 5: re->parity = 1;
1411 break;
1412 default: re->parity = 0;
1413 break;
1414 }
1415 /* So we have a restripe operation, we need to calculate the number
1416 * of blocks per reshape operation.
1417 */
1418 re->new_size = info->component_size * re->before.data_disks;
1419 if (info->new_chunk == 0)
1420 info->new_chunk = info->array.chunk_size;
1421 if (re->after.data_disks == re->before.data_disks &&
1422 re->after.layout == re->before.layout &&
1423 info->new_chunk == info->array.chunk_size) {
1424 /* Nothing to change, can change level immediately. */
1425 re->level = info->new_level;
1426 re->backup_blocks = 0;
1427 return NULL;
1428 }
1429 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1430 /* chunk and layout changes make no difference */
1431 re->level = info->new_level;
1432 re->backup_blocks = 0;
1433 return NULL;
1434 }
1435
1436 if (re->after.data_disks == re->before.data_disks &&
1437 get_linux_version() < 2006032)
1438 return "in-place reshape is not safe before 2.6.32 - sorry.";
1439
1440 if (re->after.data_disks < re->before.data_disks &&
1441 get_linux_version() < 2006030)
1442 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1443
1444 re->backup_blocks = compute_backup_blocks(
1445 info->new_chunk, info->array.chunk_size,
1446 re->after.data_disks,
1447 re->before.data_disks);
1448 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1449
1450 re->new_size = info->component_size * re->after.data_disks;
1451 return NULL;
1452 }
1453
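/*
 * For externally managed metadata, push a grown custom_array_size from
 * the container metadata out to the md 'array_size' sysfs attribute.
 * Returns 0 if the size was updated, -1 otherwise.
 */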
1454 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1455 char *text_version)
1456 {
1457 struct mdinfo *info;
1458 char *subarray;
1459 int ret_val = -1;
1460
1461 if ((st == NULL) || (sra == NULL))
1462 return ret_val;
1463
1464 if (text_version == NULL)
1465 text_version = sra->text_version;
1466 subarray = strchr(text_version+1, '/')+1;
1467 info = st->ss->container_content(st, subarray);
1468 if (info) {
1469 unsigned long long current_size = 0;
1470 unsigned long long new_size =
1471 info->custom_array_size/2;
1472
1473 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1474 new_size > current_size) {
1475 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1476 < 0)
1477 dprintf("Error: Cannot set array size");
1478 else {
1479 ret_val = 0;
1480 dprintf("Array size changed");
1481 }
1482 dprintf(" from %llu to %llu.\n",
1483 current_size, new_size);
1484 }
1485 sysfs_free(info);
1486 } else
1487 dprintf("Error: set_array_size(): info pointer in NULL\n");
1488
1489 return ret_val;
1490 }
1491
1492 static int reshape_array(char *container, int fd, char *devname,
1493 struct supertype *st, struct mdinfo *info,
1494 int force, struct mddev_dev *devlist,
1495 unsigned long long data_offset,
1496 char *backup_file, int verbose, int forked,
1497 int restart, int freeze_reshape);
1498 static int reshape_container(char *container, char *devname,
1499 int mdfd,
1500 struct supertype *st,
1501 struct mdinfo *info,
1502 int force,
1503 char *backup_file,
1504 int verbose, int restart, int freeze_reshape);
1505
1506 int Grow_reshape(char *devname, int fd,
1507 struct mddev_dev *devlist,
1508 unsigned long long data_offset,
1509 struct context *c, struct shape *s)
1510 {
1511 /* Make some changes in the shape of an array.
1512 * The kernel must support the change.
1513 *
1514 * There are three different changes. Each can trigger
1515 * a resync or recovery so we freeze that until we have
1516 * requested everything (if kernel supports freezing - 2.6.30).
1517 * The steps are:
1518 * - change size (i.e. component_size)
1519 * - change level
1520 * - change layout/chunksize/ndisks
1521 *
1522 * The last can require a reshape. It is different on different
1523 * levels so we need to check the level before actioning it.
1524 * Sometimes the level change needs to be requested after the
1525 * reshape (e.g. raid6->raid5, raid5->raid0)
1526 *
1527 */
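	/*
	 * Illustrative invocations that end up here (device names are
	 * examples only):
	 *   mdadm --grow /dev/md0 --size=max
	 *   mdadm --grow /dev/md0 --raid-devices=5 --backup-file=/root/md0.bak
	 *   mdadm --grow /dev/md0 --level=6 --layout=preserve
	 */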
1528 struct mdu_array_info_s array;
1529 int rv = 0;
1530 struct supertype *st;
1531 char *subarray = NULL;
1532
1533 int frozen;
1534 int changed = 0;
1535 char *container = NULL;
1536 int cfd = -1;
1537
1538 struct mddev_dev *dv;
1539 int added_disks;
1540
1541 struct mdinfo info;
1542 struct mdinfo *sra;
1543
1544 if (ioctl(fd, GET_ARRAY_INFO, &array) < 0) {
1545 pr_err("%s is not an active md array - aborting\n",
1546 devname);
1547 return 1;
1548 }
1549 if (data_offset != INVALID_SECTORS && array.level != 10
1550 && (array.level < 4 || array.level > 6)) {
1551 pr_err("--grow --data-offset not yet supported\n");
1552 return 1;
1553 }
1554
1555 if (s->size > 0 &&
1556 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1557 pr_err("cannot change component size at the same time "
1558 "as other changes.\n"
1559 " Change size first, then check data is intact before "
1560 "making other changes.\n");
1561 return 1;
1562 }
1563
1564 if (s->raiddisks && s->raiddisks < array.raid_disks && array.level > 1 &&
1565 get_linux_version() < 2006032 &&
1566 !check_env("MDADM_FORCE_FEWER")) {
1567 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1568 " Please use a newer kernel\n");
1569 return 1;
1570 }
1571
1572 st = super_by_fd(fd, &subarray);
1573 if (!st) {
1574 pr_err("Unable to determine metadata format for %s\n", devname);
1575 return 1;
1576 }
1577 if (s->raiddisks > st->max_devs) {
1578 pr_err("Cannot increase raid-disks on this array"
1579 " beyond %d\n", st->max_devs);
1580 return 1;
1581 }
1582
1583 /* in the external case we need to check that the requested reshape is
1584 * supported, and perform an initial check that the container holds the
1585 * pre-requisite spare devices (mdmon owns final validation)
1586 */
1587 if (st->ss->external) {
1588 int rv;
1589
1590 if (subarray) {
1591 container = st->container_devnm;
1592 cfd = open_dev_excl(st->container_devnm);
1593 } else {
1594 container = st->devnm;
1595 close(fd);
1596 cfd = open_dev_excl(st->devnm);
1597 fd = cfd;
1598 }
1599 if (cfd < 0) {
1600 pr_err("Unable to open container for %s\n",
1601 devname);
1602 free(subarray);
1603 return 1;
1604 }
1605
1606 rv = st->ss->load_container(st, cfd, NULL);
1607
1608 if (rv) {
1609 pr_err("Cannot read superblock for %s\n",
1610 devname);
1611 free(subarray);
1612 return 1;
1613 }
1614
1615 /* check if operation is supported for metadata handler */
1616 if (st->ss->container_content) {
1617 struct mdinfo *cc = NULL;
1618 struct mdinfo *content = NULL;
1619
1620 cc = st->ss->container_content(st, subarray);
1621 for (content = cc; content ; content = content->next) {
1622 int allow_reshape = 1;
1623
1624 /* check if reshape is allowed based on metadata
1625 * indications stored in content.array.state
1626 */
1627 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
1628 allow_reshape = 0;
1629 if (content->array.state
1630 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE))
1631 allow_reshape = 0;
1632 if (!allow_reshape) {
1633 pr_err("cannot reshape arrays in"
1634 " container with unsupported"
1635 " metadata: %s(%s)\n",
1636 devname, container);
1637 sysfs_free(cc);
1638 free(subarray);
1639 return 1;
1640 }
1641 }
1642 sysfs_free(cc);
1643 }
1644 if (mdmon_running(container))
1645 st->update_tail = &st->updates;
1646 }
1647
1648 added_disks = 0;
1649 for (dv = devlist; dv; dv = dv->next)
1650 added_disks++;
1651 if (s->raiddisks > array.raid_disks &&
1652 array.spare_disks +added_disks < (s->raiddisks - array.raid_disks) &&
1653 !c->force) {
1654 pr_err("Need %d spare%s to avoid degraded array,"
1655 " and only have %d.\n"
1656 " Use --force to over-ride this check.\n",
1657 s->raiddisks - array.raid_disks,
1658 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1659 array.spare_disks + added_disks);
1660 return 1;
1661 }
1662
1663 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS
1664 | GET_STATE | GET_VERSION);
1665 if (sra) {
1666 if (st->ss->external && subarray == NULL) {
1667 array.level = LEVEL_CONTAINER;
1668 sra->array.level = LEVEL_CONTAINER;
1669 }
1670 } else {
1671 pr_err("failed to read sysfs parameters for %s\n",
1672 devname);
1673 return 1;
1674 }
1675 frozen = freeze(st);
1676 if (frozen < -1) {
1677 /* freeze() already spewed the reason */
1678 sysfs_free(sra);
1679 return 1;
1680 } else if (frozen < 0) {
1681 pr_err("%s is performing resync/recovery and cannot"
1682 " be reshaped\n", devname);
1683 sysfs_free(sra);
1684 return 1;
1685 }
1686
1687 /* ========= set size =============== */
1688 if (s->size > 0 && (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1689 unsigned long long orig_size = get_component_size(fd)/2;
1690 unsigned long long min_csize;
1691 struct mdinfo *mdi;
1692 int raid0_takeover = 0;
1693
1694 if (orig_size == 0)
1695 orig_size = (unsigned) array.size;
1696
1697 if (orig_size == 0) {
1698 pr_err("Cannot set device size in this type of array.\n");
1699 rv = 1;
1700 goto release;
1701 }
1702
1703 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1704 devname, APPLY_METADATA_CHANGES, c->verbose > 0)) {
1705 rv = 1;
1706 goto release;
1707 }
1708 sync_metadata(st);
1709 if (st->ss->external) {
1710 /* metadata can impose a size limitation;
1711 * update the size value according to the metadata information
1712 */
1713 struct mdinfo *sizeinfo =
1714 st->ss->container_content(st, subarray);
1715 if (sizeinfo) {
1716 unsigned long long new_size =
1717 sizeinfo->custom_array_size/2;
1718 int data_disks = get_data_disks(
1719 sizeinfo->array.level,
1720 sizeinfo->array.layout,
1721 sizeinfo->array.raid_disks);
1722 new_size /= data_disks;
1723 dprintf("Metadata size correction from %llu to "
1724 "%llu (%llu)\n", orig_size, new_size,
1725 new_size * data_disks);
1726 s->size = new_size;
1727 sysfs_free(sizeinfo);
1728 }
1729 }
1730
1731 /* Update the size of each member device in case
1732 * they have been resized. This will never reduce
1733 * below the current used-size. The "size" attribute
1734 * understands '0' to mean 'max'.
1735 */
1736 min_csize = 0;
1737 rv = 0;
1738 for (mdi = sra->devs; mdi; mdi = mdi->next) {
1739 if (sysfs_set_num(sra, mdi, "size",
1740 s->size == MAX_SIZE ? 0 : s->size) < 0) {
1741 /* Probably kernel refusing to let us
1742 * reduce the size - not an error.
1743 */
1744 break;
1745 }
1746 if (array.not_persistent == 0 &&
1747 array.major_version == 0 &&
1748 get_linux_version() < 3001000) {
1749 /* Dangerous to allow size to exceed 2TB */
1750 unsigned long long csize;
1751 if (sysfs_get_ll(sra, mdi, "size", &csize) == 0) {
1752 if (csize >= 2ULL*1024*1024*1024)
1753 csize = 2ULL*1024*1024*1024;
1754 if ((min_csize == 0 || (min_csize
1755 > csize)))
1756 min_csize = csize;
1757 }
1758 }
1759 }
1760 if (rv) {
1761 pr_err("Cannot set size on "
1762 "array members.\n");
1763 goto size_change_error;
1764 }
1765 if (min_csize && s->size > min_csize) {
1766 pr_err("Cannot safely make this array "
1767 "use more than 2TB per device on this kernel.\n");
1768 rv = 1;
1769 goto size_change_error;
1770 }
1771 if (min_csize && s->size == MAX_SIZE) {
1772 /* Don't let the kernel choose a size - it will get
1773 * it wrong
1774 */
1775 pr_err("Limited v0.90 array to "
1776 "2TB per device\n");
1777 s->size = min_csize;
1778 }
1779 if (st->ss->external) {
1780 if (sra->array.level == 0) {
1781 rv = sysfs_set_str(sra, NULL, "level",
1782 "raid5");
1783 if (!rv) {
1784 raid0_takeover = 1;
1785 /* get array parameters after takeover
1786 * to change one parameter at a time only
1787 */
1788 rv = ioctl(fd, GET_ARRAY_INFO, &array);
1789 }
1790 }
1791 /* make sure mdmon is
1792 * aware of the new level */
1793 if (!mdmon_running(st->container_devnm))
1794 start_mdmon(st->container_devnm);
1795 ping_monitor(container);
1796 if (mdmon_running(st->container_devnm) &&
1797 st->update_tail == NULL)
1798 st->update_tail = &st->updates;
1799 }
1800
1801 if (s->size == MAX_SIZE)
1802 s->size = 0;
1803 array.size = s->size;
1804 if ((unsigned)array.size != s->size) {
1805 /* got truncated to 32bit, write to
1806 * component_size instead
1807 */
1808 if (sra)
1809 rv = sysfs_set_num(sra, NULL,
1810 "component_size", s->size);
1811 else
1812 rv = -1;
1813 } else {
1814 rv = ioctl(fd, SET_ARRAY_INFO, &array);
1815
1816 /* manage array size when it is managed externally
1817 */
1818 if ((rv == 0) && st->ss->external)
1819 rv = set_array_size(st, sra, sra->text_version);
1820 }
1821
1822 if (raid0_takeover) {
1823 /* do not resync non-existent parity,
1824 * we will drop it anyway
1825 */
1826 sysfs_set_str(sra, NULL, "sync_action", "frozen");
1827 /* go back to raid0, drop parity disk
1828 */
1829 sysfs_set_str(sra, NULL, "level", "raid0");
1830 ioctl(fd, GET_ARRAY_INFO, &array);
1831 }
1832
1833 size_change_error:
1834 if (rv != 0) {
1835 int err = errno;
1836
1837 /* restore metadata */
1838 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
1839 UnSet, NULL, devname,
1840 ROLLBACK_METADATA_CHANGES,
1841 c->verbose) == 0)
1842 sync_metadata(st);
1843 pr_err("Cannot set device size for %s: %s\n",
1844 devname, strerror(err));
1845 if (err == EBUSY &&
1846 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
1847 cont_err("Bitmap must be removed before size can be changed\n");
1848 rv = 1;
1849 goto release;
1850 }
1851 if (s->assume_clean) {
1852 /* This will fail on kernels older than 3.0 unless
1853 * a backport has been arranged.
1854 */
1855 if (sra == NULL ||
1856 sysfs_set_str(sra, NULL, "resync_start", "none") < 0)
1857 pr_err("--assume-clean not supported with --grow on this kernel\n");
1858 }
1859 ioctl(fd, GET_ARRAY_INFO, &array);
1860 s->size = get_component_size(fd)/2;
1861 if (s->size == 0)
1862 s->size = array.size;
1863 if (c->verbose >= 0) {
1864 if (s->size == orig_size)
1865 pr_err("component size of %s "
1866 "unchanged at %lluK\n",
1867 devname, s->size);
1868 else
1869 pr_err("component size of %s "
1870 "has been set to %lluK\n",
1871 devname, s->size);
1872 }
1873 changed = 1;
1874 } else if (array.level != LEVEL_CONTAINER) {
1875 s->size = get_component_size(fd)/2;
1876 if (s->size == 0)
1877 s->size = array.size;
1878 }
1879
1880 /* See if there is anything else to do */
1881 if ((s->level == UnSet || s->level == array.level) &&
1882 (s->layout_str == NULL) &&
1883 (s->chunk == 0 || s->chunk == array.chunk_size) &&
1884 data_offset == INVALID_SECTORS &&
1885 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
1886 /* Nothing more to do */
1887 if (!changed && c->verbose >= 0)
1888 pr_err("%s: no change requested\n",
1889 devname);
1890 goto release;
1891 }
1892
1893 /* ========= check for RAID10/RAID1 -> RAID0 conversion ===============
1894 * the current implementation assumes that the following conditions are met:
1895 * - RAID10:
1896 * - far_copies == 1
1897 * - near_copies == 2
1898 */
1899 if ((s->level == 0 && array.level == 10 && sra &&
1900 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
1901 (s->level == 0 && array.level == 1 && sra)) {
1902 int err;
1903 err = remove_disks_for_takeover(st, sra, array.layout);
1904 if (err) {
1905 dprintf(Name": Array cannot be reshaped\n");
1906 if (cfd > -1)
1907 close(cfd);
1908 rv = 1;
1909 goto release;
1910 }
1911 /* Make sure mdmon has seen the device removal
1912 * and updated metadata before we continue with
1913 * level change
1914 */
1915 if (container)
1916 ping_monitor(container);
1917 }
1918
1919 memset(&info, 0, sizeof(info));
1920 info.array = array;
1921 sysfs_init(&info, fd, NULL);
1922 strcpy(info.text_version, sra->text_version);
1923 info.component_size = s->size*2;
1924 info.new_level = s->level;
1925 info.new_chunk = s->chunk * 1024;
1926 if (info.array.level == LEVEL_CONTAINER) {
1927 info.delta_disks = UnSet;
1928 info.array.raid_disks = s->raiddisks;
1929 } else if (s->raiddisks)
1930 info.delta_disks = s->raiddisks - info.array.raid_disks;
1931 else
1932 info.delta_disks = UnSet;
1933 if (s->layout_str == NULL) {
1934 info.new_layout = UnSet;
1935 if (info.array.level == 6 &&
1936 (info.new_level == 6 || info.new_level == UnSet) &&
1937 info.array.layout >= 16) {
1938 pr_err("%s has a non-standard layout. If you"
1939 " wish to preserve this\n", devname);
1940 cont_err("during the reshape, please specify"
1941 " --layout=preserve\n");
1942 cont_err("If you want to change it, specify a"
1943 " layout or use --layout=normalise\n");
1944 rv = 1;
1945 goto release;
1946 }
1947 } else if (strcmp(s->layout_str, "normalise") == 0 ||
1948 strcmp(s->layout_str, "normalize") == 0) {
1949 /* If we have a -6 RAID6 layout, remove the '-6'. */
1950 info.new_layout = UnSet;
1951 if (info.array.level == 6 && info.new_level == UnSet) {
1952 char l[40], *h;
1953 strcpy(l, map_num(r6layout, info.array.layout));
1954 h = strrchr(l, '-');
1955 if (h && strcmp(h, "-6") == 0) {
1956 *h = 0;
1957 info.new_layout = map_name(r6layout, l);
1958 }
1959 } else {
1960 pr_err("%s is only meaningful when reshaping"
1961 " a RAID6 array.\n", s->layout_str);
1962 rv = 1;
1963 goto release;
1964 }
1965 } else if (strcmp(s->layout_str, "preserve") == 0) {
1966 /* This means that a non-standard RAID6 layout
1967 * is OK.
1968 * In particular:
1969 * - When reshaping a RAID6 (e.g. adding a device)
1970 * which is in a non-standard layout, it is OK
1971 * to preserve that layout.
1972 * - When converting a RAID5 to RAID6, leave it in
1973 * the XXX-6 layout, don't re-layout.
1974 */
1975 if (info.array.level == 6 && info.new_level == UnSet)
1976 info.new_layout = info.array.layout;
1977 else if (info.array.level == 5 && info.new_level == 6) {
1978 char l[40];
1979 strcpy(l, map_num(r5layout, info.array.layout));
1980 strcat(l, "-6");
1981 info.new_layout = map_name(r6layout, l);
1982 } else {
1983 pr_err("%s in only meaningful when reshaping"
1984 " to RAID6\n", s->layout_str);
1985 rv = 1;
1986 goto release;
1987 }
1988 } else {
1989 int l = info.new_level;
1990 if (l == UnSet)
1991 l = info.array.level;
1992 switch (l) {
1993 case 5:
1994 info.new_layout = map_name(r5layout, s->layout_str);
1995 break;
1996 case 6:
1997 info.new_layout = map_name(r6layout, s->layout_str);
1998 break;
1999 case 10:
2000 info.new_layout = parse_layout_10(s->layout_str);
2001 break;
2002 case LEVEL_FAULTY:
2003 info.new_layout = parse_layout_faulty(s->layout_str);
2004 break;
2005 default:
2006 pr_err("layout not meaningful"
2007 " with this level\n");
2008 rv = 1;
2009 goto release;
2010 }
2011 if (info.new_layout == UnSet) {
2012 pr_err("layout %s not understood"
2013 " for this level\n",
2014 s->layout_str);
2015 rv = 1;
2016 goto release;
2017 }
2018 }
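/*
 * Example (added for illustration; see md(4)/mdadm(8) for the full
 * lists): typical --layout strings handled by the switch above are
 *
 *   RAID5:  "left-symmetric", "right-asymmetric", ...
 *   RAID6:  the same names plus "-6" variants such as "left-symmetric-6"
 *   RAID10: "n2" (2 near copies), "f2" (far), "o2" (offset)
 *   faulty: "write-transient", "read-persistent", ...
 *
 * Strings that map_name() does not recognise yield UnSet, which is what
 * triggers the "layout ... not understood" error just above.
 */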
2019
2020 if (array.level == LEVEL_FAULTY) {
2021 if (s->level != UnSet && s->level != array.level) {
2022 pr_err("cannot change level of Faulty device\n");
2023 rv = 1;
2024 }
2025 if (s->chunk) {
2026 pr_err("cannot set chunksize of Faulty device\n");
2027 rv = 1;
2028 }
2029 if (s->raiddisks && s->raiddisks != 1) {
2030 pr_err("cannot set raid_disks of Faulty device\n");
2031 rv = 1;
2032 }
2033 if (s->layout_str) {
2034 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2035 dprintf("Cannot get array information.\n");
2036 goto release;
2037 }
2038 array.layout = info.new_layout;
2039 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2040 pr_err("failed to set new layout\n");
2041 rv = 1;
2042 } else if (c->verbose >= 0)
2043 printf("layout for %s set to %d\n",
2044 devname, array.layout);
2045 }
2046 } else if (array.level == LEVEL_CONTAINER) {
2047 /* This change is to be applied to every array in the
2048 * container. This is only needed when the metadata imposes
2049 * restraints on the various arrays in the container.
2050 * Currently we only know that IMSM requires all arrays
2051 * to have the same number of devices so changing the
2052 * number of devices (On-Line Capacity Expansion) must be
2053 * performed at the level of the container
2054 */
2055 rv = reshape_container(container, devname, -1, st, &info,
2056 c->force, c->backup_file, c->verbose, 0, 0);
2057 frozen = 0;
2058 } else {
2059 /* get spare devices from external metadata
2060 */
2061 if (st->ss->external) {
2062 struct mdinfo *info2;
2063
2064 info2 = st->ss->container_content(st, subarray);
2065 if (info2) {
2066 info.array.spare_disks =
2067 info2->array.spare_disks;
2068 sysfs_free(info2);
2069 }
2070 }
2071
2072 /* Impose these changes on a single array. First
2073 * check that the metadata is OK with the change. */
2074
2075 if (reshape_super(st, 0, info.new_level,
2076 info.new_layout, info.new_chunk,
2077 info.array.raid_disks, info.delta_disks,
2078 c->backup_file, devname, APPLY_METADATA_CHANGES,
2079 c->verbose)) {
2080 rv = 1;
2081 goto release;
2082 }
2083 sync_metadata(st);
2084 rv = reshape_array(container, fd, devname, st, &info, c->force,
2085 devlist, data_offset, c->backup_file, c->verbose,
2086 0, 0, 0);
2087 frozen = 0;
2088 }
2089 release:
2090 sysfs_free(sra);
2091 if (frozen > 0)
2092 unfreeze(st);
2093 return rv;
2094 }
2095
2096 /* verify_reshape_position()
2097 * Checks that the reshape position recorded in the metadata is not
2098 * further along than the position in md.
2099 * Return value:
2100 * 0 : not valid sysfs entry
2101 * this can happen when the reshape has not been started yet (it will
2102 * be started by reshape_array()) or when a raid0 array is still before takeover
2103 * -1 : error, reshape position is obviously wrong
2104 * 1 : success, reshape progress correct or updated
2105 */
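/*
 * Rough worked example (added; the numbers are made up): md reports
 * sync_max per device, while the metadata records an array-wide
 * position.  For a 4-disk RAID5 (3 data disks) with sync_max = 1000000
 * sectors:
 *
 *   position = 1000000 * get_data_disks(5, layout, 4) = 3000000
 *
 * If the metadata's reshape_progress is only 2500000 it is behind md
 * and gets bumped to 3000000 (return 1); if it were ahead of md the
 * reshape was not properly frozen and -1 is returned.
 */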
2106 static int verify_reshape_position(struct mdinfo *info, int level)
2107 {
2108 int ret_val = 0;
2109 char buf[40];
2110 int rv;
2111
2112 /* read sync_max, failure can mean raid0 array */
2113 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2114
2115 if (rv > 0) {
2116 char *ep;
2117 unsigned long long position = strtoull(buf, &ep, 0);
2118
2119 dprintf(Name": Read sync_max sysfs entry is: %s\n", buf);
2120 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2121 position *= get_data_disks(level,
2122 info->new_layout,
2123 info->array.raid_disks);
2124 if (info->reshape_progress < position) {
2125 dprintf("Corrected reshape progress (%llu) to "
2126 "md position (%llu)\n",
2127 info->reshape_progress, position);
2128 info->reshape_progress = position;
2129 ret_val = 1;
2130 } else if (info->reshape_progress > position) {
2131 pr_err("Fatal error: array "
2132 "reshape was not properly frozen "
2133 "(expected reshape position is %llu, "
2134 "but reshape progress is %llu.\n",
2135 position, info->reshape_progress);
2136 ret_val = -1;
2137 } else {
2138 dprintf("Reshape position in md and metadata "
2139 "are the same;");
2140 ret_val = 1;
2141 }
2142 }
2143 } else if (rv == 0) {
2144 /* a valid sysfs entry with zero-length content
2145 * is treated as an error
2146 */
2147 ret_val = -1;
2148 }
2149
2150 return ret_val;
2151 }
2152
2153 static unsigned long long choose_offset(unsigned long long lo,
2154 unsigned long long hi,
2155 unsigned long long min,
2156 unsigned long long max)
2157 {
2158 /* Choose a new offset between hi and lo.
2159 * It must be between min and max, but
2160 * we would prefer something near the middle of hi/lo, and also
2161 * prefer to be aligned to a big power of 2.
2162 *
2163 * So we start with the middle, then for each bit,
2164 * starting at '1' and increasing, if it is set, we either
2165 * add it or subtract it if possible, preferring the option
2166 * which is furthest from the boundary.
2167 *
2168 * We stop once we get a 1MB alignment. As units are in sectors,
2169 * 1MB = 2*1024 sectors.
2170 */
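/*
 * Worked example (added for illustration; arbitrary numbers):
 * choose_offset(lo=0, hi=10000, min=0, max=10000) starts from the
 * midpoint 5000 and walks the set bits 1..1024:
 *
 *   bit 8:    5000 -> 4992  (both sides equally far, take 'smaller')
 *   bit 128:  4992 -> 5120  ('bigger' is further from the hi boundary)
 *   bit 1024: 5120 -> 4096  ('smaller' is further from the lo boundary)
 *
 * leaving 4096 sectors (2MB aligned), still within [min, max].
 */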
2171 unsigned long long choice = (lo + hi) / 2;
2172 unsigned long long bit = 1;
2173
2174 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2175 unsigned long long bigger, smaller;
2176 if (! (bit & choice))
2177 continue;
2178 bigger = choice + bit;
2179 smaller = choice - bit;
2180 if (bigger > max && smaller < min)
2181 break;
2182 if (bigger > max)
2183 choice = smaller;
2184 else if (smaller < min)
2185 choice = bigger;
2186 else if (hi - bigger > smaller - lo)
2187 choice = bigger;
2188 else
2189 choice = smaller;
2190 }
2191 return choice;
2192 }
2193
2194 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2195 char *devname, int delta_disks,
2196 unsigned long long data_offset,
2197 unsigned long long min,
2198 int can_fallback)
2199 {
2200 struct mdinfo *sd;
2201 int dir = 0;
2202 int err = 0;
2203 unsigned long long before, after;
2204
2205 /* Need to find min space before and after so the same is used
2206 * on all devices
2207 */
2208 before = UINT64_MAX;
2209 after = UINT64_MAX;
2210 for (sd = sra->devs; sd; sd = sd->next) {
2211 char *dn;
2212 int dfd;
2213 int rv;
2214 struct supertype *st2;
2215 struct mdinfo info2;
2216
2217 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2218 continue;
2219 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2220 dfd = dev_open(dn, O_RDONLY);
2221 if (dfd < 0) {
2222 pr_err("%s: cannot open component %s\n",
2223 devname, dn ? dn : "-unknown-");
2224 goto release;
2225 }
2226 st2 = dup_super(st);
2227 rv = st2->ss->load_super(st2,dfd, NULL);
2228 close(dfd);
2229 if (rv) {
2230 free(st2);
2231 pr_err("%s: cannot get superblock from %s\n",
2232 devname, dn);
2233 goto release;
2234 }
2235 st2->ss->getinfo_super(st2, &info2, NULL);
2236 st2->ss->free_super(st2);
2237 free(st2);
2238 if (info2.space_before == 0 &&
2239 info2.space_after == 0) {
2240 /* Metadata doesn't support data_offset changes */
2241 return 1;
2242 }
2243 if (before > info2.space_before)
2244 before = info2.space_before;
2245 if (after > info2.space_after)
2246 after = info2.space_after;
2247
2248 if (data_offset != INVALID_SECTORS) {
2249 if (dir == 0) {
2250 if (info2.data_offset == data_offset) {
2251 pr_err("%s: already has that data_offset\n",
2252 dn);
2253 goto release;
2254 }
2255 if (data_offset < info2.data_offset)
2256 dir = -1;
2257 else
2258 dir = 1;
2259 } else if ((data_offset <= info2.data_offset && dir == 1) ||
2260 (data_offset >= info2.data_offset && dir == -1)) {
2261 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2262 dn);
2263 goto release;
2264 }
2265 }
2266 }
2267 if (before == UINT64_MAX)
2268 /* impossible really, there must be no devices */
2269 return 1;
2270
2271 for (sd = sra->devs; sd; sd = sd->next) {
2272 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2273 unsigned long long new_data_offset;
2274
2275 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2276 continue;
2277 if (delta_disks < 0) {
2278 /* Don't need any space as array is shrinking
2279 * just move data_offset up by min
2280 */
2281 if (data_offset == INVALID_SECTORS)
2282 new_data_offset = sd->data_offset + min;
2283 else {
2284 if (data_offset < sd->data_offset + min) {
2285 pr_err("--data-offset too small for %s\n",
2286 dn);
2287 goto release;
2288 }
2289 new_data_offset = data_offset;
2290 }
2291 } else if (delta_disks > 0) {
2292 /* need space before */
2293 if (before < min) {
2294 if (can_fallback)
2295 goto fallback;
2296 pr_err("Insufficient head-space for reshape on %s\n",
2297 dn);
2298 goto release;
2299 }
2300 if (data_offset == INVALID_SECTORS)
2301 new_data_offset = sd->data_offset - min;
2302 else {
2303 if (data_offset > sd->data_offset - min) {
2304 pr_err("--data-offset too large for %s\n",
2305 dn);
2306 goto release;
2307 }
2308 new_data_offset = data_offset;
2309 }
2310 } else {
2311 if (dir == 0) {
2312 /* can move up or down. If 'data_offset'
2313 * was set we would have already decided,
2314 * so just choose direction with most space.
2315 */
2316 if (before > after)
2317 dir = -1;
2318 else
2319 dir = 1;
2320 }
2321 sysfs_set_str(sra, NULL, "reshape_direction",
2322 dir == 1 ? "backwards" : "forwards");
2323 if (dir > 0) {
2324 /* Increase data offset */
2325 if (after < min) {
2326 if (can_fallback)
2327 goto fallback;
2328 pr_err("Insufficient tail-space for reshape on %s\n",
2329 dn);
2330 goto release;
2331 }
2332 if (data_offset != INVALID_SECTORS &&
2333 data_offset < sd->data_offset + min) {
2334 pr_err("--data-offset too small on %s\n",
2335 dn);
2336 goto release;
2337 }
2338 if (data_offset != INVALID_SECTORS)
2339 new_data_offset = data_offset;
2340 else
2341 new_data_offset = choose_offset(sd->data_offset,
2342 sd->data_offset + after,
2343 sd->data_offset + min,
2344 sd->data_offset + after);
2345 } else {
2346 /* Decrease data offset */
2347 if (before < min) {
2348 if (can_fallback)
2349 goto fallback;
2350 pr_err("insufficient head-room on %s\n",
2351 dn);
2352 goto release;
2353 }
2354 if (data_offset != INVALID_SECTORS &&
2355 data_offset < sd->data_offset - min) {
2356 pr_err("--data-offset too small on %s\n",
2357 dn);
2358 goto release;
2359 }
2360 if (data_offset != INVALID_SECTORS)
2361 new_data_offset = data_offset;
2362 else
2363 new_data_offset = choose_offset(sd->data_offset - before,
2364 sd->data_offset,
2365 sd->data_offset - before,
2366 sd->data_offset - min);
2367 }
2368 }
2369 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2370 if (err < 0 && errno == E2BIG) {
2371 /* try again after increasing data size to max */
2372 err = sysfs_set_num(sra, sd, "size", 0);
2373 if (err < 0 && errno == EINVAL &&
2374 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2375 /* some kernels have a bug where you cannot
2376 * use '0' on spare devices. */
2377 sysfs_set_num(sra, sd, "size",
2378 (sra->component_size + after)/2);
2379 }
2380 err = sysfs_set_num(sra, sd, "new_offset",
2381 new_data_offset);
2382 }
2383 if (err < 0) {
2384 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2385 pr_err("data-offset is too big for %s\n",
2386 dn);
2387 goto release;
2388 }
2389 if (sd == sra->devs &&
2390 (errno == ENOENT || errno == E2BIG))
2391 /* Early kernel, no 'new_offset' file,
2392 * or kernel doesn't like us.
2393 * For RAID5/6 this is not fatal
2394 */
2395 return 1;
2396 pr_err("Cannot set new_offset for %s\n",
2397 dn);
2398 break;
2399 }
2400 }
2401 return err;
2402 release:
2403 return -1;
2404 fallback:
2405 /* Just use a backup file */
2406 return 1;
2407 }
2408
2409 static int raid10_reshape(char *container, int fd, char *devname,
2410 struct supertype *st, struct mdinfo *info,
2411 struct reshape *reshape,
2412 unsigned long long data_offset,
2413 int force, int verbose)
2414 {
2415 /* Changing raid_disks, layout, chunksize or possibly
2416 * just data_offset for a RAID10.
2417 * We must always change data_offset. We change by at least
2418 * ->min_offset_change which is the largest of the old and new
2419 * chunk sizes.
2420 * If raid_disks is increasing, then data_offset must decrease
2421 * by at least this copy size.
2422 * If raid_disks is unchanged, data_offset must increase or
2423 * decrease by at least min_offset_change but preferably by much more.
2424 * We choose half of the available space.
2425 * If raid_disks is decreasing, data_offset must increase by
2426 * at least min_offset_change. To allow for this, component_size
2427 * must be decreased by the same amount.
2428 *
2429 * So we calculate the required minimum and direction, possibly
2430 * reduce the component_size, then iterate through the devices
2431 * and set the new_data_offset.
2432 * If that all works, we set chunk_size, layout, raid_disks, and start
2433 * 'reshape'
2434 */
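/*
 * Small example (added; the figures are hypothetical): growing a RAID10
 * from 4 to 6 devices with 512K chunks gives a min_offset_change of
 * 1024 sectors (the larger of the old and new chunk sizes).  Because
 * raid_disks increases, every device's data_offset must move down by at
 * least 1024 sectors, so each member needs that much free space in
 * front of its data area; set_new_data_offset() checks for it below.
 */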
2435 struct mdinfo *sra;
2436 unsigned long long min;
2437 int err = 0;
2438
2439 sra = sysfs_read(fd, NULL,
2440 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2441 );
2442 if (!sra) {
2443 pr_err("%s: Cannot get array details from sysfs\n",
2444 devname);
2445 goto release;
2446 }
2447 min = reshape->min_offset_change;
2448
2449 if (info->delta_disks)
2450 sysfs_set_str(sra, NULL, "reshape_direction",
2451 info->delta_disks < 0 ? "backwards" : "forwards");
2452 if (info->delta_disks < 0 &&
2453 info->space_after < min) {
2454 int rv = sysfs_set_num(sra, NULL, "component_size",
2455 (sra->component_size -
2456 min)/2);
2457 if (rv) {
2458 pr_err("cannot reduce component size\n");
2459 goto release;
2460 }
2461 }
2462 err = set_new_data_offset(sra, st, devname, info->delta_disks, data_offset,
2463 min, 0);
2464 if (err == 1) {
2465 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2466 cont_err("supported on this kernel\n");
2467 err = -1;
2468 }
2469 if (err < 0)
2470 goto release;
2471
2472 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2473 err = errno;
2474 if (!err && sysfs_set_num(sra, NULL, "layout", reshape->after.layout) < 0)
2475 err = errno;
2476 if (!err && sysfs_set_num(sra, NULL, "raid_disks",
2477 info->array.raid_disks + info->delta_disks) < 0)
2478 err = errno;
2479 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2480 err = errno;
2481 if (err) {
2482 pr_err("Cannot set array shape for %s\n",
2483 devname);
2484 if (err == EBUSY &&
2485 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2486 cont_err(" Bitmap must be removed before"
2487 " shape can be changed\n");
2488 goto release;
2489 }
2490 sysfs_free(sra);
2491 return 0;
2492 release:
2493 sysfs_free(sra);
2494 return 1;
2495 }
2496
2497 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2498 {
2499 struct mdinfo *sra, *sd;
2500 /* Initialisation to silence compiler warning */
2501 unsigned long long min_space_before = 0, min_space_after = 0;
2502 int first = 1;
2503
2504 sra = sysfs_read(fd, NULL, GET_DEVS);
2505 if (!sra)
2506 return;
2507 for (sd = sra->devs; sd; sd = sd->next) {
2508 char *dn;
2509 int dfd;
2510 struct supertype *st2;
2511 struct mdinfo info2;
2512
2513 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2514 continue;
2515 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2516 dfd = dev_open(dn, O_RDONLY);
2517 if (dfd < 0)
2518 break;
2519 st2 = dup_super(st);
2520 if (st2->ss->load_super(st2,dfd, NULL)) {
2521 close(dfd);
2522 free(st2);
2523 break;
2524 }
2525 close(dfd);
2526 st2->ss->getinfo_super(st2, &info2, NULL);
2527 st2->ss->free_super(st2);
2528 free(st2);
2529 if (first ||
2530 min_space_before > info2.space_before)
2531 min_space_before = info2.space_before;
2532 if (first ||
2533 min_space_after > info2.space_after)
2534 min_space_after = info2.space_after;
2535 first = 0;
2536 }
2537 if (sd == NULL && !first) {
2538 info->space_after = min_space_after;
2539 info->space_before = min_space_before;
2540 }
2541 sysfs_free(sra);
2542 }
2543
2544 static void update_cache_size(char *container, struct mdinfo *sra,
2545 struct mdinfo *info,
2546 int disks, unsigned long long blocks)
2547 {
2548 /* Check that the internal stripe cache is
2549 * large enough, or it won't work.
2550 * It must hold at least 4 stripes of the larger
2551 * chunk size
2552 */
2553 unsigned long cache;
2554 cache = max(info->array.chunk_size, info->new_chunk);
2555 cache *= 4; /* 4 stripes minimum */
2556 cache /= 512; /* convert to sectors */
2557 /* make sure there is room for 'blocks' with a bit to spare */
2558 if (cache < 16 + blocks / disks)
2559 cache = 16 + blocks / disks;
2560 cache /= (4096/512); /* Convert from sectors to pages */
2561
2562 if (sra->cache_size < cache)
2563 subarray_set_num(container, sra, "stripe_cache_size",
2564 cache+1);
2565 }
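/*
 * Worked example for the calculation above (added; invented numbers):
 * with a 512K chunk, disks = 4 and blocks = 16384 sectors:
 *
 *   cache = 524288 * 4 / 512  = 4096 sectors (4 stripes)
 *   16 + 16384/4              = 4112 > 4096, so cache = 4112
 *   4112 / (4096/512)         = 514 pages
 *
 * so stripe_cache_size is raised to 515 if it is currently smaller.
 */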
2566
2567 static int impose_reshape(struct mdinfo *sra,
2568 struct mdinfo *info,
2569 struct supertype *st,
2570 int fd,
2571 int restart,
2572 char *devname, char *container,
2573 struct reshape *reshape)
2574 {
2575 struct mdu_array_info_s array;
2576
2577 sra->new_chunk = info->new_chunk;
2578
2579 if (restart) {
2580 /* for external metadata the checkpoint saved by mdmon can be lost
2581 * or missed (e.g. due to a crash). Check whether md has progressed
2582 * further on restart than the metadata indicates.
2583 * If so, the metadata information is obsolete.
2584 */
2585 if (st->ss->external)
2586 verify_reshape_position(info, reshape->level);
2587 sra->reshape_progress = info->reshape_progress;
2588 } else {
2589 sra->reshape_progress = 0;
2590 if (reshape->after.data_disks < reshape->before.data_disks)
2591 /* start from the end of the new array */
2592 sra->reshape_progress = (sra->component_size
2593 * reshape->after.data_disks);
2594 }
2595
2596 ioctl(fd, GET_ARRAY_INFO, &array);
2597 if (info->array.chunk_size == info->new_chunk &&
2598 reshape->before.layout == reshape->after.layout &&
2599 st->ss->external == 0) {
2600 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2601 array.raid_disks = reshape->after.data_disks + reshape->parity;
2602 if (!restart &&
2603 ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2604 int err = errno;
2605
2606 pr_err("Cannot set device shape for %s: %s\n",
2607 devname, strerror(errno));
2608
2609 if (err == EBUSY &&
2610 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2611 cont_err("Bitmap must be removed before"
2612 " shape can be changed\n");
2613
2614 goto release;
2615 }
2616 } else if (!restart) {
2617 /* set them all just in case some old 'new_*' value
2618 * persists from some earlier problem.
2619 */
2620 int err = 0;
2621 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2622 err = errno;
2623 if (!err && sysfs_set_num(sra, NULL, "layout",
2624 reshape->after.layout) < 0)
2625 err = errno;
2626 if (!err && subarray_set_num(container, sra, "raid_disks",
2627 reshape->after.data_disks +
2628 reshape->parity) < 0)
2629 err = errno;
2630 if (err) {
2631 pr_err("Cannot set device shape for %s\n",
2632 devname);
2633
2634 if (err == EBUSY &&
2635 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2636 cont_err("Bitmap must be removed before"
2637 " shape can be changed\n");
2638 goto release;
2639 }
2640 }
2641 return 0;
2642 release:
2643 return -1;
2644 }
2645
2646 static int impose_level(int fd, int level, char *devname, int verbose)
2647 {
2648 char *c;
2649 struct mdu_array_info_s array;
2650 struct mdinfo info;
2651 sysfs_init(&info, fd, NULL);
2652
2653 ioctl(fd, GET_ARRAY_INFO, &array);
2654 if (level == 0 &&
2655 (array.level >= 4 && array.level <= 6)) {
2656 /* To convert to RAID0 we need to fail and
2657 * remove any non-data devices. */
2658 int found = 0;
2659 int d;
2660 int data_disks = array.raid_disks - 1;
2661 if (array.level == 6)
2662 data_disks -= 1;
2663 if (array.level == 5 &&
2664 array.layout != ALGORITHM_PARITY_N)
2665 return -1;
2666 if (array.level == 6 &&
2667 array.layout != ALGORITHM_PARITY_N_6)
2668 return -1;
2669 sysfs_set_str(&info, NULL,"sync_action", "idle");
2670 /* First remove any spares so no recovery starts */
2671 for (d = 0, found = 0;
2672 d < MAX_DISKS && found < array.nr_disks;
2673 d++) {
2674 mdu_disk_info_t disk;
2675 disk.number = d;
2676 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2677 continue;
2678 if (disk.major == 0 && disk.minor == 0)
2679 continue;
2680 found++;
2681 if ((disk.state & (1 << MD_DISK_ACTIVE))
2682 && disk.raid_disk < data_disks)
2683 /* keep this */
2684 continue;
2685 ioctl(fd, HOT_REMOVE_DISK,
2686 makedev(disk.major, disk.minor));
2687 }
2688 /* Now fail anything left */
2689 ioctl(fd, GET_ARRAY_INFO, &array);
2690 for (d = 0, found = 0;
2691 d < MAX_DISKS && found < array.nr_disks;
2692 d++) {
2693 int cnt;
2694 mdu_disk_info_t disk;
2695 disk.number = d;
2696 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2697 continue;
2698 if (disk.major == 0 && disk.minor == 0)
2699 continue;
2700 found++;
2701 if ((disk.state & (1 << MD_DISK_ACTIVE))
2702 && disk.raid_disk < data_disks)
2703 /* keep this */
2704 continue;
2705 ioctl(fd, SET_DISK_FAULTY,
2706 makedev(disk.major, disk.minor));
2707 cnt = 5;
2708 while (ioctl(fd, HOT_REMOVE_DISK,
2709 makedev(disk.major, disk.minor)) < 0
2710 && errno == EBUSY
2711 && cnt--) {
2712 usleep(10000);
2713 }
2714 }
2715 }
2716 c = map_num(pers, level);
2717 if (c) {
2718 int err = sysfs_set_str(&info, NULL, "level", c);
2719 if (err) {
2720 err = errno;
2721 pr_err("%s: could not set level to %s\n",
2722 devname, c);
2723 if (err == EBUSY &&
2724 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2725 cont_err("Bitmap must be removed"
2726 " before level can be changed\n");
2727 return err;
2728 }
2729 if (verbose >= 0)
2730 pr_err("level of %s changed to %s\n",
2731 devname, c);
2732 }
2733 return 0;
2734 }
2735
2736 int sigterm = 0;
2737 static void catch_term(int sig)
2738 {
2739 sigterm = 1;
2740 }
2741
2742 static int reshape_array(char *container, int fd, char *devname,
2743 struct supertype *st, struct mdinfo *info,
2744 int force, struct mddev_dev *devlist,
2745 unsigned long long data_offset,
2746 char *backup_file, int verbose, int forked,
2747 int restart, int freeze_reshape)
2748 {
2749 struct reshape reshape;
2750 int spares_needed;
2751 char *msg;
2752 int orig_level = UnSet;
2753 int odisks;
2754 int delayed;
2755
2756 struct mdu_array_info_s array;
2757 char *c;
2758
2759 struct mddev_dev *dv;
2760 int added_disks;
2761
2762 int *fdlist = NULL;
2763 unsigned long long *offsets = NULL;
2764 int d;
2765 int nrdisks;
2766 int err;
2767 unsigned long blocks;
2768 unsigned long long array_size;
2769 int done;
2770 struct mdinfo *sra = NULL;
2771
2772 /* when reshaping a RAID0, the component_size might be zero.
2773 * So try to fix that up.
2774 */
2775 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2776 dprintf("Cannot get array information.\n");
2777 goto release;
2778 }
2779 if (array.level == 0 && info->component_size == 0) {
2780 get_dev_size(fd, NULL, &array_size);
2781 info->component_size = array_size / array.raid_disks;
2782 }
2783
2784 if (array.level == 10)
2785 /* Need space_after info */
2786 get_space_after(fd, st, info);
2787
2788 if (info->reshape_active) {
2789 int new_level = info->new_level;
2790 info->new_level = UnSet;
2791 if (info->delta_disks > 0)
2792 info->array.raid_disks -= info->delta_disks;
2793 msg = analyse_change(devname, info, &reshape);
2794 info->new_level = new_level;
2795 if (info->delta_disks > 0)
2796 info->array.raid_disks += info->delta_disks;
2797 if (!restart)
2798 /* Make sure the array isn't read-only */
2799 ioctl(fd, RESTART_ARRAY_RW, 0);
2800 } else
2801 msg = analyse_change(devname, info, &reshape);
2802 if (msg) {
2803 /* if msg == "", error has already been printed */
2804 if (msg[0])
2805 pr_err("%s\n", msg);
2806 goto release;
2807 }
2808 if (restart &&
2809 (reshape.level != info->array.level ||
2810 reshape.before.layout != info->array.layout ||
2811 reshape.before.data_disks + reshape.parity
2812 != info->array.raid_disks - max(0, info->delta_disks))) {
2813 pr_err("reshape info is not in native format -"
2814 " cannot continue.\n");
2815 goto release;
2816 }
2817
2818 if (st->ss->external && restart && (info->reshape_progress == 0)) {
2819 /* When a reshape is restarted from '0', the very beginning of the
2820 * array, it is possible that for external metadata the reshape and
2821 * array configuration never actually happened.
2822 * Check whether md agrees that the reshape is restarting from 0.
2823 * If so, this is a regular reshape start after the metadata switched
2824 * to the next array.
2825 */
2826 if ((verify_reshape_position(info, reshape.level) >= 0) &&
2827 (info->reshape_progress == 0))
2828 restart = 0;
2829 }
2830 if (restart) {
2831 /* reshape already started. just skip to monitoring the reshape */
2832 if (reshape.backup_blocks == 0)
2833 return 0;
2834 if (restart & RESHAPE_NO_BACKUP)
2835 return 0;
2836
2837 /* Need 'sra' down at 'started:' */
2838 sra = sysfs_read(fd, NULL,
2839 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
2840 GET_CACHE);
2841 if (!sra) {
2842 pr_err("%s: Cannot get array details from sysfs\n",
2843 devname);
2844 goto release;
2845 }
2846 goto started;
2847 }
2848 /* The container is frozen but the array may not be.
2849 * So freeze the array so spares don't get put to the wrong use
2850 * FIXME there should probably be a cleaner separation between
2851 * freeze_array and freeze_container.
2852 */
2853 sysfs_freeze_array(info);
2854 /* Check we have enough spares to not be degraded */
2855 added_disks = 0;
2856 for (dv = devlist; dv ; dv=dv->next)
2857 added_disks++;
2858 spares_needed = max(reshape.before.data_disks,
2859 reshape.after.data_disks)
2860 + reshape.parity - array.raid_disks;
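/*
 * Example (added for illustration): growing a 4-disk RAID5 to 6 disks
 * gives before.data_disks = 3, after.data_disks = 5 and parity = 1, so
 *
 *   spares_needed = max(3, 5) + 1 - 4 = 2
 *
 * i.e. two spares (or two devices passed with --add) are needed to
 * finish the reshape without leaving the array degraded.
 */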
2861
2862 if (!force &&
2863 info->new_level > 1 && info->array.level > 1 &&
2864 spares_needed > info->array.spare_disks + added_disks) {
2865 pr_err("Need %d spare%s to avoid degraded array,"
2866 " and only have %d.\n"
2867 " Use --force to over-ride this check.\n",
2868 spares_needed,
2869 spares_needed == 1 ? "" : "s",
2870 info->array.spare_disks + added_disks);
2871 goto release;
2872 }
2873 /* Check we have enough spares to not fail */
2874 spares_needed = max(reshape.before.data_disks,
2875 reshape.after.data_disks)
2876 - array.raid_disks;
2877 if ((info->new_level > 1 || info->new_level == 0) &&
2878 spares_needed > info->array.spare_disks +added_disks) {
2879 pr_err("Need %d spare%s to create working array,"
2880 " and only have %d.\n",
2881 spares_needed,
2882 spares_needed == 1 ? "" : "s",
2883 info->array.spare_disks + added_disks);
2884 goto release;
2885 }
2886
2887 if (reshape.level != array.level) {
2888 int err = impose_level(fd, reshape.level, devname, verbose);
2889 if (err)
2890 goto release;
2891 info->new_layout = UnSet; /* after level change,
2892 * layout is meaningless */
2893 orig_level = array.level;
2894 sysfs_freeze_array(info);
2895
2896 if (reshape.level > 0 && st->ss->external) {
2897 /* make sure mdmon is aware of the new level */
2898 if (mdmon_running(container))
2899 flush_mdmon(container);
2900
2901 if (!mdmon_running(container))
2902 start_mdmon(container);
2903 ping_monitor(container);
2904 if (mdmon_running(container) &&
2905 st->update_tail == NULL)
2906 st->update_tail = &st->updates;
2907 }
2908 }
2909 /* ->reshape_super might have chosen some spares from the
2910 * container that it wants to be part of the new array.
2911 * We can collect them with ->container_content and give
2912 * them to the kernel.
2913 */
2914 if (st->ss->reshape_super && st->ss->container_content) {
2915 char *subarray = strchr(info->text_version+1, '/')+1;
2916 struct mdinfo *info2 =
2917 st->ss->container_content(st, subarray);
2918 struct mdinfo *d;
2919
2920 if (info2) {
2921 sysfs_init(info2, fd, st->devnm);
2922 /* When increasing number of devices, we need to set
2923 * new raid_disks before adding these, or they might
2924 * be rejected.
2925 */
2926 if (reshape.backup_blocks &&
2927 reshape.after.data_disks > reshape.before.data_disks)
2928 subarray_set_num(container, info2, "raid_disks",
2929 reshape.after.data_disks +
2930 reshape.parity);
2931 for (d = info2->devs; d; d = d->next) {
2932 if (d->disk.state == 0 &&
2933 d->disk.raid_disk >= 0) {
2934 /* This is a spare that wants to
2935 * be part of the array.
2936 */
2937 add_disk(fd, st, info2, d);
2938 }
2939 }
2940 sysfs_free(info2);
2941 }
2942 }
2943 /* We might have been given some devices to add to the
2944 * array. Now that the array has been changed to the right
2945 * level and frozen, we can safely add them.
2946 */
2947 if (devlist)
2948 Manage_subdevs(devname, fd, devlist, verbose,
2949 0,NULL, 0);
2950
2951 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
2952 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
2953 if (reshape.backup_blocks == 0) {
2954 /* No restriping needed, but we might need to impose
2955 * some more changes: layout, raid_disks, chunk_size
2956 */
2957 /* read current array info */
2958 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2959 dprintf("Cannot get array information.\n");
2960 goto release;
2961 }
2962 /* compare the current array info with the new values and,
2963 * if they differ, update to the new values */
2964 if (info->new_layout != UnSet &&
2965 info->new_layout != array.layout) {
2966 array.layout = info->new_layout;
2967 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2968 pr_err("failed to set new layout\n");
2969 goto release;
2970 } else if (verbose >= 0)
2971 printf("layout for %s set to %d\n",
2972 devname, array.layout);
2973 }
2974 if (info->delta_disks != UnSet &&
2975 info->delta_disks != 0 &&
2976 array.raid_disks != (info->array.raid_disks + info->delta_disks)) {
2977 array.raid_disks += info->delta_disks;
2978 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2979 pr_err("failed to set raid disks\n");
2980 goto release;
2981 } else if (verbose >= 0) {
2982 printf("raid_disks for %s set to %d\n",
2983 devname, array.raid_disks);
2984 }
2985 }
2986 if (info->new_chunk != 0 &&
2987 info->new_chunk != array.chunk_size) {
2988 if (sysfs_set_num(info, NULL,
2989 "chunk_size", info->new_chunk) != 0) {
2990 pr_err("failed to set chunk size\n");
2991 goto release;
2992 } else if (verbose >= 0)
2993 printf("chunk size for %s set to %d\n",
2994 devname, array.chunk_size);
2995 }
2996 unfreeze(st);
2997 return 0;
2998 }
2999
3000 /*
3001 * There are three possibilities.
3002 * 1/ The array will shrink.
3003 * We need to ensure the reshape will pause before reaching
3004 * the 'critical section'. We also need to fork and wait for
3005 * that to happen. When it does we
3006 * suspend/backup/complete/unfreeze
3007 *
3008 * 2/ The array will not change size.
3009 * This requires that we keep a backup of a sliding window
3010 * so that we can restore data after a crash. So we need
3011 * to fork and monitor progress.
3012 * In future we will allow the data_offset to change, so
3013 * a sliding backup becomes unnecessary.
3014 *
3015 * 3/ The array will grow. This is relatively easy.
3016 * However the kernel's restripe routines will cheerfully
3017 * overwrite some early data before it is safe. So we
3018 * need to make a backup of the early parts of the array
3019 * and be ready to restore it if rebuild aborts very early.
3020 * For externally managed metadata, we still need a forked
3021 * child to monitor the reshape and suspend IO over the region
3022 * that is being reshaped.
3023 *
3024 * We backup data by writing it to one spare, or to a
3025 * file which was given on command line.
3026 *
3027 * In each case, we first make sure that storage is available
3028 * for the required backup.
3029 * Then we:
3030 * - request the shape change.
3031 * - fork to handle backup etc.
3032 */
3033 /* Check that we can hold all the data */
3034 get_dev_size(fd, NULL, &array_size);
3035 if (reshape.new_size < (array_size/512)) {
3036 pr_err("this change will reduce the size of the array.\n"
3037 " use --grow --array-size first to truncate array.\n"
3038 " e.g. mdadm --grow %s --array-size %llu\n",
3039 devname, reshape.new_size/2);
3040 goto release;
3041 }
3042
3043 if (array.level == 10) {
3044 /* Reshaping RAID10 does not require any data backup by
3045 * user-space. Instead it requires that the data_offset
3046 * is changed to avoid the need for backup.
3047 * So this is handled very separately
3048 */
3049 if (restart)
3050 /* Nothing to do. */
3051 return 0;
3052 return raid10_reshape(container, fd, devname, st, info,
3053 &reshape, data_offset,
3054 force, verbose);
3055 }
3056 sra = sysfs_read(fd, NULL,
3057 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3058 GET_CACHE);
3059 if (!sra) {
3060 pr_err("%s: Cannot get array details from sysfs\n",
3061 devname);
3062 goto release;
3063 }
3064
3065 if (!backup_file)
3066 switch(set_new_data_offset(sra, st, devname,
3067 reshape.after.data_disks - reshape.before.data_disks,
3068 data_offset,
3069 reshape.min_offset_change, 1)) {
3070 case -1:
3071 goto release;
3072 case 0:
3073 /* Updated data_offset, so it's easy now */
3074 update_cache_size(container, sra, info,
3075 min(reshape.before.data_disks,
3076 reshape.after.data_disks),
3077 reshape.backup_blocks);
3078
3079 /* Right, everything seems fine. Let's kick things off.
3080 */
3081 sync_metadata(st);
3082
3083 if (impose_reshape(sra, info, st, fd, restart,
3084 devname, container, &reshape) < 0)
3085 goto release;
3086 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3087 pr_err("Failed to initiate reshape!\n");
3088 goto release;
3089 }
3090 if (info->new_level == reshape.level)
3091 return 0;
3092 /* need to adjust level when reshape completes */
3093 switch(fork()) {
3094 case -1: /* ignore error, but don't wait */
3095 return 0;
3096 default: /* parent */
3097 return 0;
3098 case 0:
3099 map_fork();
3100 break;
3101 }
3102 close(fd);
3103 wait_reshape(sra);
3104 fd = open_dev(sra->sys_name);
3105 if (fd >= 0)
3106 impose_level(fd, info->new_level, devname, verbose);
3107 return 0;
3108 case 1: /* Couldn't set data_offset, try the old way */
3109 if (data_offset != INVALID_SECTORS) {
3110 pr_err("Cannot update data_offset on this array\n");
3111 goto release;
3112 }
3113 break;
3114 }
3115
3116 started:
3117 /* Decide how many blocks (sectors) for a reshape
3118 * unit. The number we have so far is just a minimum
3119 */
3120 blocks = reshape.backup_blocks;
3121 if (reshape.before.data_disks ==
3122 reshape.after.data_disks) {
3123 /* Make 'blocks' bigger for better throughput, but
3124 * not so big that we reject it below.
3125 * Try for 16 megabytes
3126 */
3127 while (blocks * 32 < sra->component_size &&
3128 blocks < 16*1024*2)
3129 blocks *= 2;
3130 } else
3131 pr_err("Need to backup %luK of critical "
3132 "section..\n", blocks/2);
3133
3134 if (blocks >= sra->component_size/2) {
3135 pr_err("%s: Something wrong"
3136 " - reshape aborted\n",
3137 devname);
3138 goto release;
3139 }
3140
3141 /* Now we need to open all these devices so we can read/write.
3142 */
3143 nrdisks = max(reshape.before.data_disks,
3144 reshape.after.data_disks) + reshape.parity
3145 + sra->array.spare_disks;
3146 fdlist = xcalloc((1+nrdisks), sizeof(int));
3147 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3148
3149 odisks = reshape.before.data_disks + reshape.parity;
3150 d = reshape_prepare_fdlist(devname, sra, odisks,
3151 nrdisks, blocks, backup_file,
3152 fdlist, offsets);
3153 if (d < 0) {
3154 goto release;
3155 }
3156 if ((st->ss->manage_reshape == NULL) ||
3157 (st->ss->recover_backup == NULL)) {
3158 if (backup_file == NULL) {
3159 if (reshape.after.data_disks <=
3160 reshape.before.data_disks) {
3161 pr_err("%s: Cannot grow - need backup-file\n",
3162 devname);
3163 pr_err(" Please provide one with \"--backup=...\"\n");
3164 goto release;
3165 } else if (sra->array.spare_disks == 0) {
3166 pr_err("%s: Cannot grow - "
3167 "need a spare or backup-file to backup "
3168 "critical section\n", devname);
3169 goto release;
3170 }
3171 } else {
3172 if (!reshape_open_backup_file(backup_file, fd, devname,
3173 (signed)blocks,
3174 fdlist+d, offsets+d,
3175 restart)) {
3176 goto release;
3177 }
3178 d++;
3179 }
3180 }
3181
3182 update_cache_size(container, sra, info,
3183 min(reshape.before.data_disks, reshape.after.data_disks),
3184 blocks);
3185
3186 /* Right, everything seems fine. Let's kick things off.
3187 * If only changing raid_disks, use ioctl, else use
3188 * sysfs.
3189 */
3190 sync_metadata(st);
3191
3192 if (impose_reshape(sra, info, st, fd, restart,
3193 devname, container, &reshape) < 0)
3194 goto release;
3195
3196 err = start_reshape(sra, restart, reshape.before.data_disks,
3197 reshape.after.data_disks);
3198 if (err) {
3199 pr_err("Cannot %s reshape for %s\n",
3200 restart ? "continue" : "start",
3201 devname);
3202 goto release;
3203 }
3204 if (restart)
3205 sysfs_set_str(sra, NULL, "array_state", "active");
3206 if (freeze_reshape) {
3207 free(fdlist);
3208 free(offsets);
3209 sysfs_free(sra);
3210 pr_err("Reshape has to be continued from"
3211 " location %llu when root filesystem has been mounted.\n",
3212 sra->reshape_progress);
3213 return 1;
3214 }
3215
3216 /* Now we just need to kick off the reshape and watch, while
3217 * handling backups of the data...
3218 * This is all done by a forked background process.
3219 */
3220 switch(forked ? 0 : fork()) {
3221 case -1:
3222 pr_err("Cannot run child to monitor reshape: %s\n",
3223 strerror(errno));
3224 abort_reshape(sra);
3225 goto release;
3226 default:
3227 free(fdlist);
3228 free(offsets);
3229 sysfs_free(sra);
3230 return 0;
3231 case 0:
3232 map_fork();
3233 break;
3234 }
3235
3236 /* If another array on the same devices is busy, the
3237 * reshape will wait for them. This would mean that
3238 * the first section that we suspend will stay suspended
3239 * for a long time. So check on that possibility
3240 * by looking for "DELAYED" in /proc/mdstat, and if found,
3241 * wait a while
3242 */
3243 do {
3244 struct mdstat_ent *mds, *m;
3245 delayed = 0;
3246 mds = mdstat_read(1, 0);
3247 for (m = mds; m; m = m->next)
3248 if (strcmp(m->devnm, sra->sys_name) == 0) {
3249 if (m->resync &&
3250 m->percent == RESYNC_DELAYED)
3251 delayed = 1;
3252 if (m->resync == 0)
3253 /* Haven't started the reshape thread
3254 * yet, wait a bit
3255 */
3256 delayed = 2;
3257 break;
3258 }
3259 free_mdstat(mds);
3260 if (delayed == 1 && get_linux_version() < 3007000) {
3261 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3262 " You might experience problems until other reshapes complete.\n");
3263 delayed = 0;
3264 }
3265 if (delayed)
3266 mdstat_wait(30 - (delayed-1) * 25);
3267 } while (delayed);
3268 mdstat_close();
3269 close(fd);
3270 if (check_env("MDADM_GROW_VERIFY"))
3271 fd = open(devname, O_RDONLY | O_DIRECT);
3272 else
3273 fd = -1;
3274 mlockall(MCL_FUTURE);
3275
3276 signal(SIGTERM, catch_term);
3277
3278 if (st->ss->external) {
3279 /* metadata handler takes it from here */
3280 done = st->ss->manage_reshape(
3281 fd, sra, &reshape, st, blocks,
3282 fdlist, offsets,
3283 d - odisks, fdlist+odisks,
3284 offsets+odisks);
3285 } else
3286 done = child_monitor(
3287 fd, sra, &reshape, st, blocks,
3288 fdlist, offsets,
3289 d - odisks, fdlist+odisks,
3290 offsets+odisks);
3291
3292 free(fdlist);
3293 free(offsets);
3294
3295 if (backup_file && done)
3296 unlink(backup_file);
3297 if (!done) {
3298 abort_reshape(sra);
3299 goto out;
3300 }
3301
3302 if (!st->ss->external &&
3303 !(reshape.before.data_disks != reshape.after.data_disks
3304 && info->custom_array_size) &&
3305 info->new_level == reshape.level &&
3306 !forked) {
3307 /* no need to wait for the reshape to finish as
3308 * there is nothing more to do.
3309 */
3310 sysfs_free(sra);
3311 exit(0);
3312 }
3313 wait_reshape(sra);
3314
3315 if (st->ss->external) {
3316 /* Re-load the metadata as much could have changed */
3317 int cfd = open_dev(st->container_devnm);
3318 if (cfd >= 0) {
3319 flush_mdmon(container);
3320 st->ss->free_super(st);
3321 st->ss->load_container(st, cfd, container);
3322 close(cfd);
3323 }
3324 }
3325
3326 /* set the new array size if required; custom_array_size is used
3327 * by this metadata.
3328 */
3329 if (reshape.before.data_disks !=
3330 reshape.after.data_disks &&
3331 info->custom_array_size)
3332 set_array_size(st, info, info->text_version);
3333
3334 if (info->new_level != reshape.level) {
3335 if (fd < 0)
3336 fd = open(devname, O_RDONLY);
3337 impose_level(fd, info->new_level, devname, verbose);
3338 close(fd);
3339 if (info->new_level == 0)
3340 st->update_tail = NULL;
3341 }
3342 out:
3343 sysfs_free(sra);
3344 if (forked)
3345 return 0;
3346 unfreeze(st);
3347 exit(0);
3348
3349 release:
3350 free(fdlist);
3351 free(offsets);
3352 if (orig_level != UnSet && sra) {
3353 c = map_num(pers, orig_level);
3354 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3355 pr_err("aborting level change\n");
3356 }
3357 sysfs_free(sra);
3358 if (!forked)
3359 unfreeze(st);
3360 return 1;
3361 }
3362
3363 /* mdfd handle is passed to be closed in child process (after fork).
3364 */
3365 int reshape_container(char *container, char *devname,
3366 int mdfd,
3367 struct supertype *st,
3368 struct mdinfo *info,
3369 int force,
3370 char *backup_file,
3371 int verbose, int restart, int freeze_reshape)
3372 {
3373 struct mdinfo *cc = NULL;
3374 int rv = restart;
3375 char last_devnm[32] = "";
3376
3377 /* component_size is not meaningful for a container,
3378 * so pass '0' meaning 'no change'
3379 */
3380 if (!restart &&
3381 reshape_super(st, 0, info->new_level,
3382 info->new_layout, info->new_chunk,
3383 info->array.raid_disks, info->delta_disks,
3384 backup_file, devname, APPLY_METADATA_CHANGES,
3385 verbose)) {
3386 unfreeze(st);
3387 return 1;
3388 }
3389
3390 sync_metadata(st);
3391
3392 /* ping monitor to be sure that update is on disk
3393 */
3394 ping_monitor(container);
3395
3396 switch (fork()) {
3397 case -1: /* error */
3398 perror("Cannot fork to complete reshape\n");
3399 unfreeze(st);
3400 return 1;
3401 default: /* parent */
3402 if (!freeze_reshape)
3403 printf(Name ": multi-array reshape continues"
3404 " in background\n");
3405 return 0;
3406 case 0: /* child */
3407 map_fork();
3408 break;
3409 }
3410
3411 /* close unused handle in child process
3412 */
3413 if (mdfd > -1)
3414 close(mdfd);
3415
3416 while(1) {
3417 /* For each member array with reshape_active,
3418 * we need to perform the reshape.
3419 * We pick the first array that needs reshaping and
3420 * reshape it. reshape_array() will re-read the metadata
3421 * so the next time through a different array should be
3422 * ready for reshape.
3423 * It is possible that the 'different' array will not
3424 * be assembled yet. In that case we simply exit.
3425 * When it is assembled, the mdadm which assembles it
3426 * will take over the reshape.
3427 */
3428 struct mdinfo *content;
3429 int fd;
3430 struct mdstat_ent *mdstat;
3431 char *adev;
3432 int devid;
3433
3434 sysfs_free(cc);
3435
3436 cc = st->ss->container_content(st, NULL);
3437
3438 for (content = cc; content ; content = content->next) {
3439 char *subarray;
3440 if (!content->reshape_active)
3441 continue;
3442
3443 subarray = strchr(content->text_version+1, '/')+1;
3444 mdstat = mdstat_by_subdev(subarray, container);
3445 if (!mdstat)
3446 continue;
3447 if (mdstat->active == 0) {
3448 pr_err("Skipping inactive array %s.\n",
3449 mdstat->devnm);
3450 free_mdstat(mdstat);
3451 mdstat = NULL;
3452 continue;
3453 }
3454 break;
3455 }
3456 if (!content)
3457 break;
3458
3459 devid = devnm2devid(mdstat->devnm);
3460 adev = map_dev(major(devid), minor(devid), 0);
3461 if (!adev)
3462 adev = content->text_version;
3463
3464 fd = open_dev(mdstat->devnm);
3465 if (fd < 0) {
3466 printf(Name ": Device %s cannot be opened for reshape.",
3467 adev);
3468 break;
3469 }
3470
3471 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3472 /* Do not allow for multiple reshape_array() calls for
3473 * the same array.
3474 * It can happen when reshape_array() returns without
3475 * error, when reshape is not finished (wrong reshape
3476 * starting/continuation conditions). Mdmon doesn't
3477 * switch to next array in container and reentry
3478 * conditions for the same array occur.
3479 * This is possibly interim until the behaviour of
3480 * reshape_array() is resolved.
3481 */
3482 printf(Name ": Multiple reshape execution detected for "
3483 "device %s.", adev);
3484 close(fd);
3485 break;
3486 }
3487 strcpy(last_devnm, mdstat->devnm);
3488
3489 sysfs_init(content, fd, mdstat->devnm);
3490
3491 if (mdmon_running(container))
3492 flush_mdmon(container);
3493
3494 rv = reshape_array(container, fd, adev, st,
3495 content, force, NULL, INVALID_SECTORS,
3496 backup_file, verbose, 1, restart,
3497 freeze_reshape);
3498 close(fd);
3499
3500 if (freeze_reshape) {
3501 sysfs_free(cc);
3502 exit(0);
3503 }
3504
3505 restart = 0;
3506 if (rv)
3507 break;
3508
3509 if (mdmon_running(container))
3510 flush_mdmon(container);
3511 }
3512 if (!rv)
3513 unfreeze(st);
3514 sysfs_free(cc);
3515 exit(0);
3516 }
3517
3518 /*
3519 * We run a child process in the background which performs the following
3520 * steps:
3521 * - wait for resync to reach a certain point
3522 * - suspend io to the following section
3523 * - backup that section
3524 * - allow resync to proceed further
3525 * - resume io
3526 * - discard the backup.
3527 *
3528 * These are combined in slightly different ways in the three cases.
3529 * Grow:
3530 * - suspend/backup/allow/wait/resume/discard
3531 * Shrink:
3532 * - allow/wait/suspend/backup/allow/wait/resume/discard
3533 * same-size:
3534 * - wait/resume/discard/suspend/backup/allow
3535 *
3536 * suspend/backup/allow always come together
3537 * wait/resume/discard do too.
3538 * For the same-size case we have two backups to improve flow.
3539 *
3540 */
3541
3542 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3543 unsigned long long backup_point,
3544 unsigned long long wait_point,
3545 unsigned long long *suspend_point,
3546 unsigned long long *reshape_completed, int *frozen)
3547 {
3548 /* This function is called repeatedly by the reshape manager.
3549 * It determines how much progress can safely be made and allows
3550 * that progress.
3551 * - 'info' identifies the array and particularly records in
3552 * ->reshape_progress the metadata's knowledge of progress
3553 * This is a sector offset from the start of the array
3554 * of the next array block to be relocated. This number
3555 * may increase from 0 or decrease from array_size, depending
3556 * on the type of reshape that is happening.
3557 * Note that in contrast, 'sync_completed' is a block count of the
3558 * reshape so far. It gives the distance between the start point
3559 * (head or tail of device) and the next place that data will be
3560 * written. It always increases.
3561 * - 'reshape' is the structure created by analyse_change
3562 * - 'backup_point' shows how much the metadata manager has backed-up
3563 * data. For reshapes with increasing progress, it is the next address
3564 * to be backed up; previous addresses have already been backed up. For
3565 * decreasing progress, it is the earliest address that has been
3566 * backed up - later addresses are also backed up.
3567 * So addresses between reshape_progress and backup_point are
3568 * backed up providing those are in the 'correct' order.
3569 * - 'wait_point' is an array address. When reshape_completed
3570 * passes this point, progress_reshape should return. It might
3571 * return earlier if it determines that ->reshape_progress needs
3572 * to be updated or further backup is needed.
3573 * - suspend_point is maintained by progress_reshape and the caller
3574 * should not touch it except to initialise to zero.
3575 * It is an array address and it only increases in 2.6.37 and earlier.
3576 * This makes it difficult to handle reducing reshapes with
3577 * external metadata.
3578 * However: it is similar to backup_point in that it records the
3579 * other end of a suspended region from reshape_progress.
3580 * it is moved to extend the region that is safe to backup and/or
3581 * reshape
3582 * - reshape_completed is read from sysfs and returned. The caller
3583 * should copy this into ->reshape_progress when it has reason to
3584 * believe that the metadata knows this, and any backup outside this
3585 * has been erased.
3586 *
3587 * Return value is:
3588 * 1 if more data from backup_point - but only as far as suspend_point,
3589 * should be backed up
3590 * 0 if things are progressing smoothly
3591 * -1 if the reshape is finished because it is all done,
3592 * -2 if the reshape is finished due to an error.
3593 */
3594
3595 int advancing = (reshape->after.data_disks
3596 >= reshape->before.data_disks);
3597 unsigned long long need_backup; /* All data between start of array and
3598 * here will at some point need to
3599 * be backed up.
3600 */
3601 unsigned long long read_offset, write_offset;
3602 unsigned long long write_range;
3603 unsigned long long max_progress, target, completed;
3604 unsigned long long array_size = (info->component_size
3605 * reshape->before.data_disks);
3606 int fd;
3607 char buf[20];
3608
3609 /* First, we unsuspend any region that is now known to be safe.
3610 * If suspend_point is on the 'wrong' side of reshape_progress, then
3611 * we don't have or need suspension at the moment. This is true for
3612 * native metadata when we don't need to back-up.
3613 */
3614 if (advancing) {
3615 if (info->reshape_progress <= *suspend_point)
3616 sysfs_set_num(info, NULL, "suspend_lo",
3617 info->reshape_progress);
3618 } else {
3619 /* Note: this won't work in 2.6.37 and before.
3620 * Something somewhere should make sure we don't need it!
3621 */
3622 if (info->reshape_progress >= *suspend_point)
3623 sysfs_set_num(info, NULL, "suspend_hi",
3624 info->reshape_progress);
3625 }
3626
3627 /* Now work out how far it is safe to progress.
3628 * If the read_offset for ->reshape_progress is less than
3629 * 'blocks' beyond the write_offset, we can only progress as far
3630 * as a backup.
3631 * Otherwise we can progress until the write_offset for the new location
3632 * reaches (within 'blocks' of) the read_offset at the current location.
3633 * However that region must be suspended unless we are using native
3634 * metadata.
3635 * If we need to suspend more, we limit it to 128M per device, which is
3636 * rather arbitrary and should be some time-based calculation.
3637 */
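/*
 * Rough example of the arithmetic below (added; numbers are invented):
 * growing from 3 to 4 data disks with 512K chunks, at
 * reshape_progress = 1200000 array sectors:
 *
 *   read_offset  = 1200000 / 3 = 400000 sectors per device
 *   write_offset = 1200000 / 4 = 300000 sectors per device
 *
 * read_offset is well past write_offset + 1024, so nothing unread can
 * be overwritten yet and max_progress = 400000 * 4 = 1600000.
 */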
3638 read_offset = info->reshape_progress / reshape->before.data_disks;
3639 write_offset = info->reshape_progress / reshape->after.data_disks;
3640 write_range = info->new_chunk/512;
3641 if (reshape->before.data_disks == reshape->after.data_disks)
3642 need_backup = array_size;
3643 else
3644 need_backup = reshape->backup_blocks;
3645 if (advancing) {
3646 if (read_offset < write_offset + write_range)
3647 max_progress = backup_point;
3648 else
3649 max_progress =
3650 read_offset *
3651 reshape->after.data_disks;
3652 } else {
3653 if (read_offset > write_offset - write_range)
3654 /* Can only progress as far as has been backed up,
3655 * which must be suspended */
3656 max_progress = backup_point;
3657 else if (info->reshape_progress <= need_backup)
3658 max_progress = backup_point;
3659 else {
3660 if (info->array.major_version >= 0)
3661 /* Can progress until backup is needed */
3662 max_progress = need_backup;
3663 else {
3664 /* Can progress until metadata update is required */
3665 max_progress =
3666 read_offset *
3667 reshape->after.data_disks;
3668 /* but data must be suspended */
3669 if (max_progress < *suspend_point)
3670 max_progress = *suspend_point;
3671 }
3672 }
3673 }
3674
3675 /* We know it is safe to progress to 'max_progress' providing
3676 * it is suspended or we are using native metadata.
3677 * Consider extending suspend_point 128M per device if it
3678 * is less than 64M per device beyond reshape_progress.
3679 * But always do a multiple of 'blocks'
3680 * FIXME this is too big - it takes too long to complete
3681 * this much.
3682 */
3683 target = 64*1024*2 * min(reshape->before.data_disks,
3684 reshape->after.data_disks);
3685 target /= reshape->backup_blocks;
3686 if (target < 2)
3687 target = 2;
3688 target *= reshape->backup_blocks;
3689
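/*
 * Example of the rounding above (added; hypothetical values): with
 * min(before, after) data disks = 4 the base target is
 * 64*1024*2 * 4 = 524288 sectors; with backup_blocks = 6144 this is
 * rounded down to a multiple of the backup unit:
 *
 *   524288 / 6144 = 85,  85 * 6144 = 522240 sectors
 *
 * and, for a growing reshape, suspend_hi is then advanced by up to
 * 2 * target at a time.
 */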
3690 /* For externally managed metadata we always need to suspend IO to
3691 * the area being reshaped so we regularly push suspend_point forward.
3692 * For native metadata we only need the suspend if we are going to do
3693 * a backup.
3694 */
3695 if (advancing) {
3696 if ((need_backup > info->reshape_progress
3697 || info->array.major_version < 0) &&
3698 *suspend_point < info->reshape_progress + target) {
3699 if (need_backup < *suspend_point + 2 * target)
3700 *suspend_point = need_backup;
3701 else if (*suspend_point + 2 * target < array_size)
3702 *suspend_point += 2 * target;
3703 else
3704 *suspend_point = array_size;
3705 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
3706 if (max_progress > *suspend_point)
3707 max_progress = *suspend_point;
3708 }
3709 } else {
3710 if (info->array.major_version >= 0) {
3711 /* Only need to suspend when about to backup */
3712 if (info->reshape_progress < need_backup * 2 &&
3713 *suspend_point > 0) {
3714 *suspend_point = 0;
3715 sysfs_set_num(info, NULL, "suspend_lo", 0);
3716 sysfs_set_num(info, NULL, "suspend_hi", need_backup);
3717 }
3718 } else {
3719 /* Need to suspend continually */
3720 if (info->reshape_progress < *suspend_point)
3721 *suspend_point = info->reshape_progress;
3722 if (*suspend_point + target < info->reshape_progress)
3723 /* No need to move suspend region yet */;
3724 else {
3725 if (*suspend_point >= 2 * target)
3726 *suspend_point -= 2 * target;
3727 else
3728 *suspend_point = 0;
3729 sysfs_set_num(info, NULL, "suspend_lo",
3730 *suspend_point);
3731 }
3732 if (max_progress < *suspend_point)
3733 max_progress = *suspend_point;
3734 }
3735 }
3736
3737 /* now set sync_max to allow that progress. sync_max, like
3738 * sync_completed is a count of sectors written per device, so
3739 * we find the difference between max_progress and the start point,
3740 * and divide that by after.data_disks to get a sync_max
3741 * number.
3742 * At the same time we convert wait_point to a similar number
3743 * for comparing against sync_completed.
3744 */
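/*
 * Worked example (added; made-up numbers): with 4 data disks after the
 * reshape and 512K (1024-sector) old and new chunks, an allowed
 * array-wide max_progress of 8389000 sectors becomes
 *
 *   8389000 / 4               = 2097250 sectors per device
 *   rounded down to chunk size = 2097152 sectors
 *
 * which is what gets written to sync_max (for a shrinking reshape the
 * value is first converted to "blocks processed from the end").
 */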
3745 /* scale down max_progress to per_disk */
3746 max_progress /= reshape->after.data_disks;
3747 /* Round to chunk size as some kernels give an erroneously high number */
3748 max_progress /= info->new_chunk/512;
3749 max_progress *= info->new_chunk/512;
3750 /* And round to old chunk size as the kernel wants that */
3751 max_progress /= info->array.chunk_size/512;
3752 max_progress *= info->array.chunk_size/512;
3753 /* Limit progress to the whole device */
3754 if (max_progress > info->component_size)
3755 max_progress = info->component_size;
3756 wait_point /= reshape->after.data_disks;
3757 if (!advancing) {
3758 /* switch from 'device offset' to 'processed block count' */
3759 max_progress = info->component_size - max_progress;
3760 wait_point = info->component_size - wait_point;
3761 }
3762
3763 if (!*frozen)
3764 sysfs_set_num(info, NULL, "sync_max", max_progress);
3765
3766 /* Now wait. If we have already reached the point that we were
3767 * asked to wait to, don't wait at all, else wait for any change.
3768 * We need to select on 'sync_completed' as that is the place that
3769 * notifications happen, but we are really interested in
3770 * 'reshape_position'
3771 */
3772 fd = sysfs_get_fd(info, NULL, "sync_completed");
3773 if (fd < 0)
3774 goto check_progress;
3775
3776 if (sysfs_fd_get_ll(fd, &completed) < 0)
3777 goto check_progress;
3778
3779 while (completed < max_progress && completed < wait_point) {
3780 /* Check that sync_action is still 'reshape' to avoid
3781 * waiting forever on a dead array
3782 */
3783 char action[20];
3784 if (sysfs_get_str(info, NULL, "sync_action",
3785 action, 20) <= 0 ||
3786 strncmp(action, "reshape", 7) != 0)
3787 break;
3788 /* Some kernels reset 'sync_completed' to zero
3789 * before setting 'sync_action' to 'idle'.
3790 * So we need these extra tests.
3791 */
3792 if (completed == 0 && advancing
3793 && info->reshape_progress > 0)
3794 break;
3795 if (completed == 0 && !advancing
3796 && info->reshape_progress < (info->component_size
3797 * reshape->after.data_disks))
3798 break;
3799 sysfs_wait(fd, NULL);
3800 if (sysfs_fd_get_ll(fd, &completed) < 0)
3801 goto check_progress;
3802 }
3803 /* Some kernels reset 'sync_completed' to zero,
3804 * so we need to recover the real point we are at in md
3805 */
3806 if (completed == 0)
3807 completed = max_progress;
3808
3809 /* some kernels can give an incorrectly high 'completed' number */
3810 completed /= (info->new_chunk/512);
3811 completed *= (info->new_chunk/512);
3812 /* Convert 'completed' back in to a 'progress' number */
3813 completed *= reshape->after.data_disks;
3814 if (!advancing) {
3815 completed = info->component_size * reshape->after.data_disks
3816 - completed;
3817 }
3818 *reshape_completed = completed;
3819
3820 close(fd);
3821
3822 /* We return the need_backup flag. Caller will decide
3823 * how much - a multiple of ->backup_blocks up to *suspend_point
3824 */
3825 if (advancing)
3826 return need_backup > info->reshape_progress;
3827 else
3828 return need_backup >= info->reshape_progress;
3829
3830 check_progress:
3831 /* if we couldn't read a number from sync_completed, then
3832 * either the reshape did complete, or it aborted.
3833 * We can tell which by checking for 'none' in reshape_position.
3834 * If it did abort, then it might immediately restart if
3835 * it was just a device failure that leaves us degraded but
3836 * functioning.
3837 */
3838 strcpy(buf, "hi");
3839 if (sysfs_get_str(info, NULL, "reshape_position", buf, sizeof(buf)) < 0
3840 || strncmp(buf, "none", 4) != 0) {
3841 /* The abort might only be temporary. Wait up to 10
3842 * seconds for fd to contain a valid number again.
3843 */
3844 int wait = 10000;
3845 int rv = -2;
3846 unsigned long long new_sync_max;
3847 while (fd >= 0 && rv < 0 && wait > 0) {
3848 if (sysfs_wait(fd, &wait) != 1)
3849 break;
3850 switch (sysfs_fd_get_ll(fd, &completed)) {
3851 case 0:
3852 /* all good again */
3853 rv = 1;
3854 /* If "sync_max" is no longer max_progress
3855 * we need to freeze things
3856 */
3857 sysfs_get_ll(info, NULL, "sync_max", &new_sync_max);
3858 *frozen = (new_sync_max != max_progress);
3859 break;
3860 case -2: /* read error - abort */
3861 wait = 0;
3862 break;
3863 }
3864 }
3865 if (fd >= 0)
3866 close(fd);
3867 return rv; /* abort */
3868 } else {
3869 /* Maybe racing with array shutdown - check state */
3870 if (fd >= 0)
3871 close(fd);
3872 if (sysfs_get_str(info, NULL, "array_state", buf, sizeof(buf)) < 0
3873 || strncmp(buf, "inactive", 8) == 0
3874 || strncmp(buf, "clear",5) == 0)
3875 return -2; /* abort */
3876 return -1; /* complete */
3877 }
3878 }
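/* For reference, the return convention relied on by child_monitor() below:
 * a positive value means keep monitoring and consider taking a backup,
 * 0 means no backup is currently needed, -1 means the reshape completed,
 * and -2 means it aborted or the array went inactive.  *suspend_point,
 * *reshape_completed and *frozen are updated as side effects.
 */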
3879
3880 /* FIXME return status is never checked */
3881 static int grow_backup(struct mdinfo *sra,
3882 unsigned long long offset, /* per device */
3883 unsigned long stripes, /* per device, in old chunks */
3884 int *sources, unsigned long long *offsets,
3885 int disks, int chunk, int level, int layout,
3886 int dests, int *destfd, unsigned long long *destoffsets,
3887 int part, int *degraded,
3888 char *buf)
3889 {
3890 /* Backup 'stripes' old-chunk-sized stripes at 'offset' on each device of the array,
3891 * to storage 'destfd' (offset 'destoffsets'), after first
3892 * suspending IO. Then allow resync to continue
3893 * over the suspended section.
3894 * Use part 'part' of the backup-super-block.
3895 */
3896 int odata = disks;
3897 int rv = 0;
3898 int i;
3899 unsigned long long ll;
3900 int new_degraded;
3901 //printf("offset %llu\n", offset);
3902 if (level >= 4)
3903 odata--;
3904 if (level == 6)
3905 odata--;
3906
3907 /* Check that the array hasn't become degraded, else we might back up the wrong data */
3908 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
3909 return -1; /* FIXME this error is ignored */
3910 new_degraded = (int)ll;
3911 if (new_degraded != *degraded) {
3912 /* check each device to ensure it is still working */
3913 struct mdinfo *sd;
3914 for (sd = sra->devs ; sd ; sd = sd->next) {
3915 if (sd->disk.state & (1<<MD_DISK_FAULTY))
3916 continue;
3917 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
3918 char sbuf[20];
3919 if (sysfs_get_str(sra, sd, "state", sbuf, 20) < 0 ||
3920 strstr(sbuf, "faulty") ||
3921 strstr(sbuf, "in_sync") == NULL) {
3922 /* this device is dead */
3923 sd->disk.state = (1<<MD_DISK_FAULTY);
3924 if (sd->disk.raid_disk >= 0 &&
3925 sources[sd->disk.raid_disk] >= 0) {
3926 close(sources[sd->disk.raid_disk]);
3927 sources[sd->disk.raid_disk] = -1;
3928 }
3929 }
3930 }
3931 }
3932 *degraded = new_degraded;
3933 }
3934 if (part) {
3935 bsb.arraystart2 = __cpu_to_le64(offset * odata);
3936 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
3937 } else {
3938 bsb.arraystart = __cpu_to_le64(offset * odata);
3939 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
3940 }
3941 if (part)
3942 bsb.magic[15] = '2';
3943 for (i = 0; i < dests; i++)
3944 if (part)
3945 lseek64(destfd[i], destoffsets[i] + __le64_to_cpu(bsb.devstart2)*512, 0);
3946 else
3947 lseek64(destfd[i], destoffsets[i], 0);
3948
3949 rv = save_stripes(sources, offsets,
3950 disks, chunk, level, layout,
3951 dests, destfd,
3952 offset*512*odata, stripes * chunk * odata,
3953 buf);
3954
3955 if (rv)
3956 return rv;
3957 bsb.mtime = __cpu_to_le64(time(0));
3958 for (i = 0; i < dests; i++) {
3959 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
3960
3961 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
3962 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
3963 bsb.sb_csum2 = bsb_csum((char*)&bsb,
3964 ((char*)&bsb.sb_csum2)-((char*)&bsb));
3965
3966 rv = -1;
3967 if ((unsigned long long)lseek64(destfd[i], destoffsets[i] - 4096, 0)
3968 != destoffsets[i] - 4096)
3969 break;
3970 if (write(destfd[i], &bsb, 512) != 512)
3971 break;
3972 if (destoffsets[i] > 4096) {
3973 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
3974 destoffsets[i]+stripes*chunk*odata)
3975 break;
3976 if (write(destfd[i], &bsb, 512) != 512)
3977 break;
3978 }
3979 fsync(destfd[i]);
3980 rv = 0;
3981 }
3982
3983 return rv;
3984 }
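/* Note on the layout written above: each destination receives the saved
 * stripes at destoffsets[i] (part 0) or destoffsets[i] + devstart2*512
 * (part 1), the 512-byte backup superblock 'bsb' at destoffsets[i] - 4096,
 * and, when destoffsets[i] > 4096, a further copy of 'bsb' at
 * destoffsets[i] + stripes*chunk*odata.
 */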
3985
3986 /* in 2.6.30, the value reported by sync_completed can be
3987 * less than it should be by one stripe.
3988 * This only happens when reshape hits sync_max and pauses.
3989 * So allow wait_backup to either extend sync_max further
3990 * than strictly necessary, or return before the
3991 * sync has got quite as far as we would really like.
3992 * This is what 'blocks2' is for.
3993 * The various callers give appropriate values so that
3994 * everything works.
3995 */
3996 /* FIXME return value is often ignored */
3997 static int forget_backup(int dests, int *destfd,
3998 unsigned long long *destoffsets,
3999 int part)
4000 {
4001 /*
4002 * Erase backup 'part' (which is 0 or 1)
4003 */
4004 int i;
4005 int rv;
4006
4007 if (part) {
4008 bsb.arraystart2 = __cpu_to_le64(0);
4009 bsb.length2 = __cpu_to_le64(0);
4010 } else {
4011 bsb.arraystart = __cpu_to_le64(0);
4012 bsb.length = __cpu_to_le64(0);
4013 }
4014 bsb.mtime = __cpu_to_le64(time(0));
4015 rv = 0;
4016 for (i = 0; i < dests; i++) {
4017 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4018 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4019 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4020 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4021 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4022 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4023 destoffsets[i]-4096)
4024 rv = -1;
4025 if (rv == 0 &&
4026 write(destfd[i], &bsb, 512) != 512)
4027 rv = -1;
4028 fsync(destfd[i]);
4029 }
4030 return rv;
4031 }
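/* Note: "erasing" a slot only zeroes its arraystart/length (or the *2
 * fields) and rewrites 'bsb', with a fresh mtime and checksum, at
 * destoffsets[i] - 4096; the saved data itself is left in place and is
 * ignored from then on because the recorded length is 0.
 */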
4032
4033 static void fail(char *msg)
4034 {
4035 int rv;
4036 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4037 rv |= (write(2, "\n", 1) != 1);
4038 exit(rv ? 1 : 2);
4039 }
4040
4041 static char *abuf, *bbuf;
4042 static unsigned long long abuflen;
4043 static void validate(int afd, int bfd, unsigned long long offset)
4044 {
4045 /* Check the data in the backup against the array.
4046 * This is only used for regression testing and should not
4047 * be used while the array is active
4048 */
4049 if (afd < 0)
4050 return;
4051 lseek64(bfd, offset - 4096, 0);
4052 if (read(bfd, &bsb2, 512) != 512)
4053 fail("cannot read bsb");
4054 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4055 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4056 fail("first csum bad");
4057 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4058 fail("magic is bad");
4059 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4060 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4061 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4062 fail("second csum bad");
4063
4064 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4065 fail("devstart is wrong");
4066
4067 if (bsb2.length) {
4068 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4069
4070 if (abuflen < len) {
4071 free(abuf);
4072 free(bbuf);
4073 abuflen = len;
4074 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4075 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4076 abuflen = 0;
4077 /* just stop validating on mem-alloc failure */
4078 return;
4079 }
4080 }
4081
4082 lseek64(bfd, offset, 0);
4083 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4084 //printf("len %llu\n", len);
4085 fail("read first backup failed");
4086 }
4087 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4088 if ((unsigned long long)read(afd, abuf, len) != len)
4089 fail("read first from array failed");
4090 if (memcmp(bbuf, abuf, len) != 0) {
4091 #if 0
4092 int i;
4093 printf("offset=%llu len=%llu\n",
4094 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4095 for (i=0; i<len; i++)
4096 if (bbuf[i] != abuf[i]) {
4097 printf("first diff byte %d\n", i);
4098 break;
4099 }
4100 #endif
4101 fail("data1 compare failed");
4102 }
4103 }
4104 if (bsb2.length2) {
4105 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4106
4107 if (abuflen < len) {
4108 free(abuf);
4109 free(bbuf);
4110 abuflen = len;
4111 abuf = xmalloc(abuflen);
4112 bbuf = xmalloc(abuflen);
4113 }
4114
4115 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4116 if ((unsigned long long)read(bfd, bbuf, len) != len)
4117 fail("read second backup failed");
4118 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4119 if ((unsigned long long)read(afd, abuf, len) != len)
4120 fail("read second from array failed");
4121 if (memcmp(bbuf, abuf, len) != 0)
4122 fail("data2 compare failed");
4123 }
4124 }
4125
4126 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4127 struct supertype *st, unsigned long blocks,
4128 int *fds, unsigned long long *offsets,
4129 int dests, int *destfd, unsigned long long *destoffsets)
4130 {
4131 /* Monitor a reshape where backup is being performed using
4132 * 'native' mechanism - either to a backup file, or
4133 * to some space in a spare.
4134 */
4135 char *buf;
4136 int degraded = -1;
4137 unsigned long long speed;
4138 unsigned long long suspend_point, array_size;
4139 unsigned long long backup_point, wait_point;
4140 unsigned long long reshape_completed;
4141 int done = 0;
4142 int increasing = reshape->after.data_disks >= reshape->before.data_disks;
4143 int part = 0; /* The next part of the backup area to fill. It may already
4144 * be full, so we need to check */
4145 int level = reshape->level;
4146 int layout = reshape->before.layout;
4147 int data = reshape->before.data_disks;
4148 int disks = reshape->before.data_disks + reshape->parity;
4149 int chunk = sra->array.chunk_size;
4150 struct mdinfo *sd;
4151 unsigned long stripes;
4152 int uuid[4];
4153 int frozen = 0;
4154
4155 /* set up the backup-super-block. This requires the
4156 * uuid from the array.
4157 */
4158 /* Find a superblock */
4159 for (sd = sra->devs; sd; sd = sd->next) {
4160 char *dn;
4161 int devfd;
4162 int ok;
4163 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4164 continue;
4165 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4166 devfd = dev_open(dn, O_RDONLY);
4167 if (devfd < 0)
4168 continue;
4169 ok = st->ss->load_super(st, devfd, NULL);
4170 close(devfd);
4171 if (ok == 0)
4172 break;
4173 }
4174 if (!sd) {
4175 pr_err("Cannot find a superblock\n");
4176 return 0;
4177 }
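	/* The backup superblock is stamped with the array's uuid below so
	 * that Grow_restart() can later verify that a backup found on a
	 * spare or in a backup file really belongs to this array.
	 */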
4178
4179 memset(&bsb, 0, 512);
4180 memcpy(bsb.magic, "md_backup_data-1", 16);
4181 st->ss->uuid_from_super(st, uuid);
4182 memcpy(bsb.set_uuid, uuid, 16);
4183 bsb.mtime = __cpu_to_le64(time(0));
4184 bsb.devstart2 = blocks;
4185
4186 stripes = blocks / (sra->array.chunk_size/512) /
4187 reshape->before.data_disks;
4188
4189 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4190 /* Don't start the 'reshape' */
4191 return 0;
4192 if (reshape->before.data_disks == reshape->after.data_disks) {
4193 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4194 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4195 }
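	/* For a reshape that keeps data_disks unchanged, sync_speed_min is
	 * temporarily raised to 200000; the original value, saved in 'speed'
	 * above, is restored just before this function returns.
	 */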
4196
4197 if (increasing) {
4198 array_size = sra->component_size * reshape->after.data_disks;
4199 backup_point = sra->reshape_progress;
4200 suspend_point = 0;
4201 } else {
4202 array_size = sra->component_size * reshape->before.data_disks;
4203 backup_point = reshape->backup_blocks;
4204 suspend_point = array_size;
4205 }
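	/* Starting state: if data_disks is not being reduced, the reshape
	 * advances, so backup_point starts at the current reshape position
	 * and suspend_point at 0; a shrinking reshape runs backwards from
	 * the end, so suspend_point starts at array_size and backup_point
	 * at reshape->backup_blocks.
	 */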
4206
4207 while (!done) {
4208 int rv;
4209
4210 /* Want to return as soon as the oldest backup slot can
4211 * be released as that allows us to start backing up
4212 * some more, providing suspend_point has been
4213 * advanced, which it should have.
4214 */
4215 if (increasing) {
4216 wait_point = array_size;
4217 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4218 wait_point = (__le64_to_cpu(bsb.arraystart) +
4219 __le64_to_cpu(bsb.length));
4220 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4221 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4222 __le64_to_cpu(bsb.length2));
4223 } else {
4224 wait_point = 0;
4225 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4226 wait_point = __le64_to_cpu(bsb.arraystart);
4227 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4228 wait_point = __le64_to_cpu(bsb.arraystart2);
4229 }
4230
4231 reshape_completed = sra->reshape_progress;
4232 rv = progress_reshape(sra, reshape,
4233 backup_point, wait_point,
4234 &suspend_point, &reshape_completed,
4235 &frozen);
4236 /* external metadata would need to ping_monitor here */
4237 sra->reshape_progress = reshape_completed;
4238
4239 /* Clear any backup region that is before 'here' */
4240 if (increasing) {
4241 if (__le64_to_cpu(bsb.length) > 0 &&
4242 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4243 __le64_to_cpu(bsb.length)))
4244 forget_backup(dests, destfd,
4245 destoffsets, 0);
4246 if (__le64_to_cpu(bsb.length2) > 0 &&
4247 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4248 __le64_to_cpu(bsb.length2)))
4249 forget_backup(dests, destfd,
4250 destoffsets, 1);
4251 } else {
4252 if (__le64_to_cpu(bsb.length) > 0 &&
4253 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4254 forget_backup(dests, destfd,
4255 destoffsets, 0);
4256 if (__le64_to_cpu(bsb.length2) > 0 &&
4257 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4258 forget_backup(dests, destfd,
4259 destoffsets, 1);
4260 }
4261 if (sigterm)
4262 rv = -2;
4263 if (rv < 0) {
4264 if (rv == -1)
4265 done = 1;
4266 break;
4267 }
4268 if (rv == 0 && increasing && !st->ss->external) {
4269 /* No longer need to monitor this reshape */
4270 sysfs_set_str(sra, NULL, "sync_max", "max");
4271 done = 1;
4272 break;
4273 }
4274
4275 while (rv) {
4276 unsigned long long offset;
4277 unsigned long actual_stripes;
4278 /* Need to backup some data.
4279 * If 'part' is not used and the desired
4280 * backup size is suspended, do a backup,
4281 * then consider the next part.
4282 */
4283 /* Check that 'part' is unused */
4284 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4285 break;
4286 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4287 break;
4288
4289 offset = backup_point / data;
4290 actual_stripes = stripes;
4291 if (increasing) {
4292 if (offset + actual_stripes * (chunk/512) >
4293 sra->component_size)
4294 actual_stripes = ((sra->component_size - offset)
4295 / (chunk/512));
4296 if (offset + actual_stripes * (chunk/512) >
4297 suspend_point/data)
4298 break;
4299 } else {
4300 if (offset < actual_stripes * (chunk/512))
4301 actual_stripes = offset / (chunk/512);
4302 offset -= actual_stripes * (chunk/512);
4303 if (offset < suspend_point/data)
4304 break;
4305 }
4306 if (actual_stripes == 0)
4307 break;
4308 grow_backup(sra, offset, actual_stripes,
4309 fds, offsets,
4310 disks, chunk, level, layout,
4311 dests, destfd, destoffsets,
4312 part, &degraded, buf);
4313 validate(afd, destfd[0], destoffsets[0]);
4314 /* record where 'part' is up to */
4315 part = !part;
4316 if (increasing)
4317 backup_point += actual_stripes * (chunk/512) * data;
4318 else
4319 backup_point -= actual_stripes * (chunk/512) * data;
4320 }
4321 }
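	/* The backup area holds two slots, (arraystart, length) and
	 * (arraystart2, length2) in 'bsb', and 'part' alternates between
	 * them.  Each pass of the loop above first releases (forget_backup)
	 * any slot the reshape has already moved past, then, if the current
	 * slot is free and enough of the array is suspended, fills it with
	 * grow_backup and advances backup_point.
	 */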
4322
4323 /* FIXME maybe call progress_reshape one more time instead */
4324 /* remove any remaining suspension */
4325 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4326 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4327 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4328 sysfs_set_num(sra, NULL, "sync_min", 0);
4329
4330 if (reshape->before.data_disks == reshape->after.data_disks)
4331 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4332 free(buf);
4333 return done;
4334 }
4335
4336 /*
4337 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4338 * write that data into the array and update the superblocks with
4339 * the new reshape_progress
4340 */
4341 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, int cnt,
4342 char *backup_file, int verbose)
4343 {
4344 int i, j;
4345 int old_disks;
4346 unsigned long long *offsets;
4347 unsigned long long nstripe, ostripe;
4348 int ndata, odata;
4349
4350 odata = info->array.raid_disks - info->delta_disks - 1;
4351 if (info->array.level == 6) odata--; /* number of data disks */
4352 ndata = info->array.raid_disks - 1;
4353 if (info->new_level == 6) ndata--;
4354
4355 old_disks = info->array.raid_disks - info->delta_disks;
4356
4357 if (info->delta_disks <= 0)
4358 /* Didn't grow, so the backup file must have
4359 * been used
4360 */
4361 old_disks = cnt;
4362 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4363 struct mdinfo dinfo;
4364 int fd;
4365 int bsbsize;
4366 char *devname, namebuf[20];
4367 unsigned long long lo, hi;
4368
4369 /* This was a spare and may have some saved data on it.
4370 * Load the superblock, find and load the
4371 * backup_super_block.
4372 * If either fails, go on to next device.
4373 * If the backup contains no new info, skip it,
4374 * else restore data and update all superblocks
4375 */
4376 if (i == old_disks-1) {
4377 fd = open(backup_file, O_RDONLY);
4378 if (fd<0) {
4379 pr_err("backup file %s inaccessible: %s\n",
4380 backup_file, strerror(errno));
4381 continue;
4382 }
4383 devname = backup_file;
4384 } else {
4385 fd = fdlist[i];
4386 if (fd < 0)
4387 continue;
4388 if (st->ss->load_super(st, fd, NULL))
4389 continue;
4390
4391 st->ss->getinfo_super(st, &dinfo, NULL);
4392 st->ss->free_super(st);
4393
4394 if (lseek64(fd,
4395 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4396 0) < 0) {
4397 pr_err("Cannot seek on device %d\n", i);
4398 continue; /* Cannot seek */
4399 }
4400 sprintf(namebuf, "device-%d", i);
4401 devname = namebuf;
4402 }
4403 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4404 if (verbose)
4405 pr_err("Cannot read from %s\n", devname);
4406 continue; /* Cannot read */
4407 }
4408 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4409 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4410 if (verbose)
4411 pr_err("No backup metadata on %s\n", devname);
4412 continue;
4413 }
4414 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4415 if (verbose)
4416 pr_err("Bad backup-metadata checksum on %s\n", devname);
4417 continue; /* bad checksum */
4418 }
4419 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4420 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4421 if (verbose)
4422 pr_err("Bad backup-metadata checksum2 on %s\n", devname);
4423 continue; /* Bad second checksum */
4424 }
4425 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4426 if (verbose)
4427 pr_err("Wrong uuid on backup-metadata on %s\n", devname);
4428 continue; /* Wrong uuid */
4429 }
4430
4431 /* array utime and backup-mtime should be updated at much the same time, but it seems that
4432 * sometimes they aren't... So allow considerable flexibility in matching (the backup may be up to
4433 * 2 hours older, or 10 minutes newer, than the array), and allow this test to be overridden by an environment variable.
4434 */
4435 if (info->array.utime > (int)__le64_to_cpu(bsb.mtime) + 2*60*60 ||
4436 info->array.utime < (int)__le64_to_cpu(bsb.mtime) - 10*60) {
4437 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4438 pr_err("accepting backup with timestamp %lu "
4439 "for array with timestamp %lu\n",
4440 (unsigned long)__le64_to_cpu(bsb.mtime),
4441 (unsigned long)info->array.utime);
4442 } else {
4443 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4444 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4445 continue; /* time stamp is too bad */
4446 }
4447 }
4448
4449 if (bsb.magic[15] == '1') {
4450 if (bsb.length == 0)
4451 continue;
4452 if (info->delta_disks >= 0) {
4453 /* reshape_progress is increasing */
4454 if (__le64_to_cpu(bsb.arraystart)
4455 + __le64_to_cpu(bsb.length)
4456 < info->reshape_progress) {
4457 nonew:
4458 if (verbose)
4459 pr_err("backup-metadata found on %s but is not needed\n", devname);
4460 continue; /* No new data here */
4461 }
4462 } else {
4463 /* reshape_progress is decreasing */
4464 if (__le64_to_cpu(bsb.arraystart) >=
4465 info->reshape_progress)
4466 goto nonew; /* No new data here */
4467 }
4468 } else {
4469 if (bsb.length == 0 && bsb.length2 == 0)
4470 continue;
4471 if (info->delta_disks >= 0) {
4472 /* reshape_progress is increasing */
4473 if ((__le64_to_cpu(bsb.arraystart)
4474 + __le64_to_cpu(bsb.length)
4475 < info->reshape_progress)
4476 &&
4477 (__le64_to_cpu(bsb.arraystart2)
4478 + __le64_to_cpu(bsb.length2)
4479 < info->reshape_progress))
4480 goto nonew; /* No new data here */
4481 } else {
4482 /* reshape_progress is decreasing */
4483 if (__le64_to_cpu(bsb.arraystart) >=
4484 info->reshape_progress &&
4485 __le64_to_cpu(bsb.arraystart2) >=
4486 info->reshape_progress)
4487 goto nonew; /* No new data here */
4488 }
4489 }
4490 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4491 second_fail:
4492 if (verbose)
4493 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4494 devname);
4495 continue; /* Cannot seek */
4496 }
4497 /* There should be a duplicate backup superblock 4k before here */
4498 if (lseek64(fd, -4096, 1) < 0 ||
4499 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4500 goto second_fail; /* Cannot find leading superblock */
4501 if (bsb.magic[15] == '1')
4502 bsbsize = offsetof(struct mdp_backup_super, pad1);
4503 else
4504 bsbsize = offsetof(struct mdp_backup_super, pad);
4505 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4506 goto second_fail; /* Leading superblock does not match */
4507
4508 /* Now need the data offsets for all devices. */
4509 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4510 for(j=0; j<info->array.raid_disks; j++) {
4511 if (fdlist[j] < 0)
4512 continue;
4513 if (st->ss->load_super(st, fdlist[j], NULL))
4514 /* FIXME should this be an error */
4515 continue;
4516 st->ss->getinfo_super(st, &dinfo, NULL);
4517 st->ss->free_super(st);
4518 offsets[j] = dinfo.data_offset * 512;
4519 }
4520 printf(Name ": restoring critical section\n");
4521
4522 if (restore_stripes(fdlist, offsets,
4523 info->array.raid_disks,
4524 info->new_chunk,
4525 info->new_level,
4526 info->new_layout,
4527 fd, __le64_to_cpu(bsb.devstart)*512,
4528 __le64_to_cpu(bsb.arraystart)*512,
4529 __le64_to_cpu(bsb.length)*512, NULL)) {
4530 /* didn't succeed, so give up */
4531 if (verbose)
4532 pr_err("Error restoring backup from %s\n",
4533 devname);
4534 free(offsets);
4535 return 1;
4536 }
4537
4538 if (bsb.magic[15] == '2' &&
4539 restore_stripes(fdlist, offsets,
4540 info->array.raid_disks,
4541 info->new_chunk,
4542 info->new_level,
4543 info->new_layout,
4544 fd, __le64_to_cpu(bsb.devstart)*512 +
4545 __le64_to_cpu(bsb.devstart2)*512,
4546 __le64_to_cpu(bsb.arraystart2)*512,
4547 __le64_to_cpu(bsb.length2)*512, NULL)) {
4548 /* didn't succeed, so give up */
4549 if (verbose)
4550 pr_err("Error restoring second backup from %s\n",
4551 devname);
4552 free(offsets);
4553 return 1;
4554 }
4555
4556 free(offsets);
4557
4558 /* Ok, so the data is restored. Let's update those superblocks. */
4559
4560 lo = hi = 0;
4561 if (bsb.length) {
4562 lo = __le64_to_cpu(bsb.arraystart);
4563 hi = lo + __le64_to_cpu(bsb.length);
4564 }
4565 if (bsb.magic[15] == '2' && bsb.length2) {
4566 unsigned long long lo1, hi1;
4567 lo1 = __le64_to_cpu(bsb.arraystart2);
4568 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4569 if (lo == hi) {
4570 lo = lo1;
4571 hi = hi1;
4572 } else if (lo < lo1)
4573 hi = hi1;
4574 else
4575 lo = lo1;
4576 }
4577 if (lo < hi &&
4578 (info->reshape_progress < lo ||
4579 info->reshape_progress > hi))
4580 /* backup does not affect reshape_progress */ ;
4581 else if (info->delta_disks >= 0) {
4582 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4583 __le64_to_cpu(bsb.length);
4584 if (bsb.magic[15] == '2') {
4585 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2) +
4586 __le64_to_cpu(bsb.length2);
4587 if (p2 > info->reshape_progress)
4588 info->reshape_progress = p2;
4589 }
4590 } else {
4591 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4592 if (bsb.magic[15] == '2') {
4593 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2);
4594 if (p2 < info->reshape_progress)
4595 info->reshape_progress = p2;
4596 }
4597 }
4598 for (j=0; j<info->array.raid_disks; j++) {
4599 if (fdlist[j] < 0)
4600 continue;
4601 if (st->ss->load_super(st, fdlist[j], NULL))
4602 continue;
4603 st->ss->getinfo_super(st, &dinfo, NULL);
4604 dinfo.reshape_progress = info->reshape_progress;
4605 st->ss->update_super(st, &dinfo,
4606 "_reshape_progress",
4607 NULL,0, 0, NULL);
4608 st->ss->store_super(st, fdlist[j]);
4609 st->ss->free_super(st);
4610 }
4611 return 0;
4612 }
4613 /* Didn't find any backup data, try to see if any
4614 * was needed.
4615 */
4616 if (info->delta_disks < 0) {
4617 /* When shrinking, the critical section is at the end.
4618 * So see if we are before the critical section.
4619 */
4620 unsigned long long first_block;
4621 nstripe = ostripe = 0;
4622 first_block = 0;
4623 while (ostripe >= nstripe) {
4624 ostripe += info->array.chunk_size / 512;
4625 first_block = ostripe * odata;
4626 nstripe = first_block / ndata / (info->new_chunk/512) *
4627 (info->new_chunk/512);
4628 }
4629
4630 if (info->reshape_progress >= first_block)
4631 return 0;
4632 }
4633 if (info->delta_disks > 0) {
4634 /* See if we are beyond the critical section. */
4635 unsigned long long last_block;
4636 nstripe = ostripe = 0;
4637 last_block = 0;
4638 while (nstripe >= ostripe) {
4639 nstripe += info->new_chunk / 512;
4640 last_block = nstripe * ndata;
4641 ostripe = last_block / odata / (info->array.chunk_size/512) *
4642 (info->array.chunk_size/512);
4643 }
4644
4645 if (info->reshape_progress >= last_block)
4646 return 0;
4647 }
4648 /* needed to recover critical section! */
4649 if (verbose)
4650 pr_err("Failed to find backup of critical section\n");
4651 return 1;
4652 }
4653
4654 int Grow_continue_command(char *devname, int fd,
4655 char *backup_file, int verbose)
4656 {
4657 int ret_val = 0;
4658 struct supertype *st = NULL;
4659 struct mdinfo *content = NULL;
4660 struct mdinfo array;
4661 char *subarray = NULL;
4662 struct mdinfo *cc = NULL;
4663 struct mdstat_ent *mdstat = NULL;
4664 int cfd = -1;
4665 int fd2 = -1;
4666
4667 dprintf("Grow continue from command line called for %s\n",
4668 devname);
4669
4670 st = super_by_fd(fd, &subarray);
4671 if (!st || !st->ss) {
4672 pr_err("Unable to determine metadata format for %s\n",
4673 devname);
4674 return 1;
4675 }
4676 dprintf("Grow continue is run for ");
4677 if (st->ss->external == 0) {
4678 int d;
4679 dprintf("native array (%s)\n", devname);
4680 if (ioctl(fd, GET_ARRAY_INFO, &array.array) < 0) {
4681 pr_err("%s is not an active md array -"
4682 " aborting\n", devname);
4683 ret_val = 1;
4684 goto Grow_continue_command_exit;
4685 }
4686 content = &array;
4687 /* Need to load a superblock.
4688 * FIXME we should really get what we need from
4689 * sysfs
4690 */
4691 for (d = 0; d < MAX_DISKS; d++) {
4692 mdu_disk_info_t disk;
4693 char *dv;
4694 int err;
4695 disk.number = d;
4696 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
4697 continue;
4698 if (disk.major == 0 && disk.minor == 0)
4699 continue;
4700 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
4701 continue;
4702 dv = map_dev(disk.major, disk.minor, 1);
4703 if (!dv)
4704 continue;
4705 fd2 = dev_open(dv, O_RDONLY);
4706 if (fd2 < 0)
4707 continue;
4708 err = st->ss->load_super(st, fd2, NULL);
4709 close(fd2);
4710 /* invalidate fd2 to avoid possible double close() */
4711 fd2 = -1;
4712 if (err)
4713 continue;
4714 break;
4715 }
4716 if (d == MAX_DISKS) {
4717 pr_err("Unable to load metadata for %s\n",
4718 devname);
4719 ret_val = 1;
4720 goto Grow_continue_command_exit;
4721 }
4722 st->ss->getinfo_super(st, content, NULL);
4723 } else {
4724 char *container;
4725
4726 if (subarray) {
4727 dprintf("subarray (%s)\n", subarray);
4728 container = st->container_devnm;
4729 cfd = open_dev_excl(st->container_devnm);
4730 } else {
4731 container = st->devnm;
4732 close(fd);
4733 cfd = open_dev_excl(st->devnm);
4734 dprintf("container (%s)\n", container);
4735 fd = cfd;
4736 }
4737 if (cfd < 0) {
4738 pr_err("Unable to open container "
4739 "for %s\n", devname);
4740 ret_val = 1;
4741 goto Grow_continue_command_exit;
4742 }
4743
4744 /* find in container array under reshape
4745 */
4746 ret_val = st->ss->load_container(st, cfd, NULL);
4747 if (ret_val) {
4748 pr_err("Cannot read superblock for %s\n",
4749 devname);
4750 ret_val = 1;
4751 goto Grow_continue_command_exit;
4752 }
4753
4754 cc = st->ss->container_content(st, subarray);
4755 for (content = cc; content ; content = content->next) {
4756 char *array;
4757 int allow_reshape = 1;
4758
4759 if (content->reshape_active == 0)
4760 continue;
4761 /* The decision about array or container wide
4762 * reshape is taken in Grow_continue based on
4763 * content->reshape_active state, therefore we
4764 * need to check_reshape based on
4765 * reshape_active and subarray name
4766 */
4767 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
4768 allow_reshape = 0;
4769 if (content->reshape_active == CONTAINER_RESHAPE &&
4770 (content->array.state
4771 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
4772 allow_reshape = 0;
4773
4774 if (!allow_reshape) {
4775 pr_err("cannot continue reshape of an array"
4776 " in container with unsupported"
4777 " metadata: %s(%s)\n",
4778 devname, container);
4779 ret_val = 1;
4780 goto Grow_continue_command_exit;
4781 }
4782
4783 array = strchr(content->text_version+1, '/')+1;
4784 mdstat = mdstat_by_subdev(array, container);
4785 if (!mdstat)
4786 continue;
4787 if (mdstat->active == 0) {
4788 pr_err("Skipping inactive array %s.\n",
4789 mdstat->devnm);
4790 free_mdstat(mdstat);
4791 mdstat = NULL;
4792 continue;
4793 }
4794 break;
4795 }
4796 if (!content) {
4797 pr_err("Unable to determine reshaped "
4798 "array for %s\n", devname);
4799 ret_val = 1;
4800 goto Grow_continue_command_exit;
4801 }
4802 fd2 = open_dev(mdstat->devnm);
4803 if (fd2 < 0) {
4804 pr_err("cannot open (%s)\n", mdstat->devnm);
4805 ret_val = 1;
4806 goto Grow_continue_command_exit;
4807 }
4808
4809 sysfs_init(content, fd2, mdstat->devnm);
4810
4811 /* start mdmon in case it is not running
4812 */
4813 if (!mdmon_running(container))
4814 start_mdmon(container);
4815 ping_monitor(container);
4816
4817 if (mdmon_running(container))
4818 st->update_tail = &st->updates;
4819 else {
4820 pr_err("No mdmon found. "
4821 "Grow cannot continue.\n");
4822 ret_val = 1;
4823 goto Grow_continue_command_exit;
4824 }
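		/* With update_tail pointing at st->updates, subsequent
		 * metadata changes are queued as updates for mdmon to apply
		 * rather than written directly by this process (mdmon owns
		 * the metadata while an external-metadata array is active).
		 */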
4825 }
4826
4827 /* verify that array under reshape is started from
4828 * correct position
4829 */
4830 if (verify_reshape_position(content, content->array.level) < 0) {
4831 ret_val = 1;
4832 goto Grow_continue_command_exit;
4833 }
4834
4835 /* continue reshape
4836 */
4837 ret_val = Grow_continue(fd, st, content, backup_file, 0);
4838
4839 Grow_continue_command_exit:
4840 if (fd2 > -1)
4841 close(fd2);
4842 if (cfd > -1)
4843 close(cfd);
4844 st->ss->free_super(st);
4845 free_mdstat(mdstat);
4846 sysfs_free(cc);
4847 free(subarray);
4848
4849 return ret_val;
4850 }
4851
4852 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
4853 char *backup_file, int freeze_reshape)
4854 {
4855 int ret_val = 2;
4856
4857 if (!info->reshape_active)
4858 return ret_val;
4859
4860 if (st->ss->external) {
4861 int cfd = open_dev(st->container_devnm);
4862
4863 if (cfd < 0)
4864 return 1;
4865
4866 st->ss->load_container(st, cfd, st->container_devnm);
4867 close(cfd);
4868 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
4869 st, info, 0, backup_file,
4870 0,
4871 1 | info->reshape_active,
4872 freeze_reshape);
4873 } else
4874 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
4875 NULL, INVALID_SECTORS,
4876 backup_file, 0, 0,
4877 1 | info->reshape_active,
4878 freeze_reshape);
4879
4880 return ret_val;
4881 }