1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2012 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27
28 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
29 #error no endian defined
30 #endif
31 #include "md_u.h"
32 #include "md_p.h"
33
34 #ifndef offsetof
35 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
36 #endif
37
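/* restore_backup() is used when assembling an array whose reshape was
 * interrupted: it opens every listed member device (spares included) and
 * then either lets external metadata recover its own backup or calls
 * Grow_restart() to copy the backed-up critical section back from
 * backup_file or the spare devices.
 */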
38 int restore_backup(struct supertype *st,
39 struct mdinfo *content,
40 int working_disks,
41 int next_spare,
42 char *backup_file,
43 int verbose)
44 {
45 int i;
46 int *fdlist;
47 struct mdinfo *dev;
48 int err;
49 int disk_count = next_spare + working_disks;
50
51 dprintf("Called restore_backup()\n");
52 fdlist = xmalloc(sizeof(int) * disk_count);
53
54 for (i = 0; i < next_spare; i++)
55 fdlist[i] = -1;
56 for (dev = content->devs; dev; dev = dev->next) {
57 char buf[22];
58 int fd;
59 sprintf(buf, "%d:%d",
60 dev->disk.major,
61 dev->disk.minor);
62 fd = dev_open(buf, O_RDWR);
63
64 if (dev->disk.raid_disk >= 0)
65 fdlist[dev->disk.raid_disk] = fd;
66 else
67 fdlist[next_spare++] = fd;
68 }
69
70 if (st->ss->external && st->ss->recover_backup)
71 err = st->ss->recover_backup(st, content);
72 else
73 err = Grow_restart(st, content, fdlist, next_spare,
74 backup_file, verbose > 0);
75
76 while (next_spare > 0) {
77 next_spare--;
78 if (fdlist[next_spare] >= 0)
79 close(fdlist[next_spare]);
80 }
81 free(fdlist);
82 if (err) {
83 pr_err("Failed to restore critical"
84 " section for reshape - sorry.\n");
85 if (!backup_file)
86 pr_err("Possibly you need"
87 " to specify a --backup-file\n");
88 return 1;
89 }
90
91 dprintf("restore_backup() returns status OK.\n");
92 return 0;
93 }
94
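/* Grow_Add_device() implements "--grow --add" for linear arrays, e.g.
 * (device names purely illustrative):
 *
 *	mdadm --grow /dev/md0 --add /dev/sdc1
 *
 * Here fd is open on the array (/dev/md0) and newdev names the device
 * being appended.
 */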
95 int Grow_Add_device(char *devname, int fd, char *newdev)
96 {
97 /* Add a device to an active array.
98 * Currently, just extend a linear array.
99 * This requires writing a new superblock on the
100 * new device, calling the kernel to add the device,
101 * and if that succeeds, update the superblock on
102 * all other devices.
103 * This means that we need to *find* all other devices.
104 */
105 struct mdinfo info;
106
107 struct stat stb;
108 int nfd, fd2;
109 int d, nd;
110 struct supertype *st = NULL;
111 char *subarray = NULL;
112
113 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
114 pr_err("cannot get array info for %s\n", devname);
115 return 1;
116 }
117
118 if (info.array.level != -1) {
119 pr_err("can only add devices to linear arrays\n");
120 return 1;
121 }
122
123 st = super_by_fd(fd, &subarray);
124 if (!st) {
125 pr_err("cannot handle arrays with superblock version %d\n",
126 info.array.major_version);
127 return 1;
128 }
129
130 if (subarray) {
131 pr_err("Cannot grow linear sub-arrays yet\n");
132 free(subarray);
133 free(st);
134 return 1;
135 }
136
137 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
138 if (nfd < 0) {
139 pr_err("cannot open %s\n", newdev);
140 free(st);
141 return 1;
142 }
143 fstat(nfd, &stb);
144 if ((stb.st_mode & S_IFMT) != S_IFBLK) {
145 pr_err("%s is not a block device!\n", newdev);
146 close(nfd);
147 free(st);
148 return 1;
149 }
150 /* now check out all the devices and make sure we can read the
151 * superblock */
152 for (d=0 ; d < info.array.raid_disks ; d++) {
153 mdu_disk_info_t disk;
154 char *dv;
155
156 st->ss->free_super(st);
157
158 disk.number = d;
159 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
160 pr_err("cannot get device detail for device %d\n",
161 d);
162 close(nfd);
163 free(st);
164 return 1;
165 }
166 dv = map_dev(disk.major, disk.minor, 1);
167 if (!dv) {
168 pr_err("cannot find device file for device %d\n",
169 d);
170 close(nfd);
171 free(st);
172 return 1;
173 }
174 fd2 = dev_open(dv, O_RDWR);
175 if (fd2 < 0) {
176 pr_err("cannot open device file %s\n", dv);
177 close(nfd);
178 free(st);
179 return 1;
180 }
181
182 if (st->ss->load_super(st, fd2, NULL)) {
183 pr_err("cannot find super block on %s\n", dv);
184 close(nfd);
185 close(fd2);
186 free(st);
187 return 1;
188 }
189 close(fd2);
190 }
191 /* Ok, looks good. Lets update the superblock and write it out to
192 * newdev.
193 */
194
195 info.disk.number = d;
196 info.disk.major = major(stb.st_rdev);
197 info.disk.minor = minor(stb.st_rdev);
198 info.disk.raid_disk = d;
199 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
200 st->ss->update_super(st, &info, "linear-grow-new", newdev,
201 0, 0, NULL);
202
203 if (st->ss->store_super(st, nfd)) {
204 pr_err("Cannot store new superblock on %s\n",
205 newdev);
206 close(nfd);
207 return 1;
208 }
209 close(nfd);
210
211 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
212 pr_err("Cannot add new disk to this array\n");
213 return 1;
214 }
215 /* Well, that seems to have worked.
216 * Now go through and update all superblocks
217 */
218
219 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
220 pr_err("cannot get array info for %s\n", devname);
221 return 1;
222 }
223
224 nd = d;
225 for (d=0 ; d < info.array.raid_disks ; d++) {
226 mdu_disk_info_t disk;
227 char *dv;
228
229 disk.number = d;
230 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
231 pr_err("cannot get device detail for device %d\n",
232 d);
233 return 1;
234 }
235 dv = map_dev(disk.major, disk.minor, 1);
236 if (!dv) {
237 pr_err("cannot find device file for device %d\n",
238 d);
239 return 1;
240 }
241 fd2 = dev_open(dv, O_RDWR);
242 if (fd2 < 0) {
243 pr_err("cannot open device file %s\n", dv);
244 return 1;
245 }
246 if (st->ss->load_super(st, fd2, NULL)) {
247 pr_err("cannot find super block on %s\n", dv);
248 close(fd);
249 return 1;
250 }
251 info.array.raid_disks = nd+1;
252 info.array.nr_disks = nd+1;
253 info.array.active_disks = nd+1;
254 info.array.working_disks = nd+1;
255
256 st->ss->update_super(st, &info, "linear-grow-update", dv,
257 0, 0, NULL);
258
259 if (st->ss->store_super(st, fd2)) {
260 pr_err("Cannot store new superblock on %s\n", dv);
261 close(fd2);
262 return 1;
263 }
264 close(fd2);
265 }
266
267 return 0;
268 }
269
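/* Grow_addbitmap() backs the "--grow --bitmap=..." operations, e.g.
 * (illustrative invocations):
 *
 *	mdadm --grow /dev/md0 --bitmap=internal      (add an internal bitmap)
 *	mdadm --grow /dev/md0 --bitmap=/var/md0.bm   (add an external bitmap file)
 *	mdadm --grow /dev/md0 --bitmap=none          (remove the bitmap)
 *
 * s->bitmap_file carries the "internal", "none" or pathname argument.
 */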
270 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
271 {
272 /*
273 * First check that array doesn't have a bitmap
274 * Then create the bitmap
275 * Then add it
276 *
277 * For internal bitmaps, we need to check the version,
278 * find all the active devices, and write the bitmap block
279 * to all devices
280 */
281 mdu_bitmap_file_t bmf;
282 mdu_array_info_t array;
283 struct supertype *st;
284 char *subarray = NULL;
285 int major = BITMAP_MAJOR_HI;
286 int vers = md_get_version(fd);
287 unsigned long long bitmapsize, array_size;
288
289 if (vers < 9003) {
290 major = BITMAP_MAJOR_HOSTENDIAN;
291 pr_err("Warning - bitmaps created on this kernel"
292 " are not portable\n"
293 " between different architectures. Consider upgrading"
294 " the Linux kernel.\n");
295 }
296
297 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
298 if (errno == ENOMEM)
299 pr_err("Memory allocation failure.\n");
300 else
301 pr_err("bitmaps not supported by this kernel.\n");
302 return 1;
303 }
304 if (bmf.pathname[0]) {
305 if (strcmp(s->bitmap_file,"none")==0) {
306 if (ioctl(fd, SET_BITMAP_FILE, -1)!= 0) {
307 pr_err("failed to remove bitmap %s\n",
308 bmf.pathname);
309 return 1;
310 }
311 return 0;
312 }
313 pr_err("%s already has a bitmap (%s)\n",
314 devname, bmf.pathname);
315 return 1;
316 }
317 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
318 pr_err("cannot get array status for %s\n", devname);
319 return 1;
320 }
321 if (array.state & (1<<MD_SB_BITMAP_PRESENT)) {
322 if (strcmp(s->bitmap_file, "none")==0) {
323 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
324 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
325 pr_err("failed to remove internal bitmap.\n");
326 return 1;
327 }
328 return 0;
329 }
330 pr_err("Internal bitmap already present on %s\n",
331 devname);
332 return 1;
333 }
334
335 if (strcmp(s->bitmap_file, "none") == 0) {
336 pr_err("no bitmap found on %s\n", devname);
337 return 1;
338 }
339 if (array.level <= 0) {
340 pr_err("Bitmaps not meaningful with level %s\n",
341 map_num(pers, array.level)?:"of this array");
342 return 1;
343 }
344 bitmapsize = array.size;
345 bitmapsize <<= 1;
346 if (get_dev_size(fd, NULL, &array_size) &&
347 array_size > (0x7fffffffULL<<9)) {
348 /* Array is big enough that we cannot trust array.size
349 * try other approaches
350 */
351 bitmapsize = get_component_size(fd);
352 }
353 if (bitmapsize == 0) {
354 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
355 return 1;
356 }
357
358 if (array.level == 10) {
359 int ncopies = (array.layout&255)*((array.layout>>8)&255);
360 bitmapsize = bitmapsize * array.raid_disks / ncopies;
361 }
362
363 st = super_by_fd(fd, &subarray);
364 if (!st) {
365 pr_err("Cannot understand version %d.%d\n",
366 array.major_version, array.minor_version);
367 return 1;
368 }
369 if (subarray) {
370 pr_err("Cannot add bitmaps to sub-arrays yet\n");
371 free(subarray);
372 free(st);
373 return 1;
374 }
375 if (strcmp(s->bitmap_file, "internal") == 0) {
376 int rv;
377 int d;
378 int offset_setable = 0;
379 struct mdinfo *mdi;
380 if (st->ss->add_internal_bitmap == NULL) {
381 pr_err("Internal bitmaps not supported "
382 "with %s metadata\n", st->ss->name);
383 return 1;
384 }
385 mdi = sysfs_read(fd, -1, GET_BITMAP_LOCATION);
386 if (mdi)
387 offset_setable = 1;
388 for (d=0; d< st->max_devs; d++) {
389 mdu_disk_info_t disk;
390 char *dv;
391 disk.number = d;
392 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
393 continue;
394 if (disk.major == 0 &&
395 disk.minor == 0)
396 continue;
397 if ((disk.state & (1<<MD_DISK_SYNC))==0)
398 continue;
399 dv = map_dev(disk.major, disk.minor, 1);
400 if (dv) {
401 int fd2 = dev_open(dv, O_RDWR);
402 if (fd2 < 0)
403 continue;
404 if (st->ss->load_super(st, fd2, NULL)==0) {
405 if (st->ss->add_internal_bitmap(
406 st,
407 &s->bitmap_chunk, c->delay, s->write_behind,
408 bitmapsize, offset_setable,
409 major)
410 )
411 st->ss->write_bitmap(st, fd2);
412 else {
413 pr_err("failed to create internal bitmap"
414 " - chunksize problem.\n");
415 close(fd2);
416 return 1;
417 }
418 }
419 close(fd2);
420 }
421 }
422 if (offset_setable) {
423 st->ss->getinfo_super(st, mdi, NULL);
424 sysfs_init(mdi, fd, -1);
425 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
426 mdi->bitmap_offset);
427 } else {
428 array.state |= (1<<MD_SB_BITMAP_PRESENT);
429 rv = ioctl(fd, SET_ARRAY_INFO, &array);
430 }
431 if (rv < 0) {
432 if (errno == EBUSY)
433 pr_err("Cannot add bitmap while array is"
434 " resyncing or reshaping etc.\n");
435 pr_err("failed to set internal bitmap.\n");
436 return 1;
437 }
438 } else {
439 int uuid[4];
440 int bitmap_fd;
441 int d;
442 int max_devs = st->max_devs;
443
444 /* try to load a superblock */
445 for (d = 0; d < max_devs; d++) {
446 mdu_disk_info_t disk;
447 char *dv;
448 int fd2;
449 disk.number = d;
450 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
451 continue;
452 if ((disk.major==0 && disk.minor==0) ||
453 (disk.state & (1<<MD_DISK_REMOVED)))
454 continue;
455 dv = map_dev(disk.major, disk.minor, 1);
456 if (!dv)
457 continue;
458 fd2 = dev_open(dv, O_RDONLY);
459 if (fd2 >= 0) {
460 if (st->ss->load_super(st, fd2, NULL) == 0) {
461 close(fd2);
462 st->ss->uuid_from_super(st, uuid);
463 break;
464 }
465 close(fd2);
466 }
467 }
468 if (d == max_devs) {
469 pr_err("cannot find UUID for array!\n");
470 return 1;
471 }
472 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid, s->bitmap_chunk,
473 c->delay, s->write_behind, bitmapsize, major)) {
474 return 1;
475 }
476 bitmap_fd = open(s->bitmap_file, O_RDWR);
477 if (bitmap_fd < 0) {
478 pr_err("weird: %s cannot be opened\n",
479 s->bitmap_file);
480 return 1;
481 }
482 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
483 int err = errno;
484 if (errno == EBUSY)
485 pr_err("Cannot add bitmap while array is"
486 " resyncing or reshaping etc.\n");
487 pr_err("Cannot set bitmap file for %s: %s\n",
488 devname, strerror(err));
489 return 1;
490 }
491 }
492
493 return 0;
494 }
495
496 /*
497  * When reshaping an array we might need to back up some data.
498  * This is written to all spares with a 'super_block' describing it.
499  * The superblock goes 4K from the end of the used space on the
500  * device.
501  * It is written after the backup is complete.
502 * It has the following structure.
503 */
504
505 static struct mdp_backup_super {
506 char magic[16]; /* md_backup_data-1 or -2 */
507 __u8 set_uuid[16];
508 __u64 mtime;
509 /* start/sizes in 512byte sectors */
510 __u64 devstart; /* address on backup device/file of data */
511 __u64 arraystart;
512 __u64 length;
513 	__u32	sb_csum;	/* csum of preceding bytes. */
514 __u32 pad1;
515 __u64 devstart2; /* offset in to data of second section */
516 __u64 arraystart2;
517 __u64 length2;
518 	__u32	sb_csum2;	/* csum of preceding bytes. */
519 __u8 pad[512-68-32];
520 } __attribute__((aligned(512))) bsb, bsb2;
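/* Illustrative sketch of how the tail of a spare's component space is used
 * while a backup is active, inferred from the offset arithmetic in
 * reshape_prepare_fdlist() below (units are 512-byte sectors):
 *
 *	... free ... | backup data ("blocks" sectors) | bsb (8 sectors = 4K) |
 *	             ^ data_offset + component_size - blocks - 8
 *
 * i.e. the backup data ends 4K before the end of the used space and that
 * final 4K holds this mdp_backup_super, written once the backup completes.
 */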
521
522 static __u32 bsb_csum(char *buf, int len)
523 {
524 int i;
525 int csum = 0;
526 for (i = 0; i < len; i++)
527 csum = (csum<<3) + buf[0];
528 return __cpu_to_le32(csum);
529 }
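/* A typical way to fill a checksum field, consistent with the "csum of
 * preceding bytes" comments in the structure above (illustrative sketch):
 *
 *	bsb.sb_csum = bsb_csum((char *)&bsb,
 *			       ((char *)&bsb.sb_csum) - ((char *)&bsb));
 */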
530
531 static int check_idle(struct supertype *st)
532 {
533 /* Check that all member arrays for this container, or the
534 * container of this array, are idle
535 */
536 int container_dev = (st->container_dev != NoMdDev
537 ? st->container_dev : st->devnum);
538 char container[40];
539 struct mdstat_ent *ent, *e;
540 int is_idle = 1;
541
542 fmt_devname(container, container_dev);
543 ent = mdstat_read(0, 0);
544 for (e = ent ; e; e = e->next) {
545 if (!is_container_member(e, container))
546 continue;
547 if (e->percent >= 0) {
548 is_idle = 0;
549 break;
550 }
551 }
552 free_mdstat(ent);
553 return is_idle;
554 }
555
556 static int freeze_container(struct supertype *st)
557 {
558 int container_dev = (st->container_dev != NoMdDev
559 ? st->container_dev : st->devnum);
560 char container[40];
561
562 if (!check_idle(st))
563 return -1;
564
565 fmt_devname(container, container_dev);
566
567 if (block_monitor(container, 1)) {
568 pr_err("failed to freeze container\n");
569 return -2;
570 }
571
572 return 1;
573 }
574
575 static void unfreeze_container(struct supertype *st)
576 {
577 int container_dev = (st->container_dev != NoMdDev
578 ? st->container_dev : st->devnum);
579 char container[40];
580
581 fmt_devname(container, container_dev);
582
583 unblock_monitor(container, 1);
584 }
585
586 static int freeze(struct supertype *st)
587 {
588 /* Try to freeze resync/rebuild on this array/container.
589 * Return -1 if the array is busy,
590 * return -2 container cannot be frozen,
591 * return 0 if this kernel doesn't support 'frozen'
592 * return 1 if it worked.
593 */
594 if (st->ss->external)
595 return freeze_container(st);
596 else {
597 struct mdinfo *sra = sysfs_read(-1, st->devnum, GET_VERSION);
598 int err;
599 char buf[20];
600
601 if (!sra)
602 return -1;
603 /* Need to clear any 'read-auto' status */
604 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
605 strncmp(buf, "read-auto", 9) == 0)
606 sysfs_set_str(sra, NULL, "array_state", "clean");
607
608 err = sysfs_freeze_array(sra);
609 sysfs_free(sra);
610 return err;
611 }
612 }
613
614 static void unfreeze(struct supertype *st)
615 {
616 if (st->ss->external)
617 return unfreeze_container(st);
618 else {
619 struct mdinfo *sra = sysfs_read(-1, st->devnum, GET_VERSION);
620
621 if (sra)
622 sysfs_set_str(sra, NULL, "sync_action", "idle");
623 sysfs_free(sra);
624 }
625 }
626
627 static void wait_reshape(struct mdinfo *sra)
628 {
629 int fd = sysfs_get_fd(sra, NULL, "sync_action");
630 char action[20];
631
632 if (fd < 0)
633 return;
634
635 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
636 strncmp(action, "reshape", 7) == 0) {
637 fd_set rfds;
638 FD_ZERO(&rfds);
639 FD_SET(fd, &rfds);
640 select(fd+1, NULL, NULL, &rfds, NULL);
641 }
642 close(fd);
643 }
644
645 static int reshape_super(struct supertype *st, unsigned long long size,
646 int level, int layout, int chunksize, int raid_disks,
647 int delta_disks, char *backup_file, char *dev,
648 int direction, int verbose)
649 {
650 /* nothing extra to check in the native case */
651 if (!st->ss->external)
652 return 0;
653 if (!st->ss->reshape_super ||
654 !st->ss->manage_reshape) {
655 pr_err("%s metadata does not support reshape\n",
656 st->ss->name);
657 return 1;
658 }
659
660 return st->ss->reshape_super(st, size, level, layout, chunksize,
661 raid_disks, delta_disks, backup_file, dev,
662 direction, verbose);
663 }
664
665 static void sync_metadata(struct supertype *st)
666 {
667 if (st->ss->external) {
668 if (st->update_tail) {
669 flush_metadata_updates(st);
670 st->update_tail = &st->updates;
671 } else
672 st->ss->sync_metadata(st);
673 }
674 }
675
676 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
677 {
678 /* when dealing with external metadata subarrays we need to be
679 * prepared to handle EAGAIN. The kernel may need to wait for
680 * mdmon to mark the array active so the kernel can handle
681 * allocations/writeback when preparing the reshape action
682 * (md_allow_write()). We temporarily disable safe_mode_delay
683 * to close a race with the array_state going clean before the
684 * next write to raid_disks / stripe_cache_size
685 */
686 char safe[50];
687 int rc;
688
689 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
690 if (!container ||
691 (strcmp(name, "raid_disks") != 0 &&
692 strcmp(name, "stripe_cache_size") != 0))
693 return sysfs_set_num(sra, NULL, name, n);
694
695 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
696 if (rc <= 0)
697 return -1;
698 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
699 rc = sysfs_set_num(sra, NULL, name, n);
700 if (rc < 0 && errno == EAGAIN) {
701 ping_monitor(container);
702 /* if we get EAGAIN here then the monitor is not active
703 * so stop trying
704 */
705 rc = sysfs_set_num(sra, NULL, name, n);
706 }
707 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
708 return rc;
709 }
710
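/* Reading of the arithmetic below: sync_max is expressed in per-device
 * sectors, so the array-wide reshape_progress is divided by the number of
 * data disks; when shrinking (before_data_disks > data_disks) the reshape
 * runs backwards from the end of the array, hence the
 * "component_size * data_disks - reshape_progress" form.  The limit stops
 * the kernel from reshaping past the section mdadm has already backed up.
 */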
711 int start_reshape(struct mdinfo *sra, int already_running,
712 int before_data_disks, int data_disks)
713 {
714 int err;
715 unsigned long long sync_max_to_set;
716
717 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
718 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
719 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
720 sra->reshape_progress);
721 if (before_data_disks <= data_disks)
722 sync_max_to_set = sra->reshape_progress / data_disks;
723 else
724 sync_max_to_set = (sra->component_size * data_disks
725 - sra->reshape_progress) / data_disks;
726 if (!already_running)
727 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
728 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
729 if (!already_running)
730 err = err ?: sysfs_set_str(sra, NULL, "sync_action", "reshape");
731
732 return err;
733 }
734
735 void abort_reshape(struct mdinfo *sra)
736 {
737 sysfs_set_str(sra, NULL, "sync_action", "idle");
738 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
739 sysfs_set_num(sra, NULL, "suspend_hi", 0);
740 sysfs_set_num(sra, NULL, "suspend_lo", 0);
741 sysfs_set_num(sra, NULL, "sync_min", 0);
742 sysfs_set_str(sra, NULL, "sync_max", "max");
743 }
744
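/* Worked example (hypothetical): a 4-device RAID10 with a near=2 layout has
 * nr_of_copies == 2, so the loop below keeps one in-sync device from slots
 * {0,1} and one from {2,3}; the remaining two devices are marked
 * faulty/removed so the array can then be taken over as a 2-device RAID0.
 */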
745 int remove_disks_for_takeover(struct supertype *st,
746 struct mdinfo *sra,
747 int layout)
748 {
749 int nr_of_copies;
750 struct mdinfo *remaining;
751 int slot;
752
753 if (sra->array.level == 10)
754 nr_of_copies = layout & 0xff;
755 else if (sra->array.level == 1)
756 nr_of_copies = sra->array.raid_disks;
757 else
758 return 1;
759
760 remaining = sra->devs;
761 sra->devs = NULL;
762 /* for each 'copy', select one device and remove from the list. */
763 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
764 struct mdinfo **diskp;
765 int found = 0;
766
767 /* Find a working device to keep */
768 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
769 struct mdinfo *disk = *diskp;
770
771 if (disk->disk.raid_disk < slot)
772 continue;
773 if (disk->disk.raid_disk >= slot + nr_of_copies)
774 continue;
775 if (disk->disk.state & (1<<MD_DISK_REMOVED))
776 continue;
777 if (disk->disk.state & (1<<MD_DISK_FAULTY))
778 continue;
779 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
780 continue;
781
782 /* We have found a good disk to use! */
783 *diskp = disk->next;
784 disk->next = sra->devs;
785 sra->devs = disk;
786 found = 1;
787 break;
788 }
789 if (!found)
790 break;
791 }
792
793 if (slot < sra->array.raid_disks) {
794 /* didn't find all slots */
795 struct mdinfo **e;
796 e = &remaining;
797 while (*e)
798 e = &(*e)->next;
799 *e = sra->devs;
800 sra->devs = remaining;
801 return 1;
802 }
803
804 /* Remove all 'remaining' devices from the array */
805 while (remaining) {
806 struct mdinfo *sd = remaining;
807 remaining = sd->next;
808
809 sysfs_set_str(sra, sd, "state", "faulty");
810 sysfs_set_str(sra, sd, "slot", "none");
811 /* for external metadata disks should be removed in mdmon */
812 if (!st->ss->external)
813 sysfs_set_str(sra, sd, "state", "remove");
814 sd->disk.state |= (1<<MD_DISK_REMOVED);
815 sd->disk.state &= ~(1<<MD_DISK_SYNC);
816 sd->next = sra->devs;
817 sra->devs = sd;
818 }
819 return 0;
820 }
821
822 void reshape_free_fdlist(int *fdlist,
823 unsigned long long *offsets,
824 int size)
825 {
826 int i;
827
828 for (i = 0; i < size; i++)
829 if (fdlist[i] >= 0)
830 close(fdlist[i]);
831
832 free(fdlist);
833 free(offsets);
834 }
835
836 int reshape_prepare_fdlist(char *devname,
837 struct mdinfo *sra,
838 int raid_disks,
839 int nrdisks,
840 unsigned long blocks,
841 char *backup_file,
842 int *fdlist,
843 unsigned long long *offsets)
844 {
845 int d = 0;
846 struct mdinfo *sd;
847
848 for (d = 0; d <= nrdisks; d++)
849 fdlist[d] = -1;
850 d = raid_disks;
851 for (sd = sra->devs; sd; sd = sd->next) {
852 if (sd->disk.state & (1<<MD_DISK_FAULTY))
853 continue;
854 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
855 char *dn = map_dev(sd->disk.major,
856 sd->disk.minor, 1);
857 fdlist[sd->disk.raid_disk]
858 = dev_open(dn, O_RDONLY);
859 offsets[sd->disk.raid_disk] = sd->data_offset*512;
860 if (fdlist[sd->disk.raid_disk] < 0) {
861 pr_err("%s: cannot open component %s\n",
862 devname, dn ? dn : "-unknown-");
863 d = -1;
864 goto release;
865 }
866 } else if (backup_file == NULL) {
867 /* spare */
868 char *dn = map_dev(sd->disk.major,
869 sd->disk.minor, 1);
870 fdlist[d] = dev_open(dn, O_RDWR);
871 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
872 if (fdlist[d] < 0) {
873 pr_err("%s: cannot open component %s\n",
874 devname, dn ? dn : "-unknown-");
875 d = -1;
876 goto release;
877 }
878 d++;
879 }
880 }
881 release:
882 return d;
883 }
884
885 int reshape_open_backup_file(char *backup_file,
886 int fd,
887 char *devname,
888 long blocks,
889 int *fdlist,
890 unsigned long long *offsets,
891 int restart)
892 {
893 /* Return 1 on success, 0 on any form of failure */
894 /* need to check backup file is large enough */
895 char buf[512];
896 struct stat stb;
897 unsigned int dev;
898 int i;
899
900 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
901 S_IRUSR | S_IWUSR);
902 *offsets = 8 * 512;
903 if (*fdlist < 0) {
904 pr_err("%s: cannot create backup file %s: %s\n",
905 devname, backup_file, strerror(errno));
906 return 0;
907 }
908 /* Guard against backup file being on array device.
909 * If array is partitioned or if LVM etc is in the
910 * way this will not notice, but it is better than
911 * nothing.
912 */
913 fstat(*fdlist, &stb);
914 dev = stb.st_dev;
915 fstat(fd, &stb);
916 if (stb.st_rdev == dev) {
917 pr_err("backup file must NOT be"
918 " on the array being reshaped.\n");
919 close(*fdlist);
920 return 0;
921 }
922
923 memset(buf, 0, 512);
924 for (i=0; i < blocks + 8 ; i++) {
925 if (write(*fdlist, buf, 512) != 512) {
926 pr_err("%s: cannot create"
927 " backup file %s: %s\n",
928 devname, backup_file, strerror(errno));
929 return 0;
930 }
931 }
932 if (fsync(*fdlist) != 0) {
933 pr_err("%s: cannot create backup file %s: %s\n",
934 devname, backup_file, strerror(errno));
935 return 0;
936 }
937
938 return 1;
939 }
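/* Sizing sketch with illustrative numbers: for blocks == 2048 the loop
 * above pre-allocates (2048 + 8) * 512 bytes of zeros, i.e. 1 MiB of backup
 * space plus 4K; *offsets is set to 8 * 512 = 4096, so backed-up data
 * starts 4K into the file, with the leading 4K presumably reserved for the
 * backup superblock described by struct mdp_backup_super above.
 */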
940
941 unsigned long compute_backup_blocks(int nchunk, int ochunk,
942 unsigned int ndata, unsigned int odata)
943 {
944 unsigned long a, b, blocks;
945 	/* So how much do we need to back up?
946 	 * We need an amount of data which is both a whole number of
947 	 * old stripes and a whole number of new stripes.
948 	 * So: the LCM of (chunksize*datadisks) for the old and new layouts.
949 */
950 a = (ochunk/512) * odata;
951 b = (nchunk/512) * ndata;
952 /* Find GCD */
953 while (a != b) {
954 if (a < b)
955 b -= a;
956 if (b < a)
957 a -= b;
958 }
959 /* LCM == product / GCD */
960 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
961
962 return blocks;
963 }
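/* Worked example (hypothetical numbers): growing a RAID5 from 4 to 5
 * devices with a 512K chunk gives odata = 3, ndata = 4 and
 * ochunk = nchunk = 524288.  Then a = 1024 * 3 = 3072, b = 1024 * 4 = 4096,
 * their GCD is 1024, and blocks = 1024 * 1024 * 3 * 4 / 1024 = 12288
 * sectors (6 MiB): the smallest amount that is a whole number of both old
 * (4 x 3072) and new (3 x 4096) stripes.
 */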
964
965 char *analyse_change(struct mdinfo *info, struct reshape *re)
966 {
967 /* Based on the current array state in info->array and
968 * the changes in info->new_* etc, determine:
969 * - whether the change is possible
970 * - Intermediate level/raid_disks/layout
971 * - whether a restriping reshape is needed
972 * - number of sectors in minimum change unit. This
973 * will cover a whole number of stripes in 'before' and
974 * 'after'.
975 *
976 * Return message if the change should be rejected
977 * NULL if the change can be achieved
978 *
979 * This can be called as part of starting a reshape, or
980 * when assembling an array that is undergoing reshape.
981 */
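/* Worked example (hypothetical): growing a 3-device RAID5 to 4 devices at
 * the same level yields re->level = 5, re->parity = 1,
 * re->before.data_disks = 2, re->after.data_disks = 3, an unchanged
 * layout, and re->backup_blocks sized by compute_backup_blocks() for the
 * 2-disk -> 3-disk restripe.
 */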
982 int new_disks;
983 /* delta_parity records change in number of devices
984 * caused by level change
985 */
986 int delta_parity = 0;
987
988 /* If a new level not explicitly given, we assume no-change */
989 if (info->new_level == UnSet)
990 info->new_level = info->array.level;
991
992 if (info->new_chunk)
993 switch (info->new_level) {
994 case 0:
995 case 4:
996 case 5:
997 case 6:
998 case 10:
999 /* chunk size is meaningful, must divide component_size
1000 * evenly
1001 */
1002 if (info->component_size % (info->new_chunk/512))
1003 return "New chunk size does not"
1004 " divide component size";
1005 break;
1006 default:
1007 return "chunk size not meaningful for this level";
1008 }
1009 else
1010 info->new_chunk = info->array.chunk_size;
1011
1012 switch (info->array.level) {
1013 default:
1014 return "Cannot understand this RAID level";
1015 case 1:
1016 /* RAID1 can convert to RAID1 with different disks, or
1017 * raid5 with 2 disks, or
1018 * raid0 with 1 disk
1019 */
1020 if (info->new_level > 1 &&
1021 (info->component_size & 7))
1022 return "Cannot convert RAID1 of this size - "
1023 "reduce size to multiple of 4K first.";
1024 if (info->new_level == 0) {
1025 if (info->delta_disks != UnSet &&
1026 info->delta_disks != 0)
1027 return "Cannot change number of disks "
1028 "with RAID1->RAID0 conversion";
1029 re->level = 0;
1030 re->before.data_disks = 1;
1031 re->after.data_disks = 1;
1032 re->before.layout = 0;
1033 re->backup_blocks = 0;
1034 re->parity = 0;
1035 return NULL;
1036 }
1037 if (info->new_level == 1) {
1038 if (info->delta_disks == UnSet)
1039 /* Don't know what to do */
1040 return "no change requested for Growing RAID1";
1041 re->level = 1;
1042 re->backup_blocks = 0;
1043 re->parity = 0;
1044 return NULL;
1045 }
1046 if (info->array.raid_disks == 2 &&
1047 info->new_level == 5) {
1048
1049 re->level = 5;
1050 re->before.data_disks = 1;
1051 if (info->delta_disks != UnSet &&
1052 info->delta_disks != 0)
1053 re->after.data_disks = 1 + info->delta_disks;
1054 else
1055 re->after.data_disks = 1;
1056 if (re->after.data_disks < 1)
1057 return "Number of disks too small for RAID5";
1058
1059 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1060 info->array.chunk_size = 65536;
1061 break;
1062 }
1063 /* Could do some multi-stage conversions, but leave that to
1064 * later.
1065 */
1066 		return "Impossible level change request for RAID1";
1067
1068 case 10:
1069 /* RAID10 can only be converted from near mode to
1070 * RAID0 by removing some devices
1071 */
1072 if ((info->array.layout & ~0xff) != 0x100)
1073 return "Cannot Grow RAID10 with far/offset layout";
1074 /* number of devices must be multiple of number of copies */
1075 if (info->array.raid_disks % (info->array.layout & 0xff))
1076 return "RAID10 layout too complex for Grow operation";
1077
1078 if (info->new_level != 0)
1079 return "RAID10 can only be changed to RAID0";
1080 new_disks = (info->array.raid_disks
1081 / (info->array.layout & 0xff));
1082 if (info->delta_disks == UnSet)
1083 info->delta_disks = (new_disks
1084 - info->array.raid_disks);
1085
1086 if (info->delta_disks != new_disks - info->array.raid_disks)
1087 return "New number of raid-devices impossible for RAID10";
1088 if (info->new_chunk &&
1089 info->new_chunk != info->array.chunk_size)
1090 return "Cannot change chunk-size with RAID10 Grow";
1091
1092 /* looks good */
1093 re->level = 0;
1094 re->parity = 0;
1095 re->before.data_disks = new_disks;
1096 re->after.data_disks = re->before.data_disks;
1097 re->before.layout = 0;
1098 re->backup_blocks = 0;
1099 return NULL;
1100
1101 case 0:
1102 /* RAID0 can be converted to RAID10, or to RAID456 */
1103 if (info->new_level == 10) {
1104 if (info->new_layout == UnSet && info->delta_disks == UnSet) {
1105 /* Assume near=2 layout */
1106 info->new_layout = 0x102;
1107 info->delta_disks = info->array.raid_disks;
1108 }
1109 if (info->new_layout == UnSet) {
1110 int copies = 1 + (info->delta_disks
1111 / info->array.raid_disks);
1112 if (info->array.raid_disks * (copies-1)
1113 != info->delta_disks)
1114 return "Impossible number of devices"
1115 " for RAID0->RAID10";
1116 info->new_layout = 0x100 + copies;
1117 }
1118 if (info->delta_disks == UnSet) {
1119 int copies = info->new_layout & 0xff;
1120 if (info->new_layout != 0x100 + copies)
1121 return "New layout impossible"
1122 					" for RAID0->RAID10";
1123 info->delta_disks = (copies - 1) *
1124 info->array.raid_disks;
1125 }
1126 if (info->new_chunk &&
1127 info->new_chunk != info->array.chunk_size)
1128 return "Cannot change chunk-size with RAID0->RAID10";
1129 /* looks good */
1130 re->level = 10;
1131 re->parity = 0;
1132 re->before.data_disks = (info->array.raid_disks +
1133 info->delta_disks);
1134 re->after.data_disks = re->before.data_disks;
1135 re->before.layout = info->new_layout;
1136 re->backup_blocks = 0;
1137 return NULL;
1138 }
1139
1140 		/* RAID0 can also convert to RAID0/4/5/6 by first converting to
1141 * a raid4 style layout of the final level.
1142 */
1143 switch (info->new_level) {
1144 case 4:
1145 delta_parity = 1;
1146 case 0:
1147 re->level = 4;
1148 re->before.layout = 0;
1149 break;
1150 case 5:
1151 delta_parity = 1;
1152 re->level = 5;
1153 re->before.layout = ALGORITHM_PARITY_N;
1154 break;
1155 case 6:
1156 delta_parity = 2;
1157 re->level = 6;
1158 re->before.layout = ALGORITHM_PARITY_N;
1159 break;
1160 default:
1161 return "Impossible level change requested";
1162 }
1163 re->before.data_disks = info->array.raid_disks;
1164 /* determining 'after' layout happens outside this 'switch' */
1165 break;
1166
1167 case 4:
1168 info->array.layout = ALGORITHM_PARITY_N;
1169 case 5:
1170 switch (info->new_level) {
1171 case 0:
1172 delta_parity = -1;
1173 case 4:
1174 re->level = info->array.level;
1175 re->before.data_disks = info->array.raid_disks - 1;
1176 re->before.layout = info->array.layout;
1177 break;
1178 case 5:
1179 re->level = 5;
1180 re->before.data_disks = info->array.raid_disks - 1;
1181 re->before.layout = info->array.layout;
1182 break;
1183 case 6:
1184 delta_parity = 1;
1185 re->level = 6;
1186 re->before.data_disks = info->array.raid_disks - 1;
1187 switch (info->array.layout) {
1188 case ALGORITHM_LEFT_ASYMMETRIC:
1189 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1190 break;
1191 case ALGORITHM_RIGHT_ASYMMETRIC:
1192 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1193 break;
1194 case ALGORITHM_LEFT_SYMMETRIC:
1195 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1196 break;
1197 case ALGORITHM_RIGHT_SYMMETRIC:
1198 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1199 break;
1200 case ALGORITHM_PARITY_0:
1201 re->before.layout = ALGORITHM_PARITY_0_6;
1202 break;
1203 case ALGORITHM_PARITY_N:
1204 re->before.layout = ALGORITHM_PARITY_N_6;
1205 break;
1206 default:
1207 return "Cannot convert an array with this layout";
1208 }
1209 break;
1210 case 1:
1211 if (info->array.raid_disks != 2)
1212 return "Can only convert a 2-device array to RAID1";
1213 if (info->delta_disks != UnSet &&
1214 info->delta_disks != 0)
1215 return "Cannot set raid_disk when "
1216 "converting RAID5->RAID1";
1217 re->level = 1;
1218 break;
1219 default:
1220 return "Impossible level change requested";
1221 }
1222 break;
1223 case 6:
1224 switch (info->new_level) {
1225 case 4:
1226 case 5:
1227 delta_parity = -1;
1228 case 6:
1229 re->level = 6;
1230 re->before.data_disks = info->array.raid_disks - 2;
1231 re->before.layout = info->array.layout;
1232 break;
1233 default:
1234 return "Impossible level change requested";
1235 }
1236 break;
1237 }
1238
1239 /* If we reached here then it looks like a re-stripe is
1240 * happening. We have determined the intermediate level
1241 * and initial raid_disks/layout and stored these in 're'.
1242 *
1243 * We need to deduce the final layout that can be atomically
1244 * converted to the end state.
1245 */
1246 switch (info->new_level) {
1247 case 0:
1248 /* We can only get to RAID0 from RAID4 or RAID5
1249 * with appropriate layout and one extra device
1250 */
1251 if (re->level != 4 && re->level != 5)
1252 			return "Cannot convert to RAID0 from this level";
1253
1254 switch (re->level) {
1255 case 4:
1256 re->after.layout = 0;
1257 break;
1258 case 5:
1259 re->after.layout = ALGORITHM_PARITY_N;
1260 break;
1261 }
1262 break;
1263
1264 case 4:
1265 /* We can only get to RAID4 from RAID5 */
1266 if (re->level != 4 && re->level != 5)
1267 return "Cannot convert to RAID4 from this level";
1268
1269 switch (re->level) {
1270 case 4:
1271 re->after.layout = 0;
1272 break;
1273 case 5:
1274 re->after.layout = ALGORITHM_PARITY_N;
1275 break;
1276 }
1277 break;
1278
1279 case 5:
1280 /* We get to RAID5 from RAID5 or RAID6 */
1281 if (re->level != 5 && re->level != 6)
1282 return "Cannot convert to RAID5 from this level";
1283
1284 switch (re->level) {
1285 case 5:
1286 if (info->new_layout == UnSet)
1287 re->after.layout = re->before.layout;
1288 else
1289 re->after.layout = info->new_layout;
1290 break;
1291 case 6:
1292 if (info->new_layout == UnSet)
1293 info->new_layout = re->before.layout;
1294
1295 /* after.layout needs to be raid6 version of new_layout */
1296 if (info->new_layout == ALGORITHM_PARITY_N)
1297 re->after.layout = ALGORITHM_PARITY_N;
1298 else {
1299 char layout[40];
1300 char *ls = map_num(r5layout, info->new_layout);
1301 int l;
1302 if (ls) {
1303 /* Current RAID6 layout has a RAID5
1304 * equivalent - good
1305 */
1306 strcat(strcpy(layout, ls), "-6");
1307 l = map_name(r6layout, layout);
1308 if (l == UnSet)
1309 return "Cannot find RAID6 layout"
1310 " to convert to";
1311 } else {
1312 /* Current RAID6 has no equivalent.
1313 * If it is already a '-6' layout we
1314 * can leave it unchanged, else we must
1315 * fail
1316 */
1317 ls = map_num(r6layout, info->new_layout);
1318 if (!ls ||
1319 strcmp(ls+strlen(ls)-2, "-6") != 0)
1320 return "Please specify new layout";
1321 l = info->new_layout;
1322 }
1323 re->after.layout = l;
1324 }
1325 }
1326 break;
1327
1328 case 6:
1329 /* We must already be at level 6 */
1330 if (re->level != 6)
1331 return "Impossible level change";
1332 if (info->new_layout == UnSet)
1333 re->after.layout = info->array.layout;
1334 else
1335 re->after.layout = info->new_layout;
1336 break;
1337 default:
1338 return "Impossible level change requested";
1339 }
1340 if (info->delta_disks == UnSet)
1341 info->delta_disks = delta_parity;
1342
1343 re->after.data_disks = (re->before.data_disks
1344 + info->delta_disks
1345 - delta_parity);
1346 switch (re->level) {
1347 case 6: re->parity = 2;
1348 break;
1349 case 4:
1350 case 5: re->parity = 1;
1351 break;
1352 default: re->parity = 0;
1353 break;
1354 }
1355 /* So we have a restripe operation, we need to calculate the number
1356 * of blocks per reshape operation.
1357 */
1358 if (info->new_chunk == 0)
1359 info->new_chunk = info->array.chunk_size;
1360 if (re->after.data_disks == re->before.data_disks &&
1361 re->after.layout == re->before.layout &&
1362 info->new_chunk == info->array.chunk_size) {
1363 /* Nothing to change */
1364 re->backup_blocks = 0;
1365 return NULL;
1366 }
1367 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1368 /* chunk and layout changes make no difference */
1369 re->backup_blocks = 0;
1370 return NULL;
1371 }
1372
1373 if (re->after.data_disks == re->before.data_disks &&
1374 get_linux_version() < 2006032)
1375 return "in-place reshape is not safe before 2.6.32 - sorry.";
1376
1377 if (re->after.data_disks < re->before.data_disks &&
1378 get_linux_version() < 2006030)
1379 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1380
1381 re->backup_blocks = compute_backup_blocks(
1382 info->new_chunk, info->array.chunk_size,
1383 re->after.data_disks,
1384 re->before.data_disks);
1385
1386 re->new_size = info->component_size * re->after.data_disks;
1387 return NULL;
1388 }
1389
1390 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1391 char *text_version)
1392 {
1393 struct mdinfo *info;
1394 char *subarray;
1395 int ret_val = -1;
1396
1397 if ((st == NULL) || (sra == NULL))
1398 return ret_val;
1399
1400 if (text_version == NULL)
1401 text_version = sra->text_version;
1402 subarray = strchr(text_version+1, '/')+1;
1403 info = st->ss->container_content(st, subarray);
1404 if (info) {
1405 unsigned long long current_size = 0;
1406 unsigned long long new_size =
1407 info->custom_array_size/2;
1408
1409 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1410 new_size > current_size) {
1411 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1412 < 0)
1413 dprintf("Error: Cannot set array size");
1414 else {
1415 ret_val = 0;
1416 dprintf("Array size changed");
1417 }
1418 dprintf(" from %llu to %llu.\n",
1419 current_size, new_size);
1420 }
1421 sysfs_free(info);
1422 } else
1423 		dprintf("Error: set_array_size(): info pointer is NULL\n");
1424
1425 return ret_val;
1426 }
1427
1428 static int reshape_array(char *container, int fd, char *devname,
1429 struct supertype *st, struct mdinfo *info,
1430 int force, struct mddev_dev *devlist,
1431 char *backup_file, int verbose, int forked,
1432 int restart, int freeze_reshape);
1433 static int reshape_container(char *container, char *devname,
1434 int mdfd,
1435 struct supertype *st,
1436 struct mdinfo *info,
1437 int force,
1438 char *backup_file,
1439 int verbose, int restart, int freeze_reshape);
1440
1441 int Grow_reshape(char *devname, int fd,
1442 struct mddev_dev *devlist,
1443 unsigned long long data_offset,
1444 struct context *c, struct shape *s)
1445 {
1446 /* Make some changes in the shape of an array.
1447 * The kernel must support the change.
1448 *
1449 * There are three different changes. Each can trigger
1450 * a resync or recovery so we freeze that until we have
1451 * requested everything (if kernel supports freezing - 2.6.30).
1452 * The steps are:
1453 * - change size (i.e. component_size)
1454 * - change level
1455 * - change layout/chunksize/ndisks
1456 *
1457 * The last can require a reshape. It is different on different
1458 * levels so we need to check the level before actioning it.
1459 	 * Sometimes the level change needs to be requested after the
1460 * reshape (e.g. raid6->raid5, raid5->raid0)
1461 *
1462 */
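/* Illustrative command lines for the three kinds of change (device names
 * and values hypothetical):
 *
 *	mdadm --grow /dev/md0 --size=max
 *	mdadm --grow /dev/md0 --raid-devices=5 --backup-file=/root/md0.backup
 *	mdadm --grow /dev/md0 --level=6 --raid-devices=5 --backup-file=/root/md0.backup
 *
 * corresponding to a component-size change, a reshape to more devices, and
 * a level change combined with a reshape.
 */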
1463 struct mdu_array_info_s array;
1464 int rv = 0;
1465 struct supertype *st;
1466 char *subarray = NULL;
1467
1468 int frozen;
1469 int changed = 0;
1470 char *container = NULL;
1471 char container_buf[20];
1472 int cfd = -1;
1473
1474 struct mddev_dev *dv;
1475 int added_disks;
1476
1477 struct mdinfo info;
1478 struct mdinfo *sra;
1479
1480 if (data_offset != INVALID_SECTORS) {
1481 fprintf(stderr, Name ": --grow --data-offset not yet supported\n");
1482 return 1;
1483 }
1484
1485 if (ioctl(fd, GET_ARRAY_INFO, &array) < 0) {
1486 pr_err("%s is not an active md array - aborting\n",
1487 devname);
1488 return 1;
1489 }
1490
1491 if (s->size > 0 &&
1492 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1493 pr_err("cannot change component size at the same time "
1494 "as other changes.\n"
1495 " Change size first, then check data is intact before "
1496 "making other changes.\n");
1497 return 1;
1498 }
1499
1500 if (s->raiddisks && s->raiddisks < array.raid_disks && array.level > 1 &&
1501 get_linux_version() < 2006032 &&
1502 !check_env("MDADM_FORCE_FEWER")) {
1503 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1504 " Please use a newer kernel\n");
1505 return 1;
1506 }
1507
1508 st = super_by_fd(fd, &subarray);
1509 if (!st) {
1510 pr_err("Unable to determine metadata format for %s\n", devname);
1511 return 1;
1512 }
1513 if (s->raiddisks > st->max_devs) {
1514 pr_err("Cannot increase raid-disks on this array"
1515 " beyond %d\n", st->max_devs);
1516 return 1;
1517 }
1518
1519 /* in the external case we need to check that the requested reshape is
1520 * supported, and perform an initial check that the container holds the
1521 * pre-requisite spare devices (mdmon owns final validation)
1522 */
1523 if (st->ss->external) {
1524 int container_dev;
1525 int rv;
1526
1527 if (subarray) {
1528 container_dev = st->container_dev;
1529 cfd = open_dev_excl(st->container_dev);
1530 } else {
1531 container_dev = st->devnum;
1532 close(fd);
1533 cfd = open_dev_excl(st->devnum);
1534 fd = cfd;
1535 }
1536 if (cfd < 0) {
1537 pr_err("Unable to open container for %s\n",
1538 devname);
1539 free(subarray);
1540 return 1;
1541 }
1542
1543 fmt_devname(container_buf, container_dev);
1544 container = container_buf;
1545
1546 rv = st->ss->load_container(st, cfd, NULL);
1547
1548 if (rv) {
1549 pr_err("Cannot read superblock for %s\n",
1550 devname);
1551 free(subarray);
1552 return 1;
1553 }
1554
1555 /* check if operation is supported for metadata handler */
1556 if (st->ss->container_content) {
1557 struct mdinfo *cc = NULL;
1558 struct mdinfo *content = NULL;
1559
1560 cc = st->ss->container_content(st, subarray);
1561 for (content = cc; content ; content = content->next) {
1562 int allow_reshape = 1;
1563
1564 /* check if reshape is allowed based on metadata
1565 * indications stored in content.array.status
1566 */
1567 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
1568 allow_reshape = 0;
1569 if (content->array.state
1570 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE))
1571 allow_reshape = 0;
1572 if (!allow_reshape) {
1573 pr_err("cannot reshape arrays in"
1574 " container with unsupported"
1575 " metadata: %s(%s)\n",
1576 devname, container_buf);
1577 sysfs_free(cc);
1578 free(subarray);
1579 return 1;
1580 }
1581 }
1582 sysfs_free(cc);
1583 }
1584 if (mdmon_running(container_dev))
1585 st->update_tail = &st->updates;
1586 }
1587
1588 added_disks = 0;
1589 for (dv = devlist; dv; dv = dv->next)
1590 added_disks++;
1591 if (s->raiddisks > array.raid_disks &&
1592 array.spare_disks +added_disks < (s->raiddisks - array.raid_disks) &&
1593 !c->force) {
1594 pr_err("Need %d spare%s to avoid degraded array,"
1595 " and only have %d.\n"
1596 " Use --force to over-ride this check.\n",
1597 s->raiddisks - array.raid_disks,
1598 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1599 array.spare_disks + added_disks);
1600 return 1;
1601 }
1602
1603 sra = sysfs_read(fd, 0, GET_LEVEL | GET_DISKS | GET_DEVS
1604 | GET_STATE | GET_VERSION);
1605 if (sra) {
1606 if (st->ss->external && subarray == NULL) {
1607 array.level = LEVEL_CONTAINER;
1608 sra->array.level = LEVEL_CONTAINER;
1609 }
1610 } else {
1611 pr_err("failed to read sysfs parameters for %s\n",
1612 devname);
1613 return 1;
1614 }
1615 frozen = freeze(st);
1616 if (frozen < -1) {
1617 /* freeze() already spewed the reason */
1618 sysfs_free(sra);
1619 return 1;
1620 } else if (frozen < 0) {
1621 pr_err("%s is performing resync/recovery and cannot"
1622 " be reshaped\n", devname);
1623 sysfs_free(sra);
1624 return 1;
1625 }
1626
1627 /* ========= set size =============== */
1628 if (s->size > 0 && (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1629 unsigned long long orig_size = get_component_size(fd)/2;
1630 unsigned long long min_csize;
1631 struct mdinfo *mdi;
1632 int raid0_takeover = 0;
1633
1634 if (orig_size == 0)
1635 orig_size = (unsigned) array.size;
1636
1637 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1638 devname, APPLY_METADATA_CHANGES, c->verbose > 0)) {
1639 rv = 1;
1640 goto release;
1641 }
1642 sync_metadata(st);
1643 if (st->ss->external) {
1644 /* metadata can have size limitation
1645 * update size value according to metadata information
1646 */
1647 struct mdinfo *sizeinfo =
1648 st->ss->container_content(st, subarray);
1649 if (sizeinfo) {
1650 unsigned long long new_size =
1651 sizeinfo->custom_array_size/2;
1652 int data_disks = get_data_disks(
1653 sizeinfo->array.level,
1654 sizeinfo->array.layout,
1655 sizeinfo->array.raid_disks);
1656 new_size /= data_disks;
1657 dprintf("Metadata size correction from %llu to "
1658 "%llu (%llu)\n", orig_size, new_size,
1659 new_size * data_disks);
1660 s->size = new_size;
1661 sysfs_free(sizeinfo);
1662 }
1663 }
1664
1665 /* Update the size of each member device in case
1666 * they have been resized. This will never reduce
1667 * below the current used-size. The "size" attribute
1668 * understands '0' to mean 'max'.
1669 */
1670 min_csize = 0;
1671 rv = 0;
1672 for (mdi = sra->devs; mdi; mdi = mdi->next) {
1673 if (sysfs_set_num(sra, mdi, "size",
1674 s->size == MAX_SIZE ? 0 : s->size) < 0) {
1675 /* Probably kernel refusing to let us
1676 * reduce the size - not an error.
1677 */
1678 break;
1679 }
1680 if (array.not_persistent == 0 &&
1681 array.major_version == 0 &&
1682 get_linux_version() < 3001000) {
1683 /* Dangerous to allow size to exceed 2TB */
1684 unsigned long long csize;
1685 if (sysfs_get_ll(sra, mdi, "size", &csize) == 0) {
1686 if (csize >= 2ULL*1024*1024*1024)
1687 csize = 2ULL*1024*1024*1024;
1688 if ((min_csize == 0 || (min_csize
1689 > csize)))
1690 min_csize = csize;
1691 }
1692 }
1693 }
1694 if (rv) {
1695 pr_err("Cannot set size on "
1696 "array members.\n");
1697 goto size_change_error;
1698 }
1699 if (min_csize && s->size > min_csize) {
1700 pr_err("Cannot safely make this array "
1701 "use more than 2TB per device on this kernel.\n");
1702 rv = 1;
1703 goto size_change_error;
1704 }
1705 if (min_csize && s->size == MAX_SIZE) {
1706 /* Don't let the kernel choose a size - it will get
1707 * it wrong
1708 */
1709 pr_err("Limited v0.90 array to "
1710 "2TB per device\n");
1711 s->size = min_csize;
1712 }
1713 if (st->ss->external) {
1714 if (sra->array.level == 0) {
1715 rv = sysfs_set_str(sra, NULL, "level",
1716 "raid5");
1717 if (!rv) {
1718 raid0_takeover = 1;
1719 					/* get array parameters after takeover
1720 					 * to change one parameter at a time only
1721 */
1722 rv = ioctl(fd, GET_ARRAY_INFO, &array);
1723 }
1724 }
1725 /* make sure mdmon is
1726 * aware of the new level */
1727 if (!mdmon_running(st->container_dev))
1728 start_mdmon(st->container_dev);
1729 ping_monitor(container);
1730 if (mdmon_running(st->container_dev) &&
1731 st->update_tail == NULL)
1732 st->update_tail = &st->updates;
1733 }
1734
1735 if (s->size == MAX_SIZE)
1736 s->size = 0;
1737 array.size = s->size;
1738 if ((unsigned)array.size != s->size) {
1739 /* got truncated to 32bit, write to
1740 * component_size instead
1741 */
1742 if (sra)
1743 rv = sysfs_set_num(sra, NULL,
1744 "component_size", s->size);
1745 else
1746 rv = -1;
1747 } else {
1748 rv = ioctl(fd, SET_ARRAY_INFO, &array);
1749
1750 /* manage array size when it is managed externally
1751 */
1752 if ((rv == 0) && st->ss->external)
1753 rv = set_array_size(st, sra, sra->text_version);
1754 }
1755
1756 if (raid0_takeover) {
1757 			/* do not resync non-existing parity,
1758 * we will drop it anyway
1759 */
1760 sysfs_set_str(sra, NULL, "sync_action", "frozen");
1761 /* go back to raid0, drop parity disk
1762 */
1763 sysfs_set_str(sra, NULL, "level", "raid0");
1764 ioctl(fd, GET_ARRAY_INFO, &array);
1765 }
1766
1767 size_change_error:
1768 if (rv != 0) {
1769 int err = errno;
1770
1771 /* restore metadata */
1772 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
1773 UnSet, NULL, devname,
1774 ROLLBACK_METADATA_CHANGES,
1775 c->verbose) == 0)
1776 sync_metadata(st);
1777 pr_err("Cannot set device size for %s: %s\n",
1778 devname, strerror(err));
1779 if (err == EBUSY &&
1780 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
1781 cont_err("Bitmap must be removed before size can be changed\n");
1782 rv = 1;
1783 goto release;
1784 }
1785 if (s->assume_clean) {
1786 /* This will fail on kernels newer than 3.0 unless
1787 * a backport has been arranged.
1788 */
1789 if (sra == NULL ||
1790 sysfs_set_str(sra, NULL, "resync_start", "none") < 0)
1791 			pr_err("--assume-clean not supported with --grow on this kernel\n");
1792 }
1793 ioctl(fd, GET_ARRAY_INFO, &array);
1794 s->size = get_component_size(fd)/2;
1795 if (s->size == 0)
1796 s->size = array.size;
1797 if (c->verbose >= 0) {
1798 if (s->size == orig_size)
1799 pr_err("component size of %s "
1800 "unchanged at %lluK\n",
1801 devname, s->size);
1802 else
1803 pr_err("component size of %s "
1804 "has been set to %lluK\n",
1805 devname, s->size);
1806 }
1807 changed = 1;
1808 } else if (array.level != LEVEL_CONTAINER) {
1809 s->size = get_component_size(fd)/2;
1810 if (s->size == 0)
1811 s->size = array.size;
1812 }
1813
1814 /* See if there is anything else to do */
1815 if ((s->level == UnSet || s->level == array.level) &&
1816 (s->layout_str == NULL) &&
1817 (s->chunk == 0 || s->chunk == array.chunk_size) &&
1818 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
1819 /* Nothing more to do */
1820 if (!changed && c->verbose >= 0)
1821 pr_err("%s: no change requested\n",
1822 devname);
1823 goto release;
1824 }
1825
1826 /* ========= check for Raid10/Raid1 -> Raid0 conversion ===============
1827 	 * current implementation assumes that the following conditions must be met:
1828 * - RAID10:
1829 * - far_copies == 1
1830 * - near_copies == 2
1831 */
1832 if ((s->level == 0 && array.level == 10 && sra &&
1833 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
1834 (s->level == 0 && array.level == 1 && sra)) {
1835 int err;
1836 err = remove_disks_for_takeover(st, sra, array.layout);
1837 if (err) {
1838 dprintf(Name": Array cannot be reshaped\n");
1839 if (cfd > -1)
1840 close(cfd);
1841 rv = 1;
1842 goto release;
1843 }
1844 /* Make sure mdmon has seen the device removal
1845 * and updated metadata before we continue with
1846 * level change
1847 */
1848 if (container)
1849 ping_monitor(container);
1850 }
1851
1852 memset(&info, 0, sizeof(info));
1853 info.array = array;
1854 sysfs_init(&info, fd, NoMdDev);
1855 strcpy(info.text_version, sra->text_version);
1856 info.component_size = s->size*2;
1857 info.new_level = s->level;
1858 info.new_chunk = s->chunk * 1024;
1859 if (info.array.level == LEVEL_CONTAINER) {
1860 info.delta_disks = UnSet;
1861 info.array.raid_disks = s->raiddisks;
1862 } else if (s->raiddisks)
1863 info.delta_disks = s->raiddisks - info.array.raid_disks;
1864 else
1865 info.delta_disks = UnSet;
1866 if (s->layout_str == NULL) {
1867 info.new_layout = UnSet;
1868 if (info.array.level == 6 &&
1869 (info.new_level == 6 || info.new_level == UnSet) &&
1870 info.array.layout >= 16) {
1871 pr_err("%s has a non-standard layout. If you"
1872 " wish to preserve this\n", devname);
1873 cont_err("during the reshape, please specify"
1874 " --layout=preserve\n");
1875 cont_err("If you want to change it, specify a"
1876 " layout or use --layout=normalise\n");
1877 rv = 1;
1878 goto release;
1879 }
1880 } else if (strcmp(s->layout_str, "normalise") == 0 ||
1881 strcmp(s->layout_str, "normalize") == 0) {
1882 /* If we have a -6 RAID6 layout, remove the '-6'. */
1883 info.new_layout = UnSet;
1884 if (info.array.level == 6 && info.new_level == UnSet) {
1885 char l[40], *h;
1886 strcpy(l, map_num(r6layout, info.array.layout));
1887 h = strrchr(l, '-');
1888 if (h && strcmp(h, "-6") == 0) {
1889 *h = 0;
1890 info.new_layout = map_name(r6layout, l);
1891 }
1892 } else {
1893 pr_err("%s is only meaningful when reshaping"
1894 " a RAID6 array.\n", s->layout_str);
1895 rv = 1;
1896 goto release;
1897 }
1898 } else if (strcmp(s->layout_str, "preserve") == 0) {
1899 /* This means that a non-standard RAID6 layout
1900 * is OK.
1901 * In particular:
1902 * - When reshape a RAID6 (e.g. adding a device)
1903 * which is in a non-standard layout, it is OK
1904 * to preserve that layout.
1905 * - When converting a RAID5 to RAID6, leave it in
1906 * the XXX-6 layout, don't re-layout.
1907 */
1908 if (info.array.level == 6 && info.new_level == UnSet)
1909 info.new_layout = info.array.layout;
1910 else if (info.array.level == 5 && info.new_level == 6) {
1911 char l[40];
1912 strcpy(l, map_num(r5layout, info.array.layout));
1913 strcat(l, "-6");
1914 info.new_layout = map_name(r6layout, l);
1915 } else {
1916 			pr_err("%s is only meaningful when reshaping"
1917 " to RAID6\n", s->layout_str);
1918 rv = 1;
1919 goto release;
1920 }
1921 } else {
1922 int l = info.new_level;
1923 if (l == UnSet)
1924 l = info.array.level;
1925 switch (l) {
1926 case 5:
1927 info.new_layout = map_name(r5layout, s->layout_str);
1928 break;
1929 case 6:
1930 info.new_layout = map_name(r6layout, s->layout_str);
1931 break;
1932 case 10:
1933 info.new_layout = parse_layout_10(s->layout_str);
1934 break;
1935 case LEVEL_FAULTY:
1936 info.new_layout = parse_layout_faulty(s->layout_str);
1937 break;
1938 default:
1939 pr_err("layout not meaningful"
1940 " with this level\n");
1941 rv = 1;
1942 goto release;
1943 }
1944 if (info.new_layout == UnSet) {
1945 pr_err("layout %s not understood"
1946 " for this level\n",
1947 s->layout_str);
1948 rv = 1;
1949 goto release;
1950 }
1951 }
1952
1953 if (array.level == LEVEL_FAULTY) {
1954 if (s->level != UnSet && s->level != array.level) {
1955 pr_err("cannot change level of Faulty device\n");
1956 			rv = 1;
1957 }
1958 if (s->chunk) {
1959 pr_err("cannot set chunksize of Faulty device\n");
1960 			rv = 1;
1961 }
1962 if (s->raiddisks && s->raiddisks != 1) {
1963 pr_err("cannot set raid_disks of Faulty device\n");
1964 			rv = 1;
1965 }
1966 if (s->layout_str) {
1967 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
1968 dprintf("Cannot get array information.\n");
1969 goto release;
1970 }
1971 array.layout = info.new_layout;
1972 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
1973 pr_err("failed to set new layout\n");
1974 rv = 1;
1975 } else if (c->verbose >= 0)
1976 printf("layout for %s set to %d\n",
1977 devname, array.layout);
1978 }
1979 } else if (array.level == LEVEL_CONTAINER) {
1980 /* This change is to be applied to every array in the
1981 * container. This is only needed when the metadata imposes
1982 		 * restrictions on the various arrays in the container.
1983 * Currently we only know that IMSM requires all arrays
1984 * to have the same number of devices so changing the
1985 * number of devices (On-Line Capacity Expansion) must be
1986 * performed at the level of the container
1987 */
1988 rv = reshape_container(container, devname, -1, st, &info,
1989 c->force, c->backup_file, c->verbose, 0, 0);
1990 frozen = 0;
1991 } else {
1992 /* get spare devices from external metadata
1993 */
1994 if (st->ss->external) {
1995 struct mdinfo *info2;
1996
1997 info2 = st->ss->container_content(st, subarray);
1998 if (info2) {
1999 info.array.spare_disks =
2000 info2->array.spare_disks;
2001 sysfs_free(info2);
2002 }
2003 }
2004
2005 /* Impose these changes on a single array. First
2006 * check that the metadata is OK with the change. */
2007
2008 if (reshape_super(st, 0, info.new_level,
2009 info.new_layout, info.new_chunk,
2010 info.array.raid_disks, info.delta_disks,
2011 c->backup_file, devname, APPLY_METADATA_CHANGES,
2012 c->verbose)) {
2013 rv = 1;
2014 goto release;
2015 }
2016 sync_metadata(st);
2017 rv = reshape_array(container, fd, devname, st, &info, c->force,
2018 devlist, c->backup_file, c->verbose, 0, 0, 0);
2019 frozen = 0;
2020 }
2021 release:
2022 sysfs_free(sra);
2023 if (frozen > 0)
2024 unfreeze(st);
2025 return rv;
2026 }
2027
2028 /* verify_reshape_position()
2029 * Checks that the reshape position recorded in the metadata is not
2030 * ahead of the position reported by md.
2031 * Return value:
2032 * 0 : no valid sysfs entry
2033 * this can happen when the reshape has not been started yet
2034 * (it will be started by reshape_array()) or before a raid0 takeover
2035 * -1 : error, reshape position is obviously wrong
2036 * 1 : success, reshape progress is correct or has been updated
2037 */
2038 static int verify_reshape_position(struct mdinfo *info, int level)
2039 {
2040 int ret_val = 0;
2041 char buf[40];
2042 int rv;
2043
2044 /* read sync_max, failure can mean raid0 array */
2045 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2046
2047 if (rv > 0) {
2048 char *ep;
2049 unsigned long long position = strtoull(buf, &ep, 0);
2050
2051 dprintf(Name": Read sync_max sysfs entry: %s\n", buf);
2052 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2053 position *= get_data_disks(level,
2054 info->new_layout,
2055 info->array.raid_disks);
2056 if (info->reshape_progress < position) {
2057 dprintf("Corrected reshape progress (%llu) to "
2058 "md position (%llu)\n",
2059 info->reshape_progress, position);
2060 info->reshape_progress = position;
2061 ret_val = 1;
2062 } else if (info->reshape_progress > position) {
2063 pr_err("Fatal error: array "
2064 "reshape was not properly frozen "
2065 "(expected reshape position is %llu, "
2066 "but reshape progress is %llu.\n",
2067 position, info->reshape_progress);
2068 ret_val = -1;
2069 } else {
2070 dprintf("Reshape position in md and metadata "
2071 "are the same;");
2072 ret_val = 1;
2073 }
2074 }
2075 } else if (rv == 0) {
2076 /* a valid sysfs entry with zero-length content
2077 * should be treated as an error
2078 */
2079 ret_val = -1;
2080 }
2081
2082 return ret_val;
2083 }
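/*
 * Editorial note (illustrative, not part of mdadm): sync_max is a
 * per-device sector count while reshape_progress is an array-wide
 * sector offset, which is why the function above multiplies by the
 * number of data disks.  A hypothetical example for a 6-device RAID6
 * (4 data disks) with sync_max reading 1048576:
 *
 *	position = 1048576 * get_data_disks(6, layout, 6)
 *		 = 1048576 * 4 = 4194304 array sectors
 *
 * A smaller recorded reshape_progress is brought forward to match md;
 * a larger one is reported as an error.
 */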
2084
2085 static int reshape_array(char *container, int fd, char *devname,
2086 struct supertype *st, struct mdinfo *info,
2087 int force, struct mddev_dev *devlist,
2088 char *backup_file, int verbose, int forked,
2089 int restart, int freeze_reshape)
2090 {
2091 struct reshape reshape;
2092 int spares_needed;
2093 char *msg;
2094 int orig_level = UnSet;
2095 int disks, odisks;
2096 int delayed;
2097
2098 struct mdu_array_info_s array;
2099 char *c;
2100
2101 struct mddev_dev *dv;
2102 int added_disks;
2103
2104 int *fdlist = NULL;
2105 unsigned long long *offsets = NULL;
2106 int d;
2107 int nrdisks;
2108 int err;
2109 unsigned long blocks;
2110 unsigned long cache;
2111 unsigned long long array_size;
2112 int done;
2113 struct mdinfo *sra = NULL;
2114
2115 /* when reshaping a RAID0, the component_size might be zero.
2116 * So try to fix that up.
2117 */
2118 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2119 dprintf("Cannot get array information.\n");
2120 goto release;
2121 }
2122 if (array.level == 0 && info->component_size == 0) {
2123 get_dev_size(fd, NULL, &array_size);
2124 info->component_size = array_size / array.raid_disks;
2125 }
2126
2127 if (info->reshape_active) {
2128 int new_level = info->new_level;
2129 info->new_level = UnSet;
2130 if (info->delta_disks > 0)
2131 info->array.raid_disks -= info->delta_disks;
2132 msg = analyse_change(info, &reshape);
2133 info->new_level = new_level;
2134 if (info->delta_disks > 0)
2135 info->array.raid_disks += info->delta_disks;
2136 if (!restart)
2137 /* Make sure the array isn't read-only */
2138 ioctl(fd, RESTART_ARRAY_RW, 0);
2139 } else
2140 msg = analyse_change(info, &reshape);
2141 if (msg) {
2142 pr_err("%s\n", msg);
2143 goto release;
2144 }
2145 if (restart &&
2146 (reshape.level != info->array.level ||
2147 reshape.before.layout != info->array.layout ||
2148 reshape.before.data_disks + reshape.parity
2149 != info->array.raid_disks - max(0, info->delta_disks))) {
2150 pr_err("reshape info is not in native format -"
2151 " cannot continue.\n");
2152 goto release;
2153 }
2154
2155 if (st->ss->external && restart && (info->reshape_progress == 0)) {
2156 /* When a reshape is restarted from '0', the very beginning of
2157 * the array, it is possible that for external metadata the
2158 * reshape and array configuration never happened.
2159 * Check whether md agrees that the reshape is restarting from 0.
2160 * If so, this is a regular reshape start after the metadata has
2161 * switched to the next array.
2162 */
2163 if ((verify_reshape_position(info, reshape.level) >= 0) &&
2164 (info->reshape_progress == 0))
2165 restart = 0;
2166 }
2167 if (restart) {
2168 /* reshape already started. just skip to monitoring the reshape */
2169 if (reshape.backup_blocks == 0)
2170 return 0;
2171 goto started;
2172 }
2173 /* The container is frozen but the array may not be.
2174 * So freeze the array so spares don't get put to the wrong use
2175 * FIXME there should probably be a cleaner separation between
2176 * freeze_array and freeze_container.
2177 */
2178 sysfs_freeze_array(info);
2179 /* Check we have enough spares to not be degraded */
2180 added_disks = 0;
2181 for (dv = devlist; dv ; dv=dv->next)
2182 added_disks++;
2183 spares_needed = max(reshape.before.data_disks,
2184 reshape.after.data_disks)
2185 + reshape.parity - array.raid_disks;
2186
2187 if (!force &&
2188 info->new_level > 1 && info->array.level > 1 &&
2189 spares_needed > info->array.spare_disks + added_disks) {
2190 pr_err("Need %d spare%s to avoid degraded array,"
2191 " and only have %d.\n"
2192 " Use --force to over-ride this check.\n",
2193 spares_needed,
2194 spares_needed == 1 ? "" : "s",
2195 info->array.spare_disks + added_disks);
2196 goto release;
2197 }
2198 /* Check we have enough spares to not fail */
2199 spares_needed = max(reshape.before.data_disks,
2200 reshape.after.data_disks)
2201 - array.raid_disks;
2202 if ((info->new_level > 1 || info->new_level == 0) &&
2203 spares_needed > info->array.spare_disks + added_disks) {
2204 pr_err("Need %d spare%s to create working array,"
2205 " and only have %d.\n",
2206 spares_needed,
2207 spares_needed == 1 ? "" : "s",
2208 info->array.spare_disks + added_disks);
2209 goto release;
2210 }
2211
2212 if (reshape.level != array.level) {
2213 char *c = map_num(pers, reshape.level);
2214 int err;
2215 if (c == NULL)
2216 goto release;
2217
2218 err = sysfs_set_str(info, NULL, "level", c);
2219 if (err) {
2220 err = errno;
2221 pr_err("%s: could not set level to %s\n",
2222 devname, c);
2223 if (err == EBUSY &&
2224 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2225 cont_err("Bitmap must be removed"
2226 " before level can be changed\n");
2227 goto release;
2228 }
2229 if (verbose >= 0)
2230 pr_err("level of %s changed to %s\n",
2231 devname, c);
2232 orig_level = array.level;
2233 sysfs_freeze_array(info);
2234
2235 if (reshape.level > 0 && st->ss->external) {
2236 /* make sure mdmon is aware of the new level */
2237 if (mdmon_running(st->container_dev))
2238 flush_mdmon(container);
2239
2240 if (!mdmon_running(st->container_dev))
2241 start_mdmon(st->container_dev);
2242 ping_monitor(container);
2243 if (mdmon_running(st->container_dev) &&
2244 st->update_tail == NULL)
2245 st->update_tail = &st->updates;
2246 }
2247 }
2248 /* ->reshape_super might have chosen some spares from the
2249 * container that it wants to be part of the new array.
2250 * We can collect them with ->container_content and give
2251 * them to the kernel.
2252 */
2253 if (st->ss->reshape_super && st->ss->container_content) {
2254 char *subarray = strchr(info->text_version+1, '/')+1;
2255 struct mdinfo *info2 =
2256 st->ss->container_content(st, subarray);
2257 struct mdinfo *d;
2258
2259 if (info2) {
2260 sysfs_init(info2, fd, st->devnum);
2261 /* When increasing number of devices, we need to set
2262 * new raid_disks before adding these, or they might
2263 * be rejected.
2264 */
2265 if (reshape.backup_blocks &&
2266 reshape.after.data_disks > reshape.before.data_disks)
2267 subarray_set_num(container, info2, "raid_disks",
2268 reshape.after.data_disks +
2269 reshape.parity);
2270 for (d = info2->devs; d; d = d->next) {
2271 if (d->disk.state == 0 &&
2272 d->disk.raid_disk >= 0) {
2273 /* This is a spare that wants to
2274 * be part of the array.
2275 */
2276 add_disk(fd, st, info2, d);
2277 }
2278 }
2279 sysfs_free(info2);
2280 }
2281 }
2282 /* We might have been given some devices to add to the
2283 * array. Now that the array has been changed to the right
2284 * level and frozen, we can safely add them.
2285 */
2286 if (devlist)
2287 Manage_subdevs(devname, fd, devlist, verbose,
2288 0, NULL, 0);
2289
2290 if (reshape.backup_blocks == 0) {
2291 /* No restriping needed, but we might need to impose
2292 * some more changes: layout, raid_disks, chunk_size
2293 */
2294 /* read current array info */
2295 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2296 dprintf("Cannot get array information.\n");
2297 goto release;
2298 }
2299 /* compare current array info with the new values and
2300 * update anything that differs */
2301 if (info->new_layout != UnSet &&
2302 info->new_layout != array.layout) {
2303 array.layout = info->new_layout;
2304 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2305 pr_err("failed to set new layout\n");
2306 goto release;
2307 } else if (verbose >= 0)
2308 printf("layout for %s set to %d\n",
2309 devname, array.layout);
2310 }
2311 if (info->delta_disks != UnSet &&
2312 info->delta_disks != 0 &&
2313 array.raid_disks != (info->array.raid_disks + info->delta_disks)) {
2314 array.raid_disks += info->delta_disks;
2315 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2316 pr_err("failed to set raid disks\n");
2317 goto release;
2318 } else if (verbose >= 0) {
2319 printf("raid_disks for %s set to %d\n",
2320 devname, array.raid_disks);
2321 }
2322 }
2323 if (info->new_chunk != 0 &&
2324 info->new_chunk != array.chunk_size) {
2325 if (sysfs_set_num(info, NULL,
2326 "chunk_size", info->new_chunk) != 0) {
2327 pr_err("failed to set chunk size\n");
2328 goto release;
2329 } else if (verbose >= 0)
2330 printf("chunk size for %s set to %d\n",
2331 devname, array.chunk_size);
2332 }
2333 unfreeze(st);
2334 return 0;
2335 }
2336
2337 /*
2338 * There are three possibilities.
2339 * 1/ The array will shrink.
2340 * We need to ensure the reshape will pause before reaching
2341 * the 'critical section'. We also need to fork and wait for
2342 * that to happen. When it does we
2343 * suspend/backup/complete/unfreeze
2344 *
2345 * 2/ The array will not change size.
2346 * This requires that we keep a backup of a sliding window
2347 * so that we can restore data after a crash. So we need
2348 * to fork and monitor progress.
2349 * In future we will allow the data_offset to change, so
2350 * a sliding backup becomes unnecessary.
2351 *
2352 * 3/ The array will grow. This is relatively easy.
2353 * However the kernel's restripe routines will cheerfully
2354 * overwrite some early data before it is safe. So we
2355 * need to make a backup of the early parts of the array
2356 * and be ready to restore it if rebuild aborts very early.
2357 * For externally managed metadata, we still need a forked
2358 * child to monitor the reshape and suspend IO over the region
2359 * that is being reshaped.
2360 *
2361 * We backup data by writing it to one spare, or to a
2362 * file which was given on command line.
2363 *
2364 * In each case, we first make sure that storage is available
2365 * for the required backup.
2366 * Then we:
2367 * - request the shape change.
2368 * - fork to handle backup etc.
2369 */
2370 started:
2371 /* Check that we can hold all the data */
2372 get_dev_size(fd, NULL, &array_size);
2373 if (reshape.new_size < (array_size/512)) {
2374 pr_err("this change will reduce the size of the array.\n"
2375 " use --grow --array-size first to truncate array.\n"
2376 " e.g. mdadm --grow %s --array-size %llu\n",
2377 devname, reshape.new_size/2);
2378 goto release;
2379 }
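/*
 * Editorial note on units in the check above (illustrative; assuming
 * get_dev_size() reports the size in bytes): array_size/512 converts
 * bytes to 512-byte sectors for comparison with reshape.new_size
 * (sectors), and reshape.new_size/2 converts sectors to KiB, the
 * default unit for --array-size.  E.g. new_size = 1953125000 sectors
 * would be suggested as --array-size 976562500.
 */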
2380
2381 sra = sysfs_read(fd, 0,
2382 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
2383 GET_CACHE);
2384 if (!sra) {
2385 pr_err("%s: Cannot get array details from sysfs\n",
2386 devname);
2387 goto release;
2388 }
2389
2390 /* Decide how many blocks (sectors) for a reshape
2391 * unit. The number we have so far is just a minimum
2392 */
2393 blocks = reshape.backup_blocks;
2394 if (reshape.before.data_disks ==
2395 reshape.after.data_disks) {
2396 /* Make 'blocks' bigger for better throughput, but
2397 * not so big that we reject it below.
2398 * Try for 16 megabytes
2399 */
2400 while (blocks * 32 < sra->component_size &&
2401 blocks < 16*1024*2)
2402 blocks *= 2;
2403 } else
2404 pr_err("Need to backup %luK of critical "
2405 "section..\n", blocks/2);
2406
2407 if (blocks >= sra->component_size/2) {
2408 pr_err("%s: Something wrong"
2409 " - reshape aborted\n",
2410 devname);
2411 goto release;
2412 }
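/*
 * Editorial worked example (hypothetical numbers): 'blocks' is in
 * 512-byte sectors.  Starting from backup_blocks = 2048 sectors
 * (1 MiB) on an array with component_size = 976762584 sectors, the
 * loop above doubles it while blocks < 16*1024*2 = 32768, giving a
 * 16 MiB reshape unit.  The message in the other branch reports
 * blocks/2, i.e. the same quantity expressed in KiB.
 */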
2413
2414 /* Now we need to open all these devices so we can read/write.
2415 */
2416 nrdisks = max(reshape.before.data_disks,
2417 reshape.after.data_disks) + reshape.parity
2418 + sra->array.spare_disks;
2419 fdlist = xcalloc((1+nrdisks), sizeof(int));
2420 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
2421
2422 odisks = reshape.before.data_disks + reshape.parity;
2423 d = reshape_prepare_fdlist(devname, sra, odisks,
2424 nrdisks, blocks, backup_file,
2425 fdlist, offsets);
2426 if (d < 0) {
2427 goto release;
2428 }
2429 if ((st->ss->manage_reshape == NULL) ||
2430 (st->ss->recover_backup == NULL)) {
2431 if (backup_file == NULL) {
2432 if (reshape.after.data_disks <=
2433 reshape.before.data_disks) {
2434 pr_err("%s: Cannot grow - "
2435 "need backup-file\n", devname);
2436 goto release;
2437 } else if (sra->array.spare_disks == 0) {
2438 pr_err("%s: Cannot grow - "
2439 "need a spare or backup-file to backup "
2440 "critical section\n", devname);
2441 goto release;
2442 }
2443 } else {
2444 if (!reshape_open_backup_file(backup_file, fd, devname,
2445 (signed)blocks,
2446 fdlist+d, offsets+d,
2447 restart)) {
2448 goto release;
2449 }
2450 d++;
2451 }
2452 }
2453
2454 /* lastly, check that the internal stripe cache is
2455 * large enough, or it won't work.
2456 * It must hold at least 4 stripes of the larger
2457 * chunk size
2458 */
2459 cache = max(info->array.chunk_size, info->new_chunk);
2460 cache *= 4; /* 4 stripes minimum */
2461 cache /= 512; /* convert to sectors */
2462 disks = min(reshape.before.data_disks, reshape.after.data_disks);
2463 /* make sure there is room for 'blocks' with a bit to spare */
2464 if (cache < 16 + blocks / disks)
2465 cache = 16 + blocks / disks;
2466 cache /= (4096/512); /* Convert from sectors to pages */
2467
2468 if (sra->cache_size < cache)
2469 subarray_set_num(container, sra, "stripe_cache_size",
2470 cache+1);
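/*
 * Editorial worked example (hypothetical numbers): with a 512 KiB
 * chunk, cache = 524288 * 4 / 512 = 4096 sectors.  If blocks = 32768
 * and disks = 4, the floor 16 + 32768/4 = 8208 sectors is larger, so
 * cache becomes 8208; 8208 / 8 = 1026 pages, and stripe_cache_size
 * is raised to 1027 if it is currently smaller.
 */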
2471
2472 /* Right, everything seems fine. Let's kick things off.
2473 * If only changing raid_disks, use ioctl, else use
2474 * sysfs.
2475 */
2476 sync_metadata(st);
2477
2478 sra->new_chunk = info->new_chunk;
2479
2480 if (restart) {
2481 /* For external metadata the checkpoint saved by mdmon can be lost
2482 * or missed (e.g. due to a crash). Check whether md is farther
2483 * into the restart than the metadata indicates.
2484 * If so, the metadata information is obsolete.
2485 */
2486 if (st->ss->external)
2487 verify_reshape_position(info, reshape.level);
2488 sra->reshape_progress = info->reshape_progress;
2489 } else {
2490 sra->reshape_progress = 0;
2491 if (reshape.after.data_disks < reshape.before.data_disks)
2492 /* start from the end of the new array */
2493 sra->reshape_progress = (sra->component_size
2494 * reshape.after.data_disks);
2495 }
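/* Editorial example (hypothetical numbers): when shrinking from 5 to
 * 4 data disks with component_size = 1000000 sectors, the reshape
 * starts at 1000000 * 4 = 4000000 (the end of the new, smaller array)
 * and reshape_progress decreases towards 0; when growing it starts at
 * 0 and increases.
 */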
2496
2497 if (info->array.chunk_size == info->new_chunk &&
2498 reshape.before.layout == reshape.after.layout &&
2499 st->ss->external == 0) {
2500 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2501 ioctl(fd, GET_ARRAY_INFO, &array);
2502 array.raid_disks = reshape.after.data_disks + reshape.parity;
2503 if (!restart &&
2504 ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2505 int err = errno;
2506
2507 pr_err("Cannot set device shape for %s: %s\n",
2508 devname, strerror(errno));
2509
2510 if (err == EBUSY &&
2511 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2512 cont_err("Bitmap must be removed before"
2513 " shape can be changed\n");
2514
2515 goto release;
2516 }
2517 } else if (!restart) {
2518 /* set them all just in case some old 'new_*' value
2519 * persists from some earlier problem.
2520 */
2521 int err = 0;
2522 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2523 err = errno;
2524 if (!err && sysfs_set_num(sra, NULL, "layout",
2525 reshape.after.layout) < 0)
2526 err = errno;
2527 if (!err && subarray_set_num(container, sra, "raid_disks",
2528 reshape.after.data_disks +
2529 reshape.parity) < 0)
2530 err = errno;
2531 if (err) {
2532 pr_err("Cannot set device shape for %s\n",
2533 devname);
2534
2535 if (err == EBUSY &&
2536 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2537 cont_err("Bitmap must be removed before"
2538 " shape can be changed\n");
2539 goto release;
2540 }
2541 }
2542
2543 err = start_reshape(sra, restart, reshape.before.data_disks,
2544 reshape.after.data_disks);
2545 if (err) {
2546 pr_err("Cannot %s reshape for %s\n",
2547 restart ? "continue" : "start",
2548 devname);
2549 goto release;
2550 }
2551 if (restart)
2552 sysfs_set_str(sra, NULL, "array_state", "active");
2553 if (freeze_reshape) {
2554 free(fdlist);
2555 free(offsets);
2556 pr_err("Reshape has to be continued from"
2557 " location %llu when the root filesystem has been mounted.\n",
2558 sra->reshape_progress);
2559 sysfs_free(sra);
2560 return 1;
2561 }
2562
2563 /* Now we just need to kick off the reshape and watch, while
2564 * handling backups of the data...
2565 * This is all done by a forked background process.
2566 */
2567 switch(forked ? 0 : fork()) {
2568 case -1:
2569 pr_err("Cannot run child to monitor reshape: %s\n",
2570 strerror(errno));
2571 abort_reshape(sra);
2572 goto release;
2573 default:
2574 free(fdlist);
2575 free(offsets);
2576 sysfs_free(sra);
2577 return 0;
2578 case 0:
2579 map_fork();
2580 break;
2581 }
2582
2583 /* If another array on the same devices is busy, the
2584 * reshape will wait for them. This would mean that
2585 * the first section that we suspend will stay suspended
2586 * for a long time. So check on that possibility
2587 * by looking for "DELAYED" in /proc/mdstat, and if found,
2588 * wait a while
2589 */
2590 do {
2591 struct mdstat_ent *mds, *m;
2592 delayed = 0;
2593 mds = mdstat_read(0, 0);
2594 for (m = mds; m; m = m->next)
2595 if (m->devnum == devname2devnum(sra->sys_name)) {
2596 if (m->resync &&
2597 m->percent == RESYNC_DELAYED)
2598 delayed = 1;
2599 if (m->resync == 0)
2600 /* Haven't started the reshape thread
2601 * yet, wait a bit
2602 */
2603 delayed = 2;
2604 break;
2605 }
2606 free_mdstat(mds);
2607 if (delayed == 1 && get_linux_version() < 3007000) {
2608 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
2609 " You might experience problems until other reshapes complete.\n");
2610 delayed = 0;
2611 }
2612 if (delayed)
2613 sleep(30 - (delayed-1) * 25);
2614 } while (delayed);
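/* Editorial note: with delayed == 1 (reshape shown as DELAYED) the
 * sleep above is 30 - 0*25 = 30 seconds; with delayed == 2 (reshape
 * thread not started yet) it is 30 - 1*25 = 5 seconds before
 * /proc/mdstat is re-checked.
 */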
2615
2616 close(fd);
2617 if (check_env("MDADM_GROW_VERIFY"))
2618 fd = open(devname, O_RDONLY | O_DIRECT);
2619 else
2620 fd = -1;
2621 mlockall(MCL_FUTURE);
2622
2623 if (st->ss->external) {
2624 /* metadata handler takes it from here */
2625 done = st->ss->manage_reshape(
2626 fd, sra, &reshape, st, blocks,
2627 fdlist, offsets,
2628 d - odisks, fdlist+odisks,
2629 offsets+odisks);
2630 } else
2631 done = child_monitor(
2632 fd, sra, &reshape, st, blocks,
2633 fdlist, offsets,
2634 d - odisks, fdlist+odisks,
2635 offsets+odisks);
2636
2637 free(fdlist);
2638 free(offsets);
2639
2640 if (backup_file && done)
2641 unlink(backup_file);
2642 if (!done) {
2643 abort_reshape(sra);
2644 goto out;
2645 }
2646
2647 if (!st->ss->external &&
2648 !(reshape.before.data_disks != reshape.after.data_disks
2649 && info->custom_array_size) &&
2650 info->new_level == reshape.level &&
2651 !forked) {
2652 /* no need to wait for the reshape to finish as
2653 * there is nothing more to do.
2654 */
2655 sysfs_free(sra);
2656 exit(0);
2657 }
2658 wait_reshape(sra);
2659
2660 if (st->ss->external) {
2661 /* Re-load the metadata as much could have changed */
2662 int cfd = open_dev(st->container_dev);
2663 if (cfd >= 0) {
2664 flush_mdmon(container);
2665 st->ss->free_super(st);
2666 st->ss->load_container(st, cfd, container);
2667 close(cfd);
2668 }
2669 }
2670
2671 /* set new array size if required; custom_array_size is used
2672 * by this metadata.
2673 */
2674 if (reshape.before.data_disks !=
2675 reshape.after.data_disks &&
2676 info->custom_array_size)
2677 set_array_size(st, info, info->text_version);
2678
2679 if (info->new_level != reshape.level) {
2680
2681 c = map_num(pers, info->new_level);
2682 if (c) {
2683 err = sysfs_set_str(sra, NULL, "level", c);
2684 if (err)
2685 pr_err("%s: could not set level "
2686 "to %s\n", devname, c);
2687 }
2688 if (info->new_level == 0)
2689 st->update_tail = NULL;
2690 }
2691 out:
2692 sysfs_free(sra);
2693 if (forked)
2694 return 0;
2695 unfreeze(st);
2696 exit(0);
2697
2698 release:
2699 free(fdlist);
2700 free(offsets);
2701 if (orig_level != UnSet && sra) {
2702 c = map_num(pers, orig_level);
2703 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
2704 pr_err("aborting level change\n");
2705 }
2706 sysfs_free(sra);
2707 if (!forked)
2708 unfreeze(st);
2709 return 1;
2710 }
2711
2712 /* mdfd handle is passed to be closed in child process (after fork).
2713 */
2714 int reshape_container(char *container, char *devname,
2715 int mdfd,
2716 struct supertype *st,
2717 struct mdinfo *info,
2718 int force,
2719 char *backup_file,
2720 int verbose, int restart, int freeze_reshape)
2721 {
2722 struct mdinfo *cc = NULL;
2723 int rv = restart;
2724 int last_devnum = -1;
2725
2726 /* component_size is not meaningful for a container,
2727 * so pass '0' meaning 'no change'
2728 */
2729 if (!restart &&
2730 reshape_super(st, 0, info->new_level,
2731 info->new_layout, info->new_chunk,
2732 info->array.raid_disks, info->delta_disks,
2733 backup_file, devname, APPLY_METADATA_CHANGES,
2734 verbose)) {
2735 unfreeze(st);
2736 return 1;
2737 }
2738
2739 sync_metadata(st);
2740
2741 /* ping monitor to be sure that update is on disk
2742 */
2743 ping_monitor(container);
2744
2745 switch (fork()) {
2746 case -1: /* error */
2747 perror("Cannot fork to complete reshape\n");
2748 unfreeze(st);
2749 return 1;
2750 default: /* parent */
2751 if (!freeze_reshape)
2752 printf(Name ": multi-array reshape continues"
2753 " in background\n");
2754 return 0;
2755 case 0: /* child */
2756 map_fork();
2757 break;
2758 }
2759
2760 /* close unused handle in child process
2761 */
2762 if (mdfd > -1)
2763 close(mdfd);
2764
2765 while(1) {
2766 /* For each member array with reshape_active,
2767 * we need to perform the reshape.
2768 * We pick the first array that needs reshaping and
2769 * reshape it. reshape_array() will re-read the metadata
2770 * so the next time through a different array should be
2771 * ready for reshape.
2772 * It is possible that the 'different' array will not
2773 * be assembled yet. In that case we simply exit.
2774 * When it is assembled, the mdadm which assembles it
2775 * will take over the reshape.
2776 */
2777 struct mdinfo *content;
2778 int fd;
2779 struct mdstat_ent *mdstat;
2780 char *adev;
2781
2782 sysfs_free(cc);
2783
2784 cc = st->ss->container_content(st, NULL);
2785
2786 for (content = cc; content ; content = content->next) {
2787 char *subarray;
2788 if (!content->reshape_active)
2789 continue;
2790
2791 subarray = strchr(content->text_version+1, '/')+1;
2792 mdstat = mdstat_by_subdev(subarray,
2793 devname2devnum(container));
2794 if (!mdstat)
2795 continue;
2796 if (mdstat->active == 0) {
2797 pr_err("Skipping inactive "
2798 "array md%i.\n", mdstat->devnum);
2799 free_mdstat(mdstat);
2800 mdstat = NULL;
2801 continue;
2802 }
2803 break;
2804 }
2805 if (!content)
2806 break;
2807
2808 adev = map_dev(dev2major(mdstat->devnum),
2809 dev2minor(mdstat->devnum),
2810 0);
2811 if (!adev)
2812 adev = content->text_version;
2813
2814 fd = open_dev(mdstat->devnum);
2815 if (fd < 0) {
2816 printf(Name ": Device %s cannot be opened for reshape.",
2817 adev);
2818 break;
2819 }
2820
2821 if (last_devnum == mdstat->devnum) {
2822 /* Do not allow for multiple reshape_array() calls for
2823 * the same array.
2824 * It can happen when reshape_array() returns without
2825 * error, when reshape is not finished (wrong reshape
2826 * starting/continuation conditions). Mdmon doesn't
2827 * switch to next array in container and reentry
2828 * conditions for the same array occur.
2829 * This is possibly an interim measure until the behaviour of
2830 * reshape_array() is resolved.
2831 */
2832 printf(Name ": Multiple reshape execution detected for "
2833 "device %s.", adev);
2834 close(fd);
2835 break;
2836 }
2837 last_devnum = mdstat->devnum;
2838
2839 sysfs_init(content, fd, mdstat->devnum);
2840
2841 if (mdmon_running(devname2devnum(container)))
2842 flush_mdmon(container);
2843
2844 rv = reshape_array(container, fd, adev, st,
2845 content, force, NULL,
2846 backup_file, verbose, 1, restart,
2847 freeze_reshape);
2848 close(fd);
2849
2850 if (freeze_reshape) {
2851 sysfs_free(cc);
2852 exit(0);
2853 }
2854
2855 restart = 0;
2856 if (rv)
2857 break;
2858
2859 if (mdmon_running(devname2devnum(container)))
2860 flush_mdmon(container);
2861 }
2862 if (!rv)
2863 unfreeze(st);
2864 sysfs_free(cc);
2865 exit(0);
2866 }
2867
2868 /*
2869 * We run a child process in the background which performs the following
2870 * steps:
2871 * - wait for resync to reach a certain point
2872 * - suspend io to the following section
2873 * - backup that section
2874 * - allow resync to proceed further
2875 * - resume io
2876 * - discard the backup.
2877 *
2878 * These are combined in slightly different ways in the three cases.
2879 * Grow:
2880 * - suspend/backup/allow/wait/resume/discard
2881 * Shrink:
2882 * - allow/wait/suspend/backup/allow/wait/resume/discard
2883 * same-size:
2884 * - wait/resume/discard/suspend/backup/allow
2885 *
2886 * suspend/backup/allow always come together
2887 * wait/resume/discard do too.
2888 * For the same-size case we have two backups to improve flow.
2889 *
2890 */
2891
2892 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
2893 unsigned long long backup_point,
2894 unsigned long long wait_point,
2895 unsigned long long *suspend_point,
2896 unsigned long long *reshape_completed)
2897 {
2898 /* This function is called repeatedly by the reshape manager.
2899 * It determines how much progress can safely be made and allows
2900 * that progress.
2901 * - 'info' identifies the array and particularly records in
2902 * ->reshape_progress the metadata's knowledge of progress
2903 * This is a sector offset from the start of the array
2904 * of the next array block to be relocated. This number
2905 * may increase from 0 or decrease from array_size, depending
2906 * on the type of reshape that is happening.
2907 * Note that in contrast, 'sync_completed' is a block count of the
2908 * reshape so far. It gives the distance between the start point
2909 * (head or tail of device) and the next place that data will be
2910 * written. It always increases.
2911 * - 'reshape' is the structure created by analyse_change
2912 * - 'backup_point' shows how much the metadata manager has backed-up
2913 * data. For reshapes with increasing progress, it is the next address
2914 * to be backed up, previous addresses have been backed-up. For
2915 * decreasing progress, it is the earliest address that has been
2916 * backed up - later addresses are also backed up.
2917 * So addresses between reshape_progress and backup_point are
2918 * backed up providing those are in the 'correct' order.
2919 * - 'wait_point' is an array address. When reshape_completed
2920 * passes this point, progress_reshape should return. It might
2921 * return earlier if it determines that ->reshape_progress needs
2922 * to be updated or further backup is needed.
2923 * - suspend_point is maintained by progress_reshape and the caller
2924 * should not touch it except to initialise to zero.
2925 * It is an array address and it only increases in 2.6.37 and earlier.
2926 * This makes it difficult to handle reducing reshapes with
2927 * external metadata.
2928 * However: it is similar to backup_point in that it records the
2929 * other end of a suspended region from reshape_progress.
2930 * it is moved to extend the region that is safe to backup and/or
2931 * reshape
2932 * - reshape_completed is read from sysfs and returned. The caller
2933 * should copy this into ->reshape_progress when it has reason to
2934 * believe that the metadata knows this, and any backup outside this
2935 * has been erased.
2936 *
2937 * Return value is:
2938 * 1 if more data from backup_point - but only as far as suspend_point,
2939 * should be backed up
2940 * 0 if things are progressing smoothly
2941 * -1 if the reshape is finished because it is all done,
2942 * -2 if the reshape is finished due to an error.
2943 */
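/*
 * Editorial example (hypothetical numbers): growing from 4 to 5 data
 * disks.  If sync_completed reports 2048 sectors per device, the
 * corresponding array position is 2048 * after.data_disks =
 * 2048 * 5 = 10240 sectors, which is the value handed back through
 * *reshape_completed by the conversion near the end of this function.
 */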
2944
2945 int advancing = (reshape->after.data_disks
2946 >= reshape->before.data_disks);
2947 unsigned long long need_backup; /* All data between start of array and
2948 * here will at some point need to
2949 * be backed up.
2950 */
2951 unsigned long long read_offset, write_offset;
2952 unsigned long long write_range;
2953 unsigned long long max_progress, target, completed;
2954 unsigned long long array_size = (info->component_size
2955 * reshape->before.data_disks);
2956 int fd;
2957 char buf[20];
2958
2959 /* First, we unsuspend any region that is now known to be safe.
2960 * If suspend_point is on the 'wrong' side of reshape_progress, then
2961 * we don't have or need suspension at the moment. This is true for
2962 * native metadata when we don't need to back-up.
2963 */
2964 if (advancing) {
2965 if (info->reshape_progress <= *suspend_point)
2966 sysfs_set_num(info, NULL, "suspend_lo",
2967 info->reshape_progress);
2968 } else {
2969 /* Note: this won't work in 2.6.37 and before.
2970 * Something somewhere should make sure we don't need it!
2971 */
2972 if (info->reshape_progress >= *suspend_point)
2973 sysfs_set_num(info, NULL, "suspend_hi",
2974 info->reshape_progress);
2975 }
2976
2977 /* Now work out how far it is safe to progress.
2978 * If the read_offset for ->reshape_progress is less than
2979 * 'blocks' beyond the write_offset, we can only progress as far
2980 * as a backup.
2981 * Otherwise we can progress until the write_offset for the new location
2982 * reaches (within 'blocks' of) the read_offset at the current location.
2983 * However that region must be suspended unless we are using native
2984 * metadata.
2985 * If we need to suspend more, we limit it to 128M per device, which is
2986 * rather arbitrary and should be some time-based calculation.
2987 */
2988 read_offset = info->reshape_progress / reshape->before.data_disks;
2989 write_offset = info->reshape_progress / reshape->after.data_disks;
2990 write_range = info->new_chunk/512;
2991 if (reshape->before.data_disks == reshape->after.data_disks)
2992 need_backup = array_size;
2993 else
2994 need_backup = reshape->backup_blocks;
2995 if (advancing) {
2996 if (read_offset < write_offset + write_range)
2997 max_progress = backup_point;
2998 else
2999 max_progress =
3000 read_offset *
3001 reshape->after.data_disks;
3002 } else {
3003 if (read_offset > write_offset - write_range)
3004 /* Can only progress as far as has been backed up,
3005 * which must be suspended */
3006 max_progress = backup_point;
3007 else if (info->reshape_progress <= need_backup)
3008 max_progress = backup_point;
3009 else {
3010 if (info->array.major_version >= 0)
3011 /* Can progress until backup is needed */
3012 max_progress = need_backup;
3013 else {
3014 /* Can progress until metadata update is required */
3015 max_progress =
3016 read_offset *
3017 reshape->after.data_disks;
3018 /* but data must be suspended */
3019 if (max_progress < *suspend_point)
3020 max_progress = *suspend_point;
3021 }
3022 }
3023 }
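/* Editorial worked example (hypothetical numbers): growing 4 -> 5
 * data disks with reshape_progress = 100000 array sectors gives
 * read_offset = 100000/4 = 25000 and write_offset = 100000/5 = 20000
 * per-device sectors.  With new_chunk = 512 KiB, write_range = 1024,
 * so read_offset (25000) is not below write_offset + write_range
 * (21024) and progress may run ahead to read_offset * 5 = 125000
 * without needing a further backup.
 */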
3024
3025 /* We know it is safe to progress to 'max_progress' providing
3026 * it is suspended or we are using native metadata.
3027 * Consider extending suspend_point 128M per device if it
3028 * is less than 64M per device beyond reshape_progress.
3029 * But always do a multiple of 'blocks'
3030 * FIXME this is too big - it takes too long to complete
3031 * this much.
3032 */
3033 target = 64*1024*2 * min(reshape->before.data_disks,
3034 reshape->after.data_disks);
3035 target /= reshape->backup_blocks;
3036 if (target < 2)
3037 target = 2;
3038 target *= reshape->backup_blocks;
3039
3040 /* For externally managed metadata we always need to suspend IO to
3041 * the area being reshaped so we regularly push suspend_point forward.
3042 * For native metadata we only need the suspend if we are going to do
3043 * a backup.
3044 */
3045 if (advancing) {
3046 if ((need_backup > info->reshape_progress
3047 || info->array.major_version < 0) &&
3048 *suspend_point < info->reshape_progress + target) {
3049 if (need_backup < *suspend_point + 2 * target)
3050 *suspend_point = need_backup;
3051 else if (*suspend_point + 2 * target < array_size)
3052 *suspend_point += 2 * target;
3053 else
3054 *suspend_point = array_size;
3055 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
3056 if (max_progress > *suspend_point)
3057 max_progress = *suspend_point;
3058 }
3059 } else {
3060 if (info->array.major_version >= 0) {
3061 /* Only need to suspend when about to backup */
3062 if (info->reshape_progress < need_backup * 2 &&
3063 *suspend_point > 0) {
3064 *suspend_point = 0;
3065 sysfs_set_num(info, NULL, "suspend_lo", 0);
3066 sysfs_set_num(info, NULL, "suspend_hi", need_backup);
3067 }
3068 } else {
3069 /* Need to suspend continually */
3070 if (info->reshape_progress < *suspend_point)
3071 *suspend_point = info->reshape_progress;
3072 if (*suspend_point + target < info->reshape_progress)
3073 /* No need to move suspend region yet */;
3074 else {
3075 if (*suspend_point >= 2 * target)
3076 *suspend_point -= 2 * target;
3077 else
3078 *suspend_point = 0;
3079 sysfs_set_num(info, NULL, "suspend_lo",
3080 *suspend_point);
3081 }
3082 if (max_progress < *suspend_point)
3083 max_progress = *suspend_point;
3084 }
3085 }
3086
3087 /* now set sync_max to allow that progress. sync_max, like
3088 * sync_completed is a count of sectors written per device, so
3089 * we find the difference between max_progress and the start point,
3090 * and divide that by after.data_disks to get a sync_max
3091 * number.
3092 * At the same time we convert wait_point to a similar number
3093 * for comparing against sync_completed.
3094 */
3095 /* scale down max_progress to per_disk */
3096 max_progress /= reshape->after.data_disks;
3097 /* Round to chunk size as some kernels give an erroneously high number */
3098 max_progress /= info->new_chunk/512;
3099 max_progress *= info->new_chunk/512;
3100 /* And round to old chunk size as the kernel wants that */
3101 max_progress /= info->array.chunk_size/512;
3102 max_progress *= info->array.chunk_size/512;
3103 /* Limit progress to the whole device */
3104 if (max_progress > info->component_size)
3105 max_progress = info->component_size;
3106 wait_point /= reshape->after.data_disks;
3107 if (!advancing) {
3108 /* switch from 'device offset' to 'processed block count' */
3109 max_progress = info->component_size - max_progress;
3110 wait_point = info->component_size - wait_point;
3111 }
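/* Editorial worked example (hypothetical numbers): if max_progress is
 * 125000 array sectors and after.data_disks = 5, the per-device value
 * is 25000; rounding down to a 512 KiB new chunk and a 512 KiB old
 * chunk (1024 sectors each) leaves 24576, which is what gets written
 * to sync_max below.  For a shrinking reshape the value is then
 * flipped to component_size - max_progress so that it counts blocks
 * processed from the end of the device.
 */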
3112
3113 sysfs_set_num(info, NULL, "sync_max", max_progress);
3114
3115 /* Now wait. If we have already reached the point that we were
3116 * asked to wait to, don't wait at all, else wait for any change.
3117 * We need to select on 'sync_completed' as that is the place that
3118 * notifications happen, but we are really interested in
3119 * 'reshape_position'
3120 */
3121 fd = sysfs_get_fd(info, NULL, "sync_completed");
3122 if (fd < 0)
3123 goto check_progress;
3124
3125 if (sysfs_fd_get_ll(fd, &completed) < 0)
3126 goto check_progress;
3127
3128 while (completed < max_progress && completed < wait_point) {
3129 /* Check that sync_action is still 'reshape' to avoid
3130 * waiting forever on a dead array
3131 */
3132 char action[20];
3133 fd_set rfds;
3134 if (sysfs_get_str(info, NULL, "sync_action",
3135 action, 20) <= 0 ||
3136 strncmp(action, "reshape", 7) != 0)
3137 break;
3138 /* Some kernels reset 'sync_completed' to zero
3139 * before setting 'sync_action' to 'idle'.
3140 * So we need these extra tests.
3141 */
3142 if (completed == 0 && advancing
3143 && info->reshape_progress > 0)
3144 break;
3145 if (completed == 0 && !advancing
3146 && info->reshape_progress < (info->component_size
3147 * reshape->after.data_disks))
3148 break;
3149 FD_ZERO(&rfds);
3150 FD_SET(fd, &rfds);
3151 select(fd+1, NULL, NULL, &rfds, NULL);
3152 if (sysfs_fd_get_ll(fd, &completed) < 0)
3153 goto check_progress;
3154 }
3155 /* Some kernels reset 'sync_completed' to zero,
3156 * so we need the real position we have reached in md
3157 */
3158 if (completed == 0)
3159 completed = max_progress;
3160
3161 /* some kernels can give an incorrectly high 'completed' number */
3162 completed /= (info->new_chunk/512);
3163 completed *= (info->new_chunk/512);
3164 /* Convert 'completed' back in to a 'progress' number */
3165 completed *= reshape->after.data_disks;
3166 if (!advancing) {
3167 completed = info->component_size * reshape->after.data_disks
3168 - completed;
3169 }
3170 *reshape_completed = completed;
3171
3172 close(fd);
3173
3174 /* We return the need_backup flag. Caller will decide
3175 * how much - a multiple of ->backup_blocks up to *suspend_point
3176 */
3177 if (advancing)
3178 return need_backup > info->reshape_progress;
3179 else
3180 return need_backup >= info->reshape_progress;
3181
3182 check_progress:
3183 /* if we couldn't read a number from sync_completed, then
3184 * either the reshape did complete, or it aborted.
3185 * We can tell which by checking for 'none' in reshape_position.
3186 * If it did abort, then it might immediately restart if it
3187 * was just a device failure that leaves us degraded but
3188 * functioning.
3189 */
3190 strcpy(buf, "hi");
3191 if (sysfs_get_str(info, NULL, "reshape_position", buf, sizeof(buf)) < 0
3192 || strncmp(buf, "none", 4) != 0) {
3193 /* The abort might only be temporary. Wait up to 10
3194 * seconds for fd to contain a valid number again.
3195 */
3196 struct timeval tv;
3197 int rv = -2;
3198 tv.tv_sec = 10;
3199 tv.tv_usec = 0;
3200 while (fd >= 0 && rv < 0 && tv.tv_sec > 0) {
3201 fd_set rfds;
3202 FD_ZERO(&rfds);
3203 FD_SET(fd, &rfds);
3204 if (select(fd+1, NULL, NULL, &rfds, &tv) != 1)
3205 break;
3206 switch (sysfs_fd_get_ll(fd, &completed)) {
3207 case 0:
3208 /* all good again */
3209 rv = 1;
3210 break;
3211 case -2: /* read error - abort */
3212 tv.tv_sec = 0;
3213 break;
3214 }
3215 }
3216 if (fd >= 0)
3217 close(fd);
3218 return rv; /* abort */
3219 } else {
3220 /* Maybe racing with array shutdown - check state */
3221 if (fd >= 0)
3222 close(fd);
3223 if (sysfs_get_str(info, NULL, "array_state", buf, sizeof(buf)) < 0
3224 || strncmp(buf, "inactive", 8) == 0
3225 || strncmp(buf, "clear",5) == 0)
3226 return -2; /* abort */
3227 return -1; /* complete */
3228 }
3229 }
3230
3231 /* FIXME return status is never checked */
3232 static int grow_backup(struct mdinfo *sra,
3233 unsigned long long offset, /* per device */
3234 unsigned long stripes, /* per device, in old chunks */
3235 int *sources, unsigned long long *offsets,
3236 int disks, int chunk, int level, int layout,
3237 int dests, int *destfd, unsigned long long *destoffsets,
3238 int part, int *degraded,
3239 char *buf)
3240 {
3241 /* Backup 'blocks' sectors at 'offset' on each device of the array,
3242 * to storage 'destfd' (offset 'destoffsets'), after first
3243 * suspending IO. Then allow resync to continue
3244 * over the suspended section.
3245 * Use part 'part' of the backup-super-block.
3246 */
3247 int odata = disks;
3248 int rv = 0;
3249 int i;
3250 unsigned long long ll;
3251 int new_degraded;
3252 //printf("offset %llu\n", offset);
3253 if (level >= 4)
3254 odata--;
3255 if (level == 6)
3256 odata--;
3257
3258 /* Check that array hasn't become degraded, else we might backup the wrong data */
3259 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
3260 return -1; /* FIXME this error is ignored */
3261 new_degraded = (int)ll;
3262 if (new_degraded != *degraded) {
3263 /* check each device to ensure it is still working */
3264 struct mdinfo *sd;
3265 for (sd = sra->devs ; sd ; sd = sd->next) {
3266 if (sd->disk.state & (1<<MD_DISK_FAULTY))
3267 continue;
3268 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
3269 char sbuf[20];
3270 if (sysfs_get_str(sra, sd, "state", sbuf, 20) < 0 ||
3271 strstr(sbuf, "faulty") ||
3272 strstr(sbuf, "in_sync") == NULL) {
3273 /* this device is dead */
3274 sd->disk.state = (1<<MD_DISK_FAULTY);
3275 if (sd->disk.raid_disk >= 0 &&
3276 sources[sd->disk.raid_disk] >= 0) {
3277 close(sources[sd->disk.raid_disk]);
3278 sources[sd->disk.raid_disk] = -1;
3279 }
3280 }
3281 }
3282 }
3283 *degraded = new_degraded;
3284 }
3285 if (part) {
3286 bsb.arraystart2 = __cpu_to_le64(offset * odata);
3287 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
3288 } else {
3289 bsb.arraystart = __cpu_to_le64(offset * odata);
3290 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
3291 }
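/* Editorial worked example (hypothetical numbers): backing up 32
 * stripes at per-device offset 10240 with chunk = 512 KiB and
 * odata = 4 data disks records arraystart = 10240 * 4 = 40960 and
 * length = 32 * 1024 * 4 = 131072 array sectors in the chosen half
 * of the backup super-block.
 */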
3292 if (part)
3293 bsb.magic[15] = '2';
3294 for (i = 0; i < dests; i++)
3295 if (part)
3296 lseek64(destfd[i], destoffsets[i] + __le64_to_cpu(bsb.devstart2)*512, 0);
3297 else
3298 lseek64(destfd[i], destoffsets[i], 0);
3299
3300 rv = save_stripes(sources, offsets,
3301 disks, chunk, level, layout,
3302 dests, destfd,
3303 offset*512*odata, stripes * chunk * odata,
3304 buf);
3305
3306 if (rv)
3307 return rv;
3308 bsb.mtime = __cpu_to_le64(time(0));
3309 for (i = 0; i < dests; i++) {
3310 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
3311
3312 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
3313 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
3314 bsb.sb_csum2 = bsb_csum((char*)&bsb,
3315 ((char*)&bsb.sb_csum2)-((char*)&bsb));
3316
3317 rv = -1;
3318 if ((unsigned long long)lseek64(destfd[i], destoffsets[i] - 4096, 0)
3319 != destoffsets[i] - 4096)
3320 break;
3321 if (write(destfd[i], &bsb, 512) != 512)
3322 break;
3323 if (destoffsets[i] > 4096) {
3324 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
3325 destoffsets[i]+stripes*chunk*odata)
3326 break;
3327 if (write(destfd[i], &bsb, 512) != 512)
3328 break;
3329 }
3330 fsync(destfd[i]);
3331 rv = 0;
3332 }
3333
3334 return rv;
3335 }
3336
3337 /* in 2.6.30, the value reported by sync_completed can be
3338 * less than it should be by one stripe.
3339 * This only happens when reshape hits sync_max and pauses.
3340 * So allow wait_backup to either extend sync_max further
3341 * than strictly necessary, or return before the
3342 * sync has got quite as far as we would really like.
3343 * This is what 'blocks2' is for.
3344 * The various callers give appropriate values so that
3345 * everything works.
3346 */
3347 /* FIXME return value is often ignored */
3348 static int forget_backup(int dests, int *destfd,
3349 unsigned long long *destoffsets,
3350 int part)
3351 {
3352 /*
3353 * Erase backup 'part' (which is 0 or 1)
3354 */
3355 int i;
3356 int rv;
3357
3358 if (part) {
3359 bsb.arraystart2 = __cpu_to_le64(0);
3360 bsb.length2 = __cpu_to_le64(0);
3361 } else {
3362 bsb.arraystart = __cpu_to_le64(0);
3363 bsb.length = __cpu_to_le64(0);
3364 }
3365 bsb.mtime = __cpu_to_le64(time(0));
3366 rv = 0;
3367 for (i = 0; i < dests; i++) {
3368 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
3369 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
3370 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
3371 bsb.sb_csum2 = bsb_csum((char*)&bsb,
3372 ((char*)&bsb.sb_csum2)-((char*)&bsb));
3373 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
3374 destoffsets[i]-4096)
3375 rv = -1;
3376 if (rv == 0 &&
3377 write(destfd[i], &bsb, 512) != 512)
3378 rv = -1;
3379 fsync(destfd[i]);
3380 }
3381 return rv;
3382 }
3383
3384 static void fail(char *msg)
3385 {
3386 int rv;
3387 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
3388 rv |= (write(2, "\n", 1) != 1);
3389 exit(rv ? 1 : 2);
3390 }
3391
3392 static char *abuf, *bbuf;
3393 static unsigned long long abuflen;
3394 static void validate(int afd, int bfd, unsigned long long offset)
3395 {
3396 /* check the data in the backup against the array.
3397 * This is only used for regression testing and should not
3398 * be used while the array is active
3399 */
3400 if (afd < 0)
3401 return;
3402 lseek64(bfd, offset - 4096, 0);
3403 if (read(bfd, &bsb2, 512) != 512)
3404 fail("cannot read bsb");
3405 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
3406 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
3407 fail("first csum bad");
3408 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
3409 fail("magic is bad");
3410 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
3411 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
3412 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
3413 fail("second csum bad");
3414
3415 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
3416 fail("devstart is wrong");
3417
3418 if (bsb2.length) {
3419 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
3420
3421 if (abuflen < len) {
3422 free(abuf);
3423 free(bbuf);
3424 abuflen = len;
3425 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
3426 posix_memalign((void**)&bbuf, 4096, abuflen)) {
3427 abuflen = 0;
3428 /* just stop validating on mem-alloc failure */
3429 return;
3430 }
3431 }
3432
3433 lseek64(bfd, offset, 0);
3434 if ((unsigned long long)read(bfd, bbuf, len) != len) {
3435 //printf("len %llu\n", len);
3436 fail("read first backup failed");
3437 }
3438 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
3439 if ((unsigned long long)read(afd, abuf, len) != len)
3440 fail("read first from array failed");
3441 if (memcmp(bbuf, abuf, len) != 0) {
3442 #if 0
3443 int i;
3444 printf("offset=%llu len=%llu\n",
3445 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
3446 for (i=0; i<len; i++)
3447 if (bbuf[i] != abuf[i]) {
3448 printf("first diff byte %d\n", i);
3449 break;
3450 }
3451 #endif
3452 fail("data1 compare failed");
3453 }
3454 }
3455 if (bsb2.length2) {
3456 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
3457
3458 if (abuflen < len) {
3459 free(abuf);
3460 free(bbuf);
3461 abuflen = len;
3462 abuf = xmalloc(abuflen);
3463 bbuf = xmalloc(abuflen);
3464 }
3465
3466 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
3467 if ((unsigned long long)read(bfd, bbuf, len) != len)
3468 fail("read second backup failed");
3469 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
3470 if ((unsigned long long)read(afd, abuf, len) != len)
3471 fail("read second from array failed");
3472 if (memcmp(bbuf, abuf, len) != 0)
3473 fail("data2 compare failed");
3474 }
3475 }
3476
3477 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
3478 struct supertype *st, unsigned long blocks,
3479 int *fds, unsigned long long *offsets,
3480 int dests, int *destfd, unsigned long long *destoffsets)
3481 {
3482 /* Monitor a reshape where backup is being performed using
3483 * 'native' mechanism - either to a backup file, or
3484 * to some space in a spare.
3485 */
3486 char *buf;
3487 int degraded = -1;
3488 unsigned long long speed;
3489 unsigned long long suspend_point, array_size;
3490 unsigned long long backup_point, wait_point;
3491 unsigned long long reshape_completed;
3492 int done = 0;
3493 int increasing = reshape->after.data_disks >= reshape->before.data_disks;
3494 int part = 0; /* The next part of the backup area to fill. It may already
3495 * be full, so we need to check */
3496 int level = reshape->level;
3497 int layout = reshape->before.layout;
3498 int data = reshape->before.data_disks;
3499 int disks = reshape->before.data_disks + reshape->parity;
3500 int chunk = sra->array.chunk_size;
3501 struct mdinfo *sd;
3502 unsigned long stripes;
3503 int uuid[4];
3504
3505 /* set up the backup-super-block. This requires the
3506 * uuid from the array.
3507 */
3508 /* Find a superblock */
3509 for (sd = sra->devs; sd; sd = sd->next) {
3510 char *dn;
3511 int devfd;
3512 int ok;
3513 if (sd->disk.state & (1<<MD_DISK_FAULTY))
3514 continue;
3515 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
3516 devfd = dev_open(dn, O_RDONLY);
3517 if (devfd < 0)
3518 continue;
3519 ok = st->ss->load_super(st, devfd, NULL);
3520 close(devfd);
3521 if (ok == 0)
3522 break;
3523 }
3524 if (!sd) {
3525 pr_err("Cannot find a superblock\n");
3526 return 0;
3527 }
3528
3529 memset(&bsb, 0, 512);
3530 memcpy(bsb.magic, "md_backup_data-1", 16);
3531 st->ss->uuid_from_super(st, uuid);
3532 memcpy(bsb.set_uuid, uuid, 16);
3533 bsb.mtime = __cpu_to_le64(time(0));
3534 bsb.devstart2 = blocks;
3535
3536 stripes = blocks / (sra->array.chunk_size/512) /
3537 reshape->before.data_disks;
3538
3539 if (posix_memalign((void**)&buf, 4096, disks * chunk))
3540 /* Don't start the 'reshape' */
3541 return 0;
3542 if (reshape->before.data_disks == reshape->after.data_disks) {
3543 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
3544 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
3545 }
3546
3547 if (increasing) {
3548 array_size = sra->component_size * reshape->after.data_disks;
3549 backup_point = sra->reshape_progress;
3550 suspend_point = 0;
3551 } else {
3552 array_size = sra->component_size * reshape->before.data_disks;
3553 backup_point = reshape->backup_blocks;
3554 suspend_point = array_size;
3555 }
3556
3557 while (!done) {
3558 int rv;
3559
3560 /* Want to return as soon the oldest backup slot can
3561 * be released as that allows us to start backing up
3562 * some more, providing suspend_point has been
3563 * advanced, which it should have.
3564 */
3565 if (increasing) {
3566 wait_point = array_size;
3567 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
3568 wait_point = (__le64_to_cpu(bsb.arraystart) +
3569 __le64_to_cpu(bsb.length));
3570 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
3571 wait_point = (__le64_to_cpu(bsb.arraystart2) +
3572 __le64_to_cpu(bsb.length2));
3573 } else {
3574 wait_point = 0;
3575 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
3576 wait_point = __le64_to_cpu(bsb.arraystart);
3577 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
3578 wait_point = __le64_to_cpu(bsb.arraystart2);
3579 }
3580
3581 rv = progress_reshape(sra, reshape,
3582 backup_point, wait_point,
3583 &suspend_point, &reshape_completed);
3584 /* external metadata would need to ping_monitor here */
3585 sra->reshape_progress = reshape_completed;
3586
3587 /* Clear any backup region that is before 'here' */
3588 if (increasing) {
3589 if (__le64_to_cpu(bsb.length) > 0 &&
3590 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
3591 __le64_to_cpu(bsb.length)))
3592 forget_backup(dests, destfd,
3593 destoffsets, 0);
3594 if (__le64_to_cpu(bsb.length2) > 0 &&
3595 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
3596 __le64_to_cpu(bsb.length2)))
3597 forget_backup(dests, destfd,
3598 destoffsets, 1);
3599 } else {
3600 if (__le64_to_cpu(bsb.length) > 0 &&
3601 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
3602 forget_backup(dests, destfd,
3603 destoffsets, 0);
3604 if (__le64_to_cpu(bsb.length2) > 0 &&
3605 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
3606 forget_backup(dests, destfd,
3607 destoffsets, 1);
3608 }
3609
3610 if (rv < 0) {
3611 if (rv == -1)
3612 done = 1;
3613 break;
3614 }
3615 if (rv == 0 && increasing && !st->ss->external) {
3616 /* No longer need to monitor this reshape */
3617 done = 1;
3618 break;
3619 }
3620
3621 while (rv) {
3622 unsigned long long offset;
3623 unsigned long actual_stripes;
3624 /* Need to backup some data.
3625 * If 'part' is not used and the desired
3626 * backup size is suspended, do a backup,
3627 * then consider the next part.
3628 */
3629 /* Check that 'part' is unused */
3630 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
3631 break;
3632 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
3633 break;
3634
3635 offset = backup_point / data;
3636 actual_stripes = stripes;
3637 if (increasing) {
3638 if (offset + actual_stripes * (chunk/512) >
3639 sra->component_size)
3640 actual_stripes = ((sra->component_size - offset)
3641 / (chunk/512));
3642 if (offset + actual_stripes * (chunk/512) >
3643 suspend_point/data)
3644 break;
3645 } else {
3646 if (offset < actual_stripes * (chunk/512))
3647 actual_stripes = offset / (chunk/512);
3648 offset -= actual_stripes * (chunk/512);
3649 if (offset < suspend_point/data)
3650 break;
3651 }
3652 if (actual_stripes == 0)
3653 break;
3654 grow_backup(sra, offset, actual_stripes,
3655 fds, offsets,
3656 disks, chunk, level, layout,
3657 dests, destfd, destoffsets,
3658 part, &degraded, buf);
3659 validate(afd, destfd[0], destoffsets[0]);
3660 /* record where 'part' is up to */
3661 part = !part;
3662 if (increasing)
3663 backup_point += actual_stripes * (chunk/512) * data;
3664 else
3665 backup_point -= actual_stripes * (chunk/512) * data;
3666 }
3667 }
3668
3669 /* FIXME maybe call progress_reshape one more time instead */
3670 abort_reshape(sra); /* remove any remaining suspension */
3671 if (reshape->before.data_disks == reshape->after.data_disks)
3672 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
3673 free(buf);
3674 return done;
3675 }
3676
3677 /*
3678 * If any spare contains md_backup_data-1 which is recent wrt mtime,
3679 * write that data into the array and update the super blocks with
3680 * the new reshape_progress
3681 */
3682 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, int cnt,
3683 char *backup_file, int verbose)
3684 {
3685 int i, j;
3686 int old_disks;
3687 unsigned long long *offsets;
3688 unsigned long long nstripe, ostripe;
3689 int ndata, odata;
3690
3691 odata = info->array.raid_disks - info->delta_disks - 1;
3692 if (info->array.level == 6) odata--; /* number of data disks */
3693 ndata = info->array.raid_disks - 1;
3694 if (info->new_level == 6) ndata--;
3695
3696 old_disks = info->array.raid_disks - info->delta_disks;
3697
3698 if (info->delta_disks <= 0)
3699 /* Didn't grow, so the backup file must have
3700 * been used
3701 */
3702 old_disks = cnt;
3703 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
3704 struct mdinfo dinfo;
3705 int fd;
3706 int bsbsize;
3707 char *devname, namebuf[20];
3708 unsigned long long lo, hi;
3709
3710 /* This was a spare and may have some saved data on it.
3711 * Load the superblock, find and load the
3712 * backup_super_block.
3713 * If either fail, go on to next device.
3714 * If the backup contains no new info, just return
3715 * else restore data and update all superblocks
3716 */
3717 if (i == old_disks-1) {
3718 fd = open(backup_file, O_RDONLY);
3719 if (fd<0) {
3720 pr_err("backup file %s inaccessible: %s\n",
3721 backup_file, strerror(errno));
3722 continue;
3723 }
3724 devname = backup_file;
3725 } else {
3726 fd = fdlist[i];
3727 if (fd < 0)
3728 continue;
3729 if (st->ss->load_super(st, fd, NULL))
3730 continue;
3731
3732 st->ss->getinfo_super(st, &dinfo, NULL);
3733 st->ss->free_super(st);
3734
3735 if (lseek64(fd,
3736 (dinfo.data_offset + dinfo.component_size - 8) <<9,
3737 0) < 0) {
3738 pr_err("Cannot seek on device %d\n", i);
3739 continue; /* Cannot seek */
3740 }
3741 sprintf(namebuf, "device-%d", i);
3742 devname = namebuf;
3743 }
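/* Editorial note: the seek above lands at byte offset
 * (data_offset + component_size - 8) * 512, i.e. 8 sectors (4 KiB)
 * before the end of the device's data area, which is where the
 * backup super-block is expected on a spare device.
 */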
3744 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
3745 if (verbose)
3746 pr_err("Cannot read from %s\n", devname);
3747 continue; /* Cannot read */
3748 }
3749 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
3750 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
3751 if (verbose)
3752 pr_err("No backup metadata on %s\n", devname);
3753 continue;
3754 }
3755 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
3756 if (verbose)
3757 pr_err("Bad backup-metadata checksum on %s\n", devname);
3758 continue; /* bad checksum */
3759 }
3760 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
3761 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
3762 if (verbose)
3763 pr_err("Bad backup-metadata checksum2 on %s\n", devname);
3764 continue; /* Bad second checksum */
3765 }
3766 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
3767 if (verbose)
3768 pr_err("Wrong uuid on backup-metadata on %s\n", devname);
3769 continue; /* Wrong uuid */
3770 }
3771
3772 /* array utime and backup-mtime should be updated at much the same time, but it seems that
3773 * sometimes they aren't... So allow considerable flexibility in matching, and allow
3774 * this test to be overridden by an environment variable.
3775 */
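/* Concretely: accept the backup if its mtime is no more than 2 hours
 * older and no more than 10 minutes newer than the array's utime.
 */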
3776 if (info->array.utime > (int)__le64_to_cpu(bsb.mtime) + 2*60*60 ||
3777 info->array.utime < (int)__le64_to_cpu(bsb.mtime) - 10*60) {
3778 if (check_env("MDADM_GROW_ALLOW_OLD")) {
3779 pr_err("accepting backup with timestamp %lu "
3780 "for array with timestamp %lu\n",
3781 (unsigned long)__le64_to_cpu(bsb.mtime),
3782 (unsigned long)info->array.utime);
3783 } else {
3784 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
3785 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
3786 continue; /* time stamp is too bad */
3787 }
3788 }
3789
3790 if (bsb.magic[15] == '1') {
3791 if (bsb.length == 0)
3792 continue;
3793 if (info->delta_disks >= 0) {
3794 /* reshape_progress is increasing */
3795 if (__le64_to_cpu(bsb.arraystart)
3796 + __le64_to_cpu(bsb.length)
3797 < info->reshape_progress) {
3798 nonew:
3799 if (verbose)
3800 pr_err("backup-metadata found on %s but is not needed\n", devname);
3801 continue; /* No new data here */
3802 }
3803 } else {
3804 /* reshape_progress is decreasing */
3805 if (__le64_to_cpu(bsb.arraystart) >=
3806 info->reshape_progress)
3807 goto nonew; /* No new data here */
3808 }
3809 } else {
3810 if (bsb.length == 0 && bsb.length2 == 0)
3811 continue;
3812 if (info->delta_disks >= 0) {
3813 /* reshape_progress is increasing */
3814 if ((__le64_to_cpu(bsb.arraystart)
3815 + __le64_to_cpu(bsb.length)
3816 < info->reshape_progress)
3817 &&
3818 (__le64_to_cpu(bsb.arraystart2)
3819 + __le64_to_cpu(bsb.length2)
3820 < info->reshape_progress))
3821 goto nonew; /* No new data here */
3822 } else {
3823 /* reshape_progress is decreasing */
3824 if (__le64_to_cpu(bsb.arraystart) >=
3825 info->reshape_progress &&
3826 __le64_to_cpu(bsb.arraystart2) >=
3827 info->reshape_progress)
3828 goto nonew; /* No new data here */
3829 }
3830 }
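/* The backup looks usable; seek to the start of the saved data.
 * devstart is in 512-byte sectors, hence the *512.
 */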
3831 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
3832 second_fail:
3833 if (verbose)
3834 pr_err("Failed to verify secondary backup-metadata block on %s\n",
3835 devname);
3836 continue; /* Cannot seek */
3837 }
3838 /* There should be a duplicate backup superblock 4k before here */
3839 if (lseek64(fd, -4096, 1) < 0 ||
3840 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
3841 goto second_fail; /* Cannot find leading superblock */
3842 if (bsb.magic[15] == '1')
3843 bsbsize = offsetof(struct mdp_backup_super, pad1);
3844 else
3845 bsbsize = offsetof(struct mdp_backup_super, pad);
3846 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
3847 goto second_fail; /* Secondary copy does not match the primary */
3848
3849 /* Now need the data offsets for all devices. */
3850 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
3851 for(j=0; j<info->array.raid_disks; j++) {
3852 if (fdlist[j] < 0)
3853 continue;
3854 if (st->ss->load_super(st, fdlist[j], NULL))
3855 /* FIXME should this be an error? */
3856 continue;
3857 st->ss->getinfo_super(st, &dinfo, NULL);
3858 st->ss->free_super(st);
3859 offsets[j] = dinfo.data_offset * 512;
3860 }
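/* offsets[] now holds each member's data_offset in bytes;
 * restore_stripes() writes the saved data back into the array
 * members using the new (post-reshape) geometry.
 */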
3861 printf(Name ": restoring critical section\n");
3862
3863 if (restore_stripes(fdlist, offsets,
3864 info->array.raid_disks,
3865 info->new_chunk,
3866 info->new_level,
3867 info->new_layout,
3868 fd, __le64_to_cpu(bsb.devstart)*512,
3869 __le64_to_cpu(bsb.arraystart)*512,
3870 __le64_to_cpu(bsb.length)*512, NULL)) {
3871 /* didn't succeed, so give up */
3872 if (verbose)
3873 pr_err("Error restoring backup from %s\n",
3874 devname);
3875 free(offsets);
3876 return 1;
3877 }
3878
3879 if (bsb.magic[15] == '2' &&
3880 restore_stripes(fdlist, offsets,
3881 info->array.raid_disks,
3882 info->new_chunk,
3883 info->new_level,
3884 info->new_layout,
3885 fd, __le64_to_cpu(bsb.devstart)*512 +
3886 __le64_to_cpu(bsb.devstart2)*512,
3887 __le64_to_cpu(bsb.arraystart2)*512,
3888 __le64_to_cpu(bsb.length2)*512, NULL)) {
3889 /* didn't succeed, so give up */
3890 if (verbose)
3891 pr_err("Error restoring second backup from %s\n",
3892 devname);
3893 free(offsets);
3894 return 1;
3895 }
3896
3897 free(offsets);
3898
3899 /* Ok, so the data is restored. Let's update those superblocks. */
3900
3901 lo = hi = 0;
3902 if (bsb.length) {
3903 lo = __le64_to_cpu(bsb.arraystart);
3904 hi = lo + __le64_to_cpu(bsb.length);
3905 }
3906 if (bsb.magic[15] == '2' && bsb.length2) {
3907 unsigned long long lo1, hi1;
3908 lo1 = __le64_to_cpu(bsb.arraystart2);
3909 hi1 = lo1 + __le64_to_cpu(bsb.length2);
3910 if (lo == hi) {
3911 lo = lo1;
3912 hi = hi1;
3913 } else if (lo < lo1)
3914 hi = hi1;
3915 else
3916 lo = lo1;
3917 }
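/* lo..hi now covers the combined extent, in array sectors, of the
 * restored range(s).
 */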
3918 if (lo < hi &&
3919 (info->reshape_progress < lo ||
3920 info->reshape_progress > hi))
3921 /* backup does not affect reshape_progress */ ;
3922 else if (info->delta_disks >= 0) {
3923 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
3924 __le64_to_cpu(bsb.length);
3925 if (bsb.magic[15] == '2') {
3926 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2) +
3927 __le64_to_cpu(bsb.length2);
3928 if (p2 > info->reshape_progress)
3929 info->reshape_progress = p2;
3930 }
3931 } else {
3932 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
3933 if (bsb.magic[15] == '2') {
3934 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2);
3935 if (p2 < info->reshape_progress)
3936 info->reshape_progress = p2;
3937 }
3938 }
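/* reshape_progress now sits at the far edge of the restored region
 * (advanced when growing, pulled back when shrinking); record it in
 * every member's superblock below.
 */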
3939 for (j=0; j<info->array.raid_disks; j++) {
3940 if (fdlist[j] < 0)
3941 continue;
3942 if (st->ss->load_super(st, fdlist[j], NULL))
3943 continue;
3944 st->ss->getinfo_super(st, &dinfo, NULL);
3945 dinfo.reshape_progress = info->reshape_progress;
3946 st->ss->update_super(st, &dinfo,
3947 "_reshape_progress",
3948 NULL,0, 0, NULL);
3949 st->ss->store_super(st, fdlist[j]);
3950 st->ss->free_super(st);
3951 }
3952 return 0;
3953 }
3954 /* Didn't find any backup data, try to see if any
3955 * was needed.
3956 */
3957 if (info->delta_disks < 0) {
3958 /* When shrinking, the critical section is at the end.
3959 * So see if we are before the critical section.
3960 */
3961 unsigned long long first_block;
3962 nstripe = ostripe = 0;
3963 first_block = 0;
3964 while (ostripe >= nstripe) {
3965 ostripe += info->array.chunk_size / 512;
3966 first_block = ostripe * odata;
3967 nstripe = first_block / ndata / (info->new_chunk/512) *
3968 (info->new_chunk/512);
3969 }
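/* Worked example: shrinking RAID5 from 6 to 5 devices (odata=5,
 * ndata=4) with 512K chunks (1024 sectors) exits this loop after
 * four iterations with first_block = 20480 sectors.
 */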
3970
3971 if (info->reshape_progress >= first_block)
3972 return 0;
3973 }
3974 if (info->delta_disks > 0) {
3975 /* See if we are beyond the critical section. */
3976 unsigned long long last_block;
3977 nstripe = ostripe = 0;
3978 last_block = 0;
3979 while (nstripe >= ostripe) {
3980 nstripe += info->new_chunk / 512;
3981 last_block = nstripe * ndata;
3982 ostripe = last_block / odata / (info->array.chunk_size/512) *
3983 (info->array.chunk_size/512);
3984 }
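/* Worked example: growing RAID5 from 4 to 5 devices (odata=3,
 * ndata=4) with 512K chunks exits this loop after three iterations
 * with last_block = 12288 sectors.
 */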
3985
3986 if (info->reshape_progress >= last_block)
3987 return 0;
3988 }
3989 /* needed to recover critical section! */
3990 if (verbose)
3991 pr_err("Failed to find backup of critical section\n");
3992 return 1;
3993 }
3994
3995 int Grow_continue_command(char *devname, int fd,
3996 char *backup_file, int verbose)
3997 {
3998 int ret_val = 0;
3999 struct supertype *st = NULL;
4000 struct mdinfo *content = NULL;
4001 struct mdinfo array;
4002 char *subarray = NULL;
4003 struct mdinfo *cc = NULL;
4004 struct mdstat_ent *mdstat = NULL;
4005 char buf[40];
4006 int cfd = -1;
4007 int fd2 = -1;
4008
4009 dprintf("Grow continue from command line called for %s\n",
4010 devname);
4011
4012 st = super_by_fd(fd, &subarray);
4013 if (!st || !st->ss) {
4014 pr_err("Unable to determine metadata format for %s\n",
4015 devname);
4016 return 1;
4017 }
4018 dprintf("Grow continue is run for ");
4019 if (st->ss->external == 0) {
4020 dprintf("native array (%s)\n", devname);
4021 if (ioctl(fd, GET_ARRAY_INFO, &array) < 0) {
4022 pr_err("%s is not an active md array -"
4023 " aborting\n", devname);
4024 ret_val = 1;
4025 goto Grow_continue_command_exit;
4026 }
4027 content = &array;
4028 sysfs_init(content, fd, st->devnum);
4029 } else {
4030 int container_dev;
4031
4032 if (subarray) {
4033 dprintf("subarray (%s)\n", subarray);
4034 container_dev = st->container_dev;
4035 cfd = open_dev_excl(st->container_dev);
4036 } else {
4037 container_dev = st->devnum;
4038 close(fd);
4039 cfd = open_dev_excl(st->devnum);
4040 dprintf("container (%i)\n", container_dev);
4041 fd = cfd;
4042 }
4043 if (cfd < 0) {
4044 pr_err("Unable to open container "
4045 "for %s\n", devname);
4046 ret_val = 1;
4047 goto Grow_continue_command_exit;
4048 }
4049 fmt_devname(buf, container_dev);
4050
4051 /* find the array under reshape in the container
4052 */
4053 ret_val = st->ss->load_container(st, cfd, NULL);
4054 if (ret_val) {
4055 pr_err("Cannot read superblock for %s\n",
4056 devname);
4057 ret_val = 1;
4058 goto Grow_continue_command_exit;
4059 }
4060
4061 cc = st->ss->container_content(st, subarray);
4062 for (content = cc; content ; content = content->next) {
4063 char *array;
4064 int allow_reshape = 1;
4065
4066 if (content->reshape_active == 0)
4067 continue;
4068 /* The decision about an array- or container-wide
4069 * reshape is taken in Grow_continue based on
4070 * content->reshape_active state; therefore we
4071 * need to check reshape based on
4072 * reshape_active and the subarray name
4073 */
4074 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
4075 allow_reshape = 0;
4076 if (content->reshape_active == CONTAINER_RESHAPE &&
4077 (content->array.state
4078 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
4079 allow_reshape = 0;
4080
4081 if (!allow_reshape) {
4082 pr_err("cannot continue reshape of an array"
4083 " in container with unsupported"
4084 " metadata: %s(%s)\n",
4085 devname, buf);
4086 ret_val = 1;
4087 goto Grow_continue_command_exit;
4088 }
4089
4090 array = strchr(content->text_version+1, '/')+1;
4091 mdstat = mdstat_by_subdev(array, container_dev);
4092 if (!mdstat)
4093 continue;
4094 if (mdstat->active == 0) {
4095 pr_err("Skipping inactive "
4096 "array md%i.\n", mdstat->devnum);
4097 free_mdstat(mdstat);
4098 mdstat = NULL;
4099 continue;
4100 }
4101 break;
4102 }
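/* content now points at the first active subarray found to be
 * mid-reshape, or is NULL if none qualified.
 */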
4103 if (!content) {
4104 pr_err("Unable to determine reshaped "
4105 "array for %s\n", devname);
4106 ret_val = 1;
4107 goto Grow_continue_command_exit;
4108 }
4109 fd2 = open_dev(mdstat->devnum);
4110 if (fd2 < 0) {
4111 pr_err("cannot open (md%i)\n",
4112 mdstat->devnum);
4113 ret_val = 1;
4114 goto Grow_continue_command_exit;
4115 }
4116
4117 sysfs_init(content, fd2, mdstat->devnum);
4118
4119 /* start mdmon in case it is not running
4120 */
4121 if (!mdmon_running(container_dev))
4122 start_mdmon(container_dev);
4123 ping_monitor(buf);
4124
4125 if (mdmon_running(container_dev))
4126 st->update_tail = &st->updates;
4127 else {
4128 pr_err("No mdmon found. "
4129 "Grow cannot continue.\n");
4130 ret_val = 1;
4131 goto Grow_continue_command_exit;
4132 }
4133 }
4134
4135 /* verify that the array under reshape is started from
4136 * the correct position
4137 */
4138 if (verify_reshape_position(content,
4139 map_name(pers, mdstat->level)) < 0) {
4140 ret_val = 1;
4141 goto Grow_continue_command_exit;
4142 }
4143
4144 /* continue reshape
4145 */
4146 ret_val = Grow_continue(fd, st, content, backup_file, 0);
4147
4148 Grow_continue_command_exit:
4149 if (fd2 > -1)
4150 close(fd2);
4151 if (cfd > -1)
4152 close(cfd);
4153 st->ss->free_super(st);
4154 free_mdstat(mdstat);
4155 sysfs_free(cc);
4156 free(subarray);
4157
4158 return ret_val;
4159 }
4160
4161 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
4162 char *backup_file, int freeze_reshape)
4163 {
4164 int ret_val = 2;
4165
4166 if (!info->reshape_active)
4167 return ret_val;
4168
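/* For external metadata the whole container is reshaped via
 * reshape_container(); native arrays go straight to reshape_array().
 */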
4169 if (st->ss->external) {
4170 char container[40];
4171 int cfd = open_dev(st->container_dev);
4172
4173 if (cfd < 0)
4174 return 1;
4175
4176 fmt_devname(container, st->container_dev);
4177 st->ss->load_container(st, cfd, container);
4178 close(cfd);
4179 ret_val = reshape_container(container, NULL, mdfd,
4180 st, info, 0, backup_file,
4181 0, 1, freeze_reshape);
4182 } else
4183 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
4184 NULL, backup_file, 0, 0, 1,
4185 freeze_reshape);
4186
4187 return ret_val;
4188 }