1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stddef.h>
28 #include <stdint.h>
29 #include <signal.h>
30 #include <sys/wait.h>
31
32 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
33 #error no endian defined
34 #endif
35 #include "md_u.h"
36 #include "md_p.h"
37
38 int restore_backup(struct supertype *st,
39 struct mdinfo *content,
40 int working_disks,
41 int next_spare,
42 char **backup_filep,
43 int verbose)
44 {
45 int i;
46 int *fdlist;
47 struct mdinfo *dev;
48 int err;
49 int disk_count = next_spare + working_disks;
50 char *backup_file = *backup_filep;
51
52 dprintf("Called restore_backup()\n");
53 fdlist = xmalloc(sizeof(int) * disk_count);
54
55 enable_fds(next_spare);
56 for (i = 0; i < next_spare; i++)
57 fdlist[i] = -1;
58 for (dev = content->devs; dev; dev = dev->next) {
59 char buf[22];
60 int fd;
61 sprintf(buf, "%d:%d",
62 dev->disk.major,
63 dev->disk.minor);
64 fd = dev_open(buf, O_RDWR);
65
66 if (dev->disk.raid_disk >= 0)
67 fdlist[dev->disk.raid_disk] = fd;
68 else
69 fdlist[next_spare++] = fd;
70 }
71
72 if (!backup_file) {
73 backup_file = locate_backup(content->sys_name);
74 *backup_filep = backup_file;
75 }
76
77 if (st->ss->external && st->ss->recover_backup)
78 err = st->ss->recover_backup(st, content);
79 else
80 err = Grow_restart(st, content, fdlist, next_spare,
81 backup_file, verbose > 0);
82
83 while (next_spare > 0) {
84 next_spare--;
85 if (fdlist[next_spare] >= 0)
86 close(fdlist[next_spare]);
87 }
88 free(fdlist);
89 if (err) {
90 pr_err("Failed to restore critical section for reshape - sorry.\n");
91 if (!backup_file)
92 pr_err("Possibly you need to specify a --backup-file\n");
93 return 1;
94 }
95
96 dprintf("restore_backup() returns status OK.\n");
97 return 0;
98 }
99
100 int Grow_Add_device(char *devname, int fd, char *newdev)
101 {
102 /* Add a device to an active array.
103 * Currently, just extend a linear array.
104 * This requires writing a new superblock on the
105 * new device, calling the kernel to add the device,
106 * and if that succeeds, update the superblock on
107 * all other devices.
108 * This means that we need to *find* all other devices.
109 */
110 struct mdinfo info;
111
112 struct stat stb;
113 int nfd, fd2;
114 int d, nd;
115 struct supertype *st = NULL;
116 char *subarray = NULL;
117
118 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
119 pr_err("cannot get array info for %s\n", devname);
120 return 1;
121 }
122
123 if (info.array.level != -1) {
124 pr_err("can only add devices to linear arrays\n");
125 return 1;
126 }
127
128 st = super_by_fd(fd, &subarray);
129 if (!st) {
130 pr_err("cannot handle arrays with superblock version %d\n",
131 info.array.major_version);
132 return 1;
133 }
134
135 if (subarray) {
136 pr_err("Cannot grow linear sub-arrays yet\n");
137 free(subarray);
138 free(st);
139 return 1;
140 }
141
142 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
143 if (nfd < 0) {
144 pr_err("cannot open %s\n", newdev);
145 free(st);
146 return 1;
147 }
148 fstat(nfd, &stb);
149 if ((stb.st_mode & S_IFMT) != S_IFBLK) {
150 pr_err("%s is not a block device!\n", newdev);
151 close(nfd);
152 free(st);
153 return 1;
154 }
155 /* now check out all the devices and make sure we can read the
156 * superblock */
157 for (d=0 ; d < info.array.raid_disks ; d++) {
158 mdu_disk_info_t disk;
159 char *dv;
160
161 st->ss->free_super(st);
162
163 disk.number = d;
164 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
165 pr_err("cannot get device detail for device %d\n",
166 d);
167 close(nfd);
168 free(st);
169 return 1;
170 }
171 dv = map_dev(disk.major, disk.minor, 1);
172 if (!dv) {
173 pr_err("cannot find device file for device %d\n",
174 d);
175 close(nfd);
176 free(st);
177 return 1;
178 }
179 fd2 = dev_open(dv, O_RDWR);
180 if (fd2 < 0) {
181 pr_err("cannot open device file %s\n", dv);
182 close(nfd);
183 free(st);
184 return 1;
185 }
186
187 if (st->ss->load_super(st, fd2, NULL)) {
188 pr_err("cannot find super block on %s\n", dv);
189 close(nfd);
190 close(fd2);
191 free(st);
192 return 1;
193 }
194 close(fd2);
195 }
196 /* Ok, looks good. Let's update the superblock and write it out to
197 * newdev.
198 */
199
200 info.disk.number = d;
201 info.disk.major = major(stb.st_rdev);
202 info.disk.minor = minor(stb.st_rdev);
203 info.disk.raid_disk = d;
204 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
205 st->ss->update_super(st, &info, "linear-grow-new", newdev,
206 0, 0, NULL);
207
208 if (st->ss->store_super(st, nfd)) {
209 pr_err("Cannot store new superblock on %s\n",
210 newdev);
211 close(nfd);
212 return 1;
213 }
214 close(nfd);
215
216 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
217 pr_err("Cannot add new disk to this array\n");
218 return 1;
219 }
220 /* Well, that seems to have worked.
221 * Now go through and update all superblocks
222 */
223
224 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
225 pr_err("cannot get array info for %s\n", devname);
226 return 1;
227 }
228
229 nd = d;
230 for (d=0 ; d < info.array.raid_disks ; d++) {
231 mdu_disk_info_t disk;
232 char *dv;
233
234 disk.number = d;
235 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
236 pr_err("cannot get device detail for device %d\n",
237 d);
238 return 1;
239 }
240 dv = map_dev(disk.major, disk.minor, 1);
241 if (!dv) {
242 pr_err("cannot find device file for device %d\n",
243 d);
244 return 1;
245 }
246 fd2 = dev_open(dv, O_RDWR);
247 if (fd2 < 0) {
248 pr_err("cannot open device file %s\n", dv);
249 return 1;
250 }
251 if (st->ss->load_super(st, fd2, NULL)) {
252 pr_err("cannot find super block on %s\n", dv);
253 close(fd2);
254 return 1;
255 }
256 info.array.raid_disks = nd+1;
257 info.array.nr_disks = nd+1;
258 info.array.active_disks = nd+1;
259 info.array.working_disks = nd+1;
260
261 st->ss->update_super(st, &info, "linear-grow-update", dv,
262 0, 0, NULL);
263
264 if (st->ss->store_super(st, fd2)) {
265 pr_err("Cannot store new superblock on %s\n", dv);
266 close(fd2);
267 return 1;
268 }
269 close(fd2);
270 }
271
272 return 0;
273 }
274
275 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
276 {
277 /*
278 * First check that array doesn't have a bitmap
279 * Then create the bitmap
280 * Then add it
281 *
282 * For internal bitmaps, we need to check the version,
283 * find all the active devices, and write the bitmap block
284 * to all devices
285 */
286 mdu_bitmap_file_t bmf;
287 mdu_array_info_t array;
288 struct supertype *st;
289 char *subarray = NULL;
290 int major = BITMAP_MAJOR_HI;
291 int vers = md_get_version(fd);
292 unsigned long long bitmapsize, array_size;
293
294 if (vers < 9003) {
295 major = BITMAP_MAJOR_HOSTENDIAN;
296 pr_err("Warning - bitmaps created on this kernel are not portable\n"
297 " between different architectures. Consider upgrading the Linux kernel.\n");
298 }
299
300 if (s->bitmap_file && strcmp(s->bitmap_file, "clustered") == 0)
301 major = BITMAP_MAJOR_CLUSTERED;
302
303 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
304 if (errno == ENOMEM)
305 pr_err("Memory allocation failure.\n");
306 else
307 pr_err("bitmaps not supported by this kernel.\n");
308 return 1;
309 }
310 if (bmf.pathname[0]) {
311 if (strcmp(s->bitmap_file,"none")==0) {
312 if (ioctl(fd, SET_BITMAP_FILE, -1)!= 0) {
313 pr_err("failed to remove bitmap %s\n",
314 bmf.pathname);
315 return 1;
316 }
317 return 0;
318 }
319 pr_err("%s already has a bitmap (%s)\n",
320 devname, bmf.pathname);
321 return 1;
322 }
323 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
324 pr_err("cannot get array status for %s\n", devname);
325 return 1;
326 }
327 if (array.state & (1<<MD_SB_BITMAP_PRESENT)) {
328 if (strcmp(s->bitmap_file, "none")==0) {
329 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
330 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
331 if (array.state & (1<<MD_SB_CLUSTERED))
332 pr_err("failed to remove clustered bitmap.\n");
333 else
334 pr_err("failed to remove internal bitmap.\n");
335 return 1;
336 }
337 return 0;
338 }
339 pr_err("bitmap already present on %s\n", devname);
340 return 1;
341 }
342
343 if (strcmp(s->bitmap_file, "none") == 0) {
344 pr_err("no bitmap found on %s\n", devname);
345 return 1;
346 }
347 if (array.level <= 0) {
348 pr_err("Bitmaps not meaningful with level %s\n",
349 map_num(pers, array.level)?:"of this array");
350 return 1;
351 }
352 bitmapsize = array.size;
353 bitmapsize <<= 1;
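/* array.size is in KiB; shifting left by one expresses the per-device
 * size in 512-byte sectors, the unit used for 'bitmapsize' below.
 */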
354 if (get_dev_size(fd, NULL, &array_size) &&
355 array_size > (0x7fffffffULL<<9)) {
356 /* Array is big enough that we cannot trust array.size
357 * try other approaches
358 */
359 bitmapsize = get_component_size(fd);
360 }
361 if (bitmapsize == 0) {
362 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
363 return 1;
364 }
365
366 if (array.level == 10) {
367 int ncopies = (array.layout&255)*((array.layout>>8)&255);
368 bitmapsize = bitmapsize * array.raid_disks / ncopies;
369 }
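/* Illustrative example: a 4-disk near-2 array (layout 0x102) has
 * ncopies = 2, so the bitmap must cover raid_disks / ncopies = 2 times
 * the per-device size, i.e. the array's full data capacity.
 */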
370
371 st = super_by_fd(fd, &subarray);
372 if (!st) {
373 pr_err("Cannot understand version %d.%d\n",
374 array.major_version, array.minor_version);
375 return 1;
376 }
377 if (subarray) {
378 pr_err("Cannot add bitmaps to sub-arrays yet\n");
379 free(subarray);
380 free(st);
381 return 1;
382 }
383 if (strcmp(s->bitmap_file, "internal") == 0 ||
384 strcmp(s->bitmap_file, "clustered") == 0) {
385 int rv;
386 int d;
387 int offset_setable = 0;
388 struct mdinfo *mdi;
389 if (st->ss->add_internal_bitmap == NULL) {
390 pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name);
391 return 1;
392 }
393 st->nodes = c->nodes;
394 st->cluster_name = c->homecluster;
395 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
396 if (mdi)
397 offset_setable = 1;
398 for (d=0; d< st->max_devs; d++) {
399 mdu_disk_info_t disk;
400 char *dv;
401 disk.number = d;
402 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
403 continue;
404 if (disk.major == 0 &&
405 disk.minor == 0)
406 continue;
407 if ((disk.state & (1<<MD_DISK_SYNC))==0)
408 continue;
409 dv = map_dev(disk.major, disk.minor, 1);
410 if (dv) {
411 int fd2 = dev_open(dv, O_RDWR);
412 if (fd2 < 0)
413 continue;
414 if (st->ss->load_super(st, fd2, NULL)==0) {
415 if (st->ss->add_internal_bitmap(
416 st,
417 &s->bitmap_chunk, c->delay, s->write_behind,
418 bitmapsize, offset_setable,
419 major)
420 )
421 st->ss->write_bitmap(st, fd2, NoUpdate);
422 else {
423 pr_err("failed to create internal bitmap - chunksize problem.\n");
424 close(fd2);
425 return 1;
426 }
427 }
428 close(fd2);
429 }
430 }
431 if (offset_setable) {
432 st->ss->getinfo_super(st, mdi, NULL);
433 sysfs_init(mdi, fd, NULL);
434 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
435 mdi->bitmap_offset);
436 } else {
437 if (strcmp(s->bitmap_file, "clustered") == 0)
438 array.state |= (1<<MD_SB_CLUSTERED);
439 array.state |= (1<<MD_SB_BITMAP_PRESENT);
440 rv = ioctl(fd, SET_ARRAY_INFO, &array);
441 }
442 if (rv < 0) {
443 if (errno == EBUSY)
444 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
445 pr_err("failed to set internal bitmap.\n");
446 return 1;
447 }
448 } else {
449 int uuid[4];
450 int bitmap_fd;
451 int d;
452 int max_devs = st->max_devs;
453
454 /* try to load a superblock */
455 for (d = 0; d < max_devs; d++) {
456 mdu_disk_info_t disk;
457 char *dv;
458 int fd2;
459 disk.number = d;
460 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
461 continue;
462 if ((disk.major==0 && disk.minor==0) ||
463 (disk.state & (1<<MD_DISK_REMOVED)))
464 continue;
465 dv = map_dev(disk.major, disk.minor, 1);
466 if (!dv)
467 continue;
468 fd2 = dev_open(dv, O_RDONLY);
469 if (fd2 >= 0) {
470 if (st->ss->load_super(st, fd2, NULL) == 0) {
471 close(fd2);
472 st->ss->uuid_from_super(st, uuid);
473 break;
474 }
475 close(fd2);
476 }
477 }
478 if (d == max_devs) {
479 pr_err("cannot find UUID for array!\n");
480 return 1;
481 }
482 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid, s->bitmap_chunk,
483 c->delay, s->write_behind, bitmapsize, major)) {
484 return 1;
485 }
486 bitmap_fd = open(s->bitmap_file, O_RDWR);
487 if (bitmap_fd < 0) {
488 pr_err("weird: %s cannot be opened\n",
489 s->bitmap_file);
490 return 1;
491 }
492 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
493 int err = errno;
494 if (errno == EBUSY)
495 pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n");
496 pr_err("Cannot set bitmap file for %s: %s\n",
497 devname, strerror(err));
498 return 1;
499 }
500 }
501
502 return 0;
503 }
504
505 /*
506 * When reshaping an array we might need to back up some data.
507 * This is written to all spares with a 'super_block' describing it.
508 * The superblock goes 4K from the end of the used space on the
509 * device.
510 * It is written after the backup is complete.
511 * It has the following structure.
512 */
513
514 static struct mdp_backup_super {
515 char magic[16]; /* md_backup_data-1 or -2 */
516 __u8 set_uuid[16];
517 __u64 mtime;
518 /* start/sizes in 512byte sectors */
519 __u64 devstart; /* address on backup device/file of data */
520 __u64 arraystart;
521 __u64 length;
522 __u32 sb_csum; /* csum of preceding bytes. */
523 __u32 pad1;
524 __u64 devstart2; /* offset in to data of second section */
525 __u64 arraystart2;
526 __u64 length2;
527 __u32 sb_csum2; /* csum of preceding bytes. */
528 __u8 pad[512-68-32];
529 } __attribute__((aligned(512))) bsb, bsb2;
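/* Layout check: the fields up to and including sb_csum occupy 68 bytes,
 * pad1 plus the second-section fields add another 32, and pad[512-68-32]
 * fills the remainder, so the structure is exactly one 512-byte sector.
 */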
530
531 static __u32 bsb_csum(char *buf, int len)
532 {
533 int i;
534 int csum = 0;
535 for (i = 0; i < len; i++)
536 csum = (csum<<3) + buf[0];
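/* Note: the loop adds buf[0] on every pass rather than buf[i].
 * Changing this would invalidate the checksums in backup superblocks
 * written by existing mdadm versions, so it is left as-is.
 */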
537 return __cpu_to_le32(csum);
538 }
539
540 static int check_idle(struct supertype *st)
541 {
542 /* Check that all member arrays for this container, or the
543 * container of this array, are idle
544 */
545 char *container = (st->container_devnm[0]
546 ? st->container_devnm : st->devnm);
547 struct mdstat_ent *ent, *e;
548 int is_idle = 1;
549
550 ent = mdstat_read(0, 0);
551 for (e = ent ; e; e = e->next) {
552 if (!is_container_member(e, container))
553 continue;
554 if (e->percent >= 0) {
555 is_idle = 0;
556 break;
557 }
558 }
559 free_mdstat(ent);
560 return is_idle;
561 }
562
563 static int freeze_container(struct supertype *st)
564 {
565 char *container = (st->container_devnm[0]
566 ? st->container_devnm : st->devnm);
567
568 if (!check_idle(st))
569 return -1;
570
571 if (block_monitor(container, 1)) {
572 pr_err("failed to freeze container\n");
573 return -2;
574 }
575
576 return 1;
577 }
578
579 static void unfreeze_container(struct supertype *st)
580 {
581 char *container = (st->container_devnm[0]
582 ? st->container_devnm : st->devnm);
583
584 unblock_monitor(container, 1);
585 }
586
587 static int freeze(struct supertype *st)
588 {
589 /* Try to freeze resync/rebuild on this array/container.
590 * Return -1 if the array is busy,
591 * return -2 if the container cannot be frozen,
592 * return 0 if this kernel doesn't support 'frozen'
593 * return 1 if it worked.
594 */
595 if (st->ss->external)
596 return freeze_container(st);
597 else {
598 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
599 int err;
600 char buf[20];
601
602 if (!sra)
603 return -1;
604 /* Need to clear any 'read-auto' status */
605 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
606 strncmp(buf, "read-auto", 9) == 0)
607 sysfs_set_str(sra, NULL, "array_state", "clean");
608
609 err = sysfs_freeze_array(sra);
610 sysfs_free(sra);
611 return err;
612 }
613 }
614
615 static void unfreeze(struct supertype *st)
616 {
617 if (st->ss->external)
618 return unfreeze_container(st);
619 else {
620 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
621 char buf[20];
622
623 if (sra &&
624 sysfs_get_str(sra, NULL, "sync_action", buf, 20) > 0
625 && strcmp(buf, "frozen\n") == 0)
626 sysfs_set_str(sra, NULL, "sync_action", "idle");
627 sysfs_free(sra);
628 }
629 }
630
631 static void wait_reshape(struct mdinfo *sra)
632 {
633 int fd = sysfs_get_fd(sra, NULL, "sync_action");
634 char action[20];
635
636 if (fd < 0)
637 return;
638
639 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
640 strncmp(action, "reshape", 7) == 0)
641 sysfs_wait(fd, NULL);
642 close(fd);
643 }
644
645 static int reshape_super(struct supertype *st, unsigned long long size,
646 int level, int layout, int chunksize, int raid_disks,
647 int delta_disks, char *backup_file, char *dev,
648 int direction, int verbose)
649 {
650 /* nothing extra to check in the native case */
651 if (!st->ss->external)
652 return 0;
653 if (!st->ss->reshape_super ||
654 !st->ss->manage_reshape) {
655 pr_err("%s metadata does not support reshape\n",
656 st->ss->name);
657 return 1;
658 }
659
660 return st->ss->reshape_super(st, size, level, layout, chunksize,
661 raid_disks, delta_disks, backup_file, dev,
662 direction, verbose);
663 }
664
665 static void sync_metadata(struct supertype *st)
666 {
667 if (st->ss->external) {
668 if (st->update_tail) {
669 flush_metadata_updates(st);
670 st->update_tail = &st->updates;
671 } else
672 st->ss->sync_metadata(st);
673 }
674 }
675
676 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
677 {
678 /* when dealing with external metadata subarrays we need to be
679 * prepared to handle EAGAIN. The kernel may need to wait for
680 * mdmon to mark the array active so the kernel can handle
681 * allocations/writeback when preparing the reshape action
682 * (md_allow_write()). We temporarily disable safe_mode_delay
683 * to close a race with the array_state going clean before the
684 * next write to raid_disks / stripe_cache_size
685 */
686 char safe[50];
687 int rc;
688
689 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
690 if (!container ||
691 (strcmp(name, "raid_disks") != 0 &&
692 strcmp(name, "stripe_cache_size") != 0))
693 return sysfs_set_num(sra, NULL, name, n);
694
695 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
696 if (rc <= 0)
697 return -1;
698 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
699 rc = sysfs_set_num(sra, NULL, name, n);
700 if (rc < 0 && errno == EAGAIN) {
701 ping_monitor(container);
702 /* if we get EAGAIN here then the monitor is not active
703 * so stop trying
704 */
705 rc = sysfs_set_num(sra, NULL, name, n);
706 }
707 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
708 return rc;
709 }
710
711 int start_reshape(struct mdinfo *sra, int already_running,
712 int before_data_disks, int data_disks)
713 {
714 int err;
715 unsigned long long sync_max_to_set;
716
717 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
718 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
719 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
720 sra->reshape_progress);
721 if (before_data_disks <= data_disks)
722 sync_max_to_set = sra->reshape_progress / data_disks;
723 else
724 sync_max_to_set = (sra->component_size * data_disks
725 - sra->reshape_progress) / data_disks;
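/* reshape_progress and suspend_{lo,hi} are addresses in array-data
 * sectors, while sync_min/sync_max are per-device sector counts, hence
 * the division by data_disks.  When the reshape reduces the number of
 * data disks it proceeds from the end of the array, so progress is
 * converted relative to the far end.
 */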
726 if (!already_running)
727 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
728 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
729 if (!already_running && err == 0) {
730 int cnt = 5;
731 do {
732 err = sysfs_set_str(sra, NULL, "sync_action", "reshape");
733 if (err)
734 sleep(1);
735 } while (err && errno == EBUSY && cnt-- > 0);
736 }
737 return err;
738 }
739
740 void abort_reshape(struct mdinfo *sra)
741 {
742 sysfs_set_str(sra, NULL, "sync_action", "idle");
743 /*
744 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
745 * suspend_hi to decrease as well as increase.")
746 * you could only increase suspend_{lo,hi} unless the region they
747 * covered was empty. So to reset to 0, you need to push suspend_lo
748 * up past suspend_hi first. So to maximize the chance of mdadm
749 * working on all kernels, we want to keep doing that.
750 */
751 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
752 sysfs_set_num(sra, NULL, "suspend_hi", 0);
753 sysfs_set_num(sra, NULL, "suspend_lo", 0);
754 sysfs_set_num(sra, NULL, "sync_min", 0);
755 // It isn't safe to reset sync_max as we aren't monitoring.
756 // Array really should be stopped at this point.
757 }
758
759 int remove_disks_for_takeover(struct supertype *st,
760 struct mdinfo *sra,
761 int layout)
762 {
763 int nr_of_copies;
764 struct mdinfo *remaining;
765 int slot;
766
767 if (sra->array.level == 10)
768 nr_of_copies = layout & 0xff;
769 else if (sra->array.level == 1)
770 nr_of_copies = sra->array.raid_disks;
771 else
772 return 1;
773
774 remaining = sra->devs;
775 sra->devs = NULL;
776 /* for each 'copy', select one device and remove from the list. */
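/* e.g. a 4-device near-2 RAID10 (nr_of_copies == 2) keeps one good
 * device from slots {0,1} and one from {2,3}; whatever is left on
 * 'remaining' is failed and removed further down.
 */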
777 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
778 struct mdinfo **diskp;
779 int found = 0;
780
781 /* Find a working device to keep */
782 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
783 struct mdinfo *disk = *diskp;
784
785 if (disk->disk.raid_disk < slot)
786 continue;
787 if (disk->disk.raid_disk >= slot + nr_of_copies)
788 continue;
789 if (disk->disk.state & (1<<MD_DISK_REMOVED))
790 continue;
791 if (disk->disk.state & (1<<MD_DISK_FAULTY))
792 continue;
793 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
794 continue;
795
796 /* We have found a good disk to use! */
797 *diskp = disk->next;
798 disk->next = sra->devs;
799 sra->devs = disk;
800 found = 1;
801 break;
802 }
803 if (!found)
804 break;
805 }
806
807 if (slot < sra->array.raid_disks) {
808 /* didn't find all slots */
809 struct mdinfo **e;
810 e = &remaining;
811 while (*e)
812 e = &(*e)->next;
813 *e = sra->devs;
814 sra->devs = remaining;
815 return 1;
816 }
817
818 /* Remove all 'remaining' devices from the array */
819 while (remaining) {
820 struct mdinfo *sd = remaining;
821 remaining = sd->next;
822
823 sysfs_set_str(sra, sd, "state", "faulty");
824 sysfs_set_str(sra, sd, "slot", "none");
825 /* for external metadata disks should be removed in mdmon */
826 if (!st->ss->external)
827 sysfs_set_str(sra, sd, "state", "remove");
828 sd->disk.state |= (1<<MD_DISK_REMOVED);
829 sd->disk.state &= ~(1<<MD_DISK_SYNC);
830 sd->next = sra->devs;
831 sra->devs = sd;
832 }
833 return 0;
834 }
835
836 void reshape_free_fdlist(int *fdlist,
837 unsigned long long *offsets,
838 int size)
839 {
840 int i;
841
842 for (i = 0; i < size; i++)
843 if (fdlist[i] >= 0)
844 close(fdlist[i]);
845
846 free(fdlist);
847 free(offsets);
848 }
849
850 int reshape_prepare_fdlist(char *devname,
851 struct mdinfo *sra,
852 int raid_disks,
853 int nrdisks,
854 unsigned long blocks,
855 char *backup_file,
856 int *fdlist,
857 unsigned long long *offsets)
858 {
859 int d = 0;
860 struct mdinfo *sd;
861
862 enable_fds(nrdisks);
863 for (d = 0; d <= nrdisks; d++)
864 fdlist[d] = -1;
865 d = raid_disks;
866 for (sd = sra->devs; sd; sd = sd->next) {
867 if (sd->disk.state & (1<<MD_DISK_FAULTY))
868 continue;
869 if (sd->disk.state & (1<<MD_DISK_SYNC) &&
870 sd->disk.raid_disk < raid_disks) {
871 char *dn = map_dev(sd->disk.major,
872 sd->disk.minor, 1);
873 fdlist[sd->disk.raid_disk]
874 = dev_open(dn, O_RDONLY);
875 offsets[sd->disk.raid_disk] = sd->data_offset*512;
876 if (fdlist[sd->disk.raid_disk] < 0) {
877 pr_err("%s: cannot open component %s\n",
878 devname, dn ? dn : "-unknown-");
879 d = -1;
880 goto release;
881 }
882 } else if (backup_file == NULL) {
883 /* spare */
884 char *dn = map_dev(sd->disk.major,
885 sd->disk.minor, 1);
886 fdlist[d] = dev_open(dn, O_RDWR);
887 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
888 if (fdlist[d] < 0) {
889 pr_err("%s: cannot open component %s\n",
890 devname, dn ? dn : "-unknown-");
891 d = -1;
892 goto release;
893 }
894 d++;
895 }
896 }
897 release:
898 return d;
899 }
900
901 int reshape_open_backup_file(char *backup_file,
902 int fd,
903 char *devname,
904 long blocks,
905 int *fdlist,
906 unsigned long long *offsets,
907 char *sys_name,
908 int restart)
909 {
910 /* Return 1 on success, 0 on any form of failure */
911 /* need to check backup file is large enough */
912 char buf[512];
913 struct stat stb;
914 unsigned int dev;
915 int i;
916
917 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
918 S_IRUSR | S_IWUSR);
919 *offsets = 8 * 512;
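/* The backup file holds an 8-sector (4K) header for the backup
 * superblock followed by 'blocks' sectors of saved data; *offsets
 * records where the data region begins.
 */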
920 if (*fdlist < 0) {
921 pr_err("%s: cannot create backup file %s: %s\n",
922 devname, backup_file, strerror(errno));
923 return 0;
924 }
925 /* Guard against backup file being on array device.
926 * If array is partitioned or if LVM etc is in the
927 * way this will not notice, but it is better than
928 * nothing.
929 */
930 fstat(*fdlist, &stb);
931 dev = stb.st_dev;
932 fstat(fd, &stb);
933 if (stb.st_rdev == dev) {
934 pr_err("backup file must NOT be on the array being reshaped.\n");
935 close(*fdlist);
936 return 0;
937 }
938
939 memset(buf, 0, 512);
940 for (i=0; i < blocks + 8 ; i++) {
941 if (write(*fdlist, buf, 512) != 512) {
942 pr_err("%s: cannot create backup file %s: %s\n",
943 devname, backup_file, strerror(errno));
944 return 0;
945 }
946 }
947 if (fsync(*fdlist) != 0) {
948 pr_err("%s: cannot create backup file %s: %s\n",
949 devname, backup_file, strerror(errno));
950 return 0;
951 }
952
953 if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) {
954 char *bu = make_backup(sys_name);
955 if (symlink(backup_file, bu))
956 pr_err("Recording backup file in " MAP_DIR " failed: %s\n",
957 strerror(errno));
958 free(bu);
959 }
960
961 return 1;
962 }
963
964 unsigned long compute_backup_blocks(int nchunk, int ochunk,
965 unsigned int ndata, unsigned int odata)
966 {
967 unsigned long a, b, blocks;
968 * So how much do we need to back up?
969 * We need an amount of data which is both a whole number of
970 * old stripes and a whole number of new stripes.
971 * So LCM for (chunksize*datadisks).
972 */
973 a = (ochunk/512) * odata;
974 b = (nchunk/512) * ndata;
975 /* Find GCD */
976 a = GCD(a, b);
977 /* LCM == product / GCD */
978 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
979
980 return blocks;
981 }
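/* Worked example (illustrative): growing from 3 to 4 data disks with a
 * 512K chunk in both shapes gives a = 1024*3 = 3072 and b = 1024*4 = 4096;
 * GCD(3072, 4096) = 1024, so blocks = 1024*1024*3*4/1024 = 12288 sectors
 * (6MiB) - exactly four old stripes and three new stripes.
 */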
982
983 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
984 {
985 /* Based on the current array state in info->array and
986 * the changes in info->new_* etc, determine:
987 * - whether the change is possible
988 * - Intermediate level/raid_disks/layout
989 * - whether a restriping reshape is needed
990 * - number of sectors in minimum change unit. This
991 * will cover a whole number of stripes in 'before' and
992 * 'after'.
993 *
994 * Return message if the change should be rejected
995 * NULL if the change can be achieved
996 *
997 * This can be called as part of starting a reshape, or
998 * when assembling an array that is undergoing reshape.
999 */
1000 int near, far, offset, copies;
1001 int new_disks;
1002 int old_chunk, new_chunk;
1003 /* delta_parity records change in number of devices
1004 * caused by level change
1005 */
1006 int delta_parity = 0;
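/* e.g. a RAID5 -> RAID6 conversion adds one parity device
 * (delta_parity = 1) while RAID6 -> RAID5 removes one
 * (delta_parity = -1), independent of any requested change in disks.
 */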
1007
1008 memset(re, 0, sizeof(*re));
1009
1010 /* If a new level is not explicitly given, we assume no change */
1011 if (info->new_level == UnSet)
1012 info->new_level = info->array.level;
1013
1014 if (info->new_chunk)
1015 switch (info->new_level) {
1016 case 0:
1017 case 4:
1018 case 5:
1019 case 6:
1020 case 10:
1021 /* chunk size is meaningful, must divide component_size
1022 * evenly
1023 */
1024 if (info->component_size % (info->new_chunk/512)) {
1025 unsigned long long shrink = info->component_size;
1026 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
1027 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
1028 info->new_chunk/1024, info->component_size/2);
1029 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1030 devname, shrink/2);
1031 pr_err("will shrink the array so the given chunk size would work.\n");
1032 return "";
1033 }
1034 break;
1035 default:
1036 return "chunk size not meaningful for this level";
1037 }
1038 else
1039 info->new_chunk = info->array.chunk_size;
1040
1041 switch (info->array.level) {
1042 default:
1043 return "No reshape is possible for this RAID level";
1044 case LEVEL_LINEAR:
1045 if (info->delta_disks != UnSet)
1046 return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
1047 else
1048 return "Only --add is supported for LINEAR, other --grow options are not meaningful";
1049 case 1:
1050 /* RAID1 can convert to RAID1 with different disks, or
1051 * raid5 with 2 disks, or
1052 * raid0 with 1 disk
1053 */
1054 if (info->new_level > 1 &&
1055 (info->component_size & 7))
1056 return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
1057 if (info->new_level == 0) {
1058 if (info->delta_disks != UnSet &&
1059 info->delta_disks != 0)
1060 return "Cannot change number of disks with RAID1->RAID0 conversion";
1061 re->level = 0;
1062 re->before.data_disks = 1;
1063 re->after.data_disks = 1;
1064 return NULL;
1065 }
1066 if (info->new_level == 1) {
1067 if (info->delta_disks == UnSet)
1068 /* Don't know what to do */
1069 return "no change requested for Growing RAID1";
1070 re->level = 1;
1071 return NULL;
1072 }
1073 if (info->array.raid_disks == 2 &&
1074 info->new_level == 5) {
1075
1076 re->level = 5;
1077 re->before.data_disks = 1;
1078 if (info->delta_disks != UnSet &&
1079 info->delta_disks != 0)
1080 re->after.data_disks = 1 + info->delta_disks;
1081 else
1082 re->after.data_disks = 1;
1083 if (re->after.data_disks < 1)
1084 return "Number of disks too small for RAID5";
1085
1086 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1087 info->array.chunk_size = 65536;
1088 break;
1089 }
1090 /* Could do some multi-stage conversions, but leave that to
1091 * later.
1092 */
1093 return "Impossible level change request for RAID1";
1094
1095 case 10:
1096 /* RAID10 can be converted from near mode to
1097 * RAID0 by removing some devices.
1098 * It can also be reshaped if the kernel supports
1099 * new_data_offset.
1100 */
1101 switch (info->new_level) {
1102 case 0:
1103 if ((info->array.layout & ~0xff) != 0x100)
1104 return "Cannot Grow RAID10 with far/offset layout";
1105 /* number of devices must be multiple of number of copies */
1106 if (info->array.raid_disks % (info->array.layout & 0xff))
1107 return "RAID10 layout too complex for Grow operation";
1108
1109 new_disks = (info->array.raid_disks
1110 / (info->array.layout & 0xff));
1111 if (info->delta_disks == UnSet)
1112 info->delta_disks = (new_disks
1113 - info->array.raid_disks);
1114
1115 if (info->delta_disks != new_disks - info->array.raid_disks)
1116 return "New number of raid-devices impossible for RAID10";
1117 if (info->new_chunk &&
1118 info->new_chunk != info->array.chunk_size)
1119 return "Cannot change chunk-size with RAID10 Grow";
1120
1121 /* looks good */
1122 re->level = 0;
1123 re->before.data_disks = new_disks;
1124 re->after.data_disks = re->before.data_disks;
1125 return NULL;
1126
1127 case 10:
1128 near = info->array.layout & 0xff;
1129 far = (info->array.layout >> 8) & 0xff;
1130 offset = info->array.layout & 0x10000;
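/* e.g. the default 'near=2' layout is 0x102:
 * near = 2, far = 1, offset bit clear.
 */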
1131 if (far > 1 && !offset)
1132 return "Cannot reshape RAID10 in far-mode";
1133 copies = near * far;
1134
1135 old_chunk = info->array.chunk_size * far;
1136
1137 if (info->new_layout == UnSet)
1138 info->new_layout = info->array.layout;
1139 else {
1140 near = info->new_layout & 0xff;
1141 far = (info->new_layout >> 8) & 0xff;
1142 offset = info->new_layout & 0x10000;
1143 if (far > 1 && !offset)
1144 return "Cannot reshape RAID10 to far-mode";
1145 if (near * far != copies)
1146 return "Cannot change number of copies when reshaping RAID10";
1147 }
1148 if (info->delta_disks == UnSet)
1149 info->delta_disks = 0;
1150 new_disks = (info->array.raid_disks +
1151 info->delta_disks);
1152
1153 new_chunk = info->new_chunk * far;
1154
1155 re->level = 10;
1156 re->before.layout = info->array.layout;
1157 re->before.data_disks = info->array.raid_disks;
1158 re->after.layout = info->new_layout;
1159 re->after.data_disks = new_disks;
1160 /* For RAID10 we don't do backup but do allow reshape,
1161 * so set backup_blocks to INVALID_SECTORS rather than
1162 * zero.
1163 * And there is no need to synchronise stripes on both
1164 * 'old' and 'new'. So the important number is the minimum
1165 * data_offset difference, which is the larger of the old
1166 * and new (far-copies * chunk) values.
1167 */
1168 re->backup_blocks = INVALID_SECTORS;
1169 re->min_offset_change = max(old_chunk, new_chunk) / 512;
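/* e.g. with a 512K chunk and far = 1, old_chunk and new_chunk are both
 * 524288 bytes, so data_offset must be able to move by at least
 * 1024 sectors (512K) on each device.
 */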
1170 if (new_disks < re->before.data_disks &&
1171 info->space_after < re->min_offset_change)
1172 /* Reduce component size by one chunk */
1173 re->new_size = (info->component_size -
1174 re->min_offset_change);
1175 else
1176 re->new_size = info->component_size;
1177 re->new_size = re->new_size * new_disks / copies;
1178 return NULL;
1179
1180 default:
1181 return "RAID10 can only be changed to RAID0";
1182 }
1183 case 0:
1184 /* RAID0 can be converted to RAID10, or to RAID456 */
1185 if (info->new_level == 10) {
1186 if (info->new_layout == UnSet && info->delta_disks == UnSet) {
1187 /* Assume near=2 layout */
1188 info->new_layout = 0x102;
1189 info->delta_disks = info->array.raid_disks;
1190 }
1191 if (info->new_layout == UnSet) {
1192 int copies = 1 + (info->delta_disks
1193 / info->array.raid_disks);
1194 if (info->array.raid_disks * (copies-1)
1195 != info->delta_disks)
1196 return "Impossible number of devices for RAID0->RAID10";
1197 info->new_layout = 0x100 + copies;
1198 }
1199 if (info->delta_disks == UnSet) {
1200 int copies = info->new_layout & 0xff;
1201 if (info->new_layout != 0x100 + copies)
1202 return "New layout impossible for RAID0->RAID10";
1203 info->delta_disks = (copies - 1) *
1204 info->array.raid_disks;
1205 }
1206 if (info->new_chunk &&
1207 info->new_chunk != info->array.chunk_size)
1208 return "Cannot change chunk-size with RAID0->RAID10";
1209 /* looks good */
1210 re->level = 10;
1211 re->before.data_disks = (info->array.raid_disks +
1212 info->delta_disks);
1213 re->after.data_disks = re->before.data_disks;
1214 re->before.layout = info->new_layout;
1215 return NULL;
1216 }
1217
1218 /* RAID0 can also convert to RAID0/4/5/6 by first converting to
1219 * a raid4 style layout of the final level.
1220 */
1221 switch (info->new_level) {
1222 case 4:
1223 delta_parity = 1;
1224 case 0:
1225 re->level = 4;
1226 re->before.layout = 0;
1227 break;
1228 case 5:
1229 delta_parity = 1;
1230 re->level = 5;
1231 re->before.layout = ALGORITHM_PARITY_N;
1232 if (info->new_layout == UnSet)
1233 info->new_layout = map_name(r5layout, "default");
1234 break;
1235 case 6:
1236 delta_parity = 2;
1237 re->level = 6;
1238 re->before.layout = ALGORITHM_PARITY_N;
1239 if (info->new_layout == UnSet)
1240 info->new_layout = map_name(r6layout, "default");
1241 break;
1242 default:
1243 return "Impossible level change requested";
1244 }
1245 re->before.data_disks = info->array.raid_disks;
1246 /* determining 'after' layout happens outside this 'switch' */
1247 break;
1248
1249 case 4:
1250 info->array.layout = ALGORITHM_PARITY_N;
1251 case 5:
1252 switch (info->new_level) {
1253 case 0:
1254 delta_parity = -1;
1255 case 4:
1256 re->level = info->array.level;
1257 re->before.data_disks = info->array.raid_disks - 1;
1258 re->before.layout = info->array.layout;
1259 break;
1260 case 5:
1261 re->level = 5;
1262 re->before.data_disks = info->array.raid_disks - 1;
1263 re->before.layout = info->array.layout;
1264 break;
1265 case 6:
1266 delta_parity = 1;
1267 re->level = 6;
1268 re->before.data_disks = info->array.raid_disks - 1;
1269 switch (info->array.layout) {
1270 case ALGORITHM_LEFT_ASYMMETRIC:
1271 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1272 break;
1273 case ALGORITHM_RIGHT_ASYMMETRIC:
1274 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1275 break;
1276 case ALGORITHM_LEFT_SYMMETRIC:
1277 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1278 break;
1279 case ALGORITHM_RIGHT_SYMMETRIC:
1280 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1281 break;
1282 case ALGORITHM_PARITY_0:
1283 re->before.layout = ALGORITHM_PARITY_0_6;
1284 break;
1285 case ALGORITHM_PARITY_N:
1286 re->before.layout = ALGORITHM_PARITY_N_6;
1287 break;
1288 default:
1289 return "Cannot convert an array with this layout";
1290 }
1291 break;
1292 case 1:
1293 if (info->array.raid_disks != 2)
1294 return "Can only convert a 2-device array to RAID1";
1295 if (info->delta_disks != UnSet &&
1296 info->delta_disks != 0)
1297 return "Cannot set raid_disk when converting RAID5->RAID1";
1298 re->level = 1;
1299 info->new_chunk = 0;
1300 return NULL;
1301 default:
1302 return "Impossible level change requested";
1303 }
1304 break;
1305 case 6:
1306 switch (info->new_level) {
1307 case 4:
1308 case 5:
1309 delta_parity = -1;
1310 case 6:
1311 re->level = 6;
1312 re->before.data_disks = info->array.raid_disks - 2;
1313 re->before.layout = info->array.layout;
1314 break;
1315 default:
1316 return "Impossible level change requested";
1317 }
1318 break;
1319 }
1320
1321 /* If we reached here then it looks like a re-stripe is
1322 * happening. We have determined the intermediate level
1323 * and initial raid_disks/layout and stored these in 're'.
1324 *
1325 * We need to deduce the final layout that can be atomically
1326 * converted to the end state.
1327 */
1328 switch (info->new_level) {
1329 case 0:
1330 /* We can only get to RAID0 from RAID4 or RAID5
1331 * with appropriate layout and one extra device
1332 */
1333 if (re->level != 4 && re->level != 5)
1334 return "Cannot convert to RAID0 from this level";
1335
1336 switch (re->level) {
1337 case 4:
1338 re->before.layout = 0;
1339 re->after.layout = 0;
1340 break;
1341 case 5:
1342 re->after.layout = ALGORITHM_PARITY_N;
1343 break;
1344 }
1345 break;
1346
1347 case 4:
1348 /* We can only get to RAID4 from RAID5 */
1349 if (re->level != 4 && re->level != 5)
1350 return "Cannot convert to RAID4 from this level";
1351
1352 switch (re->level) {
1353 case 4:
1354 re->after.layout = 0;
1355 break;
1356 case 5:
1357 re->after.layout = ALGORITHM_PARITY_N;
1358 break;
1359 }
1360 break;
1361
1362 case 5:
1363 /* We get to RAID5 from RAID5 or RAID6 */
1364 if (re->level != 5 && re->level != 6)
1365 return "Cannot convert to RAID5 from this level";
1366
1367 switch (re->level) {
1368 case 5:
1369 if (info->new_layout == UnSet)
1370 re->after.layout = re->before.layout;
1371 else
1372 re->after.layout = info->new_layout;
1373 break;
1374 case 6:
1375 if (info->new_layout == UnSet)
1376 info->new_layout = re->before.layout;
1377
1378 /* after.layout needs to be raid6 version of new_layout */
1379 if (info->new_layout == ALGORITHM_PARITY_N)
1380 re->after.layout = ALGORITHM_PARITY_N;
1381 else {
1382 char layout[40];
1383 char *ls = map_num(r5layout, info->new_layout);
1384 int l;
1385 if (ls) {
1386 /* Current RAID6 layout has a RAID5
1387 * equivalent - good
1388 */
1389 strcat(strcpy(layout, ls), "-6");
1390 l = map_name(r6layout, layout);
1391 if (l == UnSet)
1392 return "Cannot find RAID6 layout to convert to";
1393 } else {
1394 /* Current RAID6 has no equivalent.
1395 * If it is already a '-6' layout we
1396 * can leave it unchanged, else we must
1397 * fail
1398 */
1399 ls = map_num(r6layout, info->new_layout);
1400 if (!ls ||
1401 strcmp(ls+strlen(ls)-2, "-6") != 0)
1402 return "Please specify new layout";
1403 l = info->new_layout;
1404 }
1405 re->after.layout = l;
1406 }
1407 }
1408 break;
1409
1410 case 6:
1411 /* We must already be at level 6 */
1412 if (re->level != 6)
1413 return "Impossible level change";
1414 if (info->new_layout == UnSet)
1415 re->after.layout = info->array.layout;
1416 else
1417 re->after.layout = info->new_layout;
1418 break;
1419 default:
1420 return "Impossible level change requested";
1421 }
1422 if (info->delta_disks == UnSet)
1423 info->delta_disks = delta_parity;
1424
1425 re->after.data_disks = (re->before.data_disks
1426 + info->delta_disks
1427 - delta_parity);
1428 switch (re->level) {
1429 case 6: re->parity = 2;
1430 break;
1431 case 4:
1432 case 5: re->parity = 1;
1433 break;
1434 default: re->parity = 0;
1435 break;
1436 }
1437 /* So we have a restripe operation; we need to calculate the number
1438 * of blocks per reshape operation.
1439 */
1440 re->new_size = info->component_size * re->before.data_disks;
1441 if (info->new_chunk == 0)
1442 info->new_chunk = info->array.chunk_size;
1443 if (re->after.data_disks == re->before.data_disks &&
1444 re->after.layout == re->before.layout &&
1445 info->new_chunk == info->array.chunk_size) {
1446 /* Nothing to change, can change level immediately. */
1447 re->level = info->new_level;
1448 re->backup_blocks = 0;
1449 return NULL;
1450 }
1451 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1452 /* chunk and layout changes make no difference */
1453 re->level = info->new_level;
1454 re->backup_blocks = 0;
1455 return NULL;
1456 }
1457
1458 if (re->after.data_disks == re->before.data_disks &&
1459 get_linux_version() < 2006032)
1460 return "in-place reshape is not safe before 2.6.32 - sorry.";
1461
1462 if (re->after.data_disks < re->before.data_disks &&
1463 get_linux_version() < 2006030)
1464 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1465
1466 re->backup_blocks = compute_backup_blocks(
1467 info->new_chunk, info->array.chunk_size,
1468 re->after.data_disks,
1469 re->before.data_disks);
1470 re->min_offset_change = re->backup_blocks / re->before.data_disks;
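/* Continuing the compute_backup_blocks() example above: 12288 backup
 * sectors across 3 old data disks means each device's data_offset must
 * be able to move by at least 4096 sectors (2MiB).
 */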
1471
1472 re->new_size = info->component_size * re->after.data_disks;
1473 return NULL;
1474 }
1475
1476 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1477 char *text_version)
1478 {
1479 struct mdinfo *info;
1480 char *subarray;
1481 int ret_val = -1;
1482
1483 if ((st == NULL) || (sra == NULL))
1484 return ret_val;
1485
1486 if (text_version == NULL)
1487 text_version = sra->text_version;
1488 subarray = strchr(text_version+1, '/')+1;
1489 info = st->ss->container_content(st, subarray);
1490 if (info) {
1491 unsigned long long current_size = 0;
1492 unsigned long long new_size =
1493 info->custom_array_size/2;
1494
1495 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1496 new_size > current_size) {
1497 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1498 < 0)
1499 dprintf("Error: Cannot set array size");
1500 else {
1501 ret_val = 0;
1502 dprintf("Array size changed");
1503 }
1504 dprintf_cont(" from %llu to %llu.\n",
1505 current_size, new_size);
1506 }
1507 sysfs_free(info);
1508 } else
1509 dprintf("Error: set_array_size(): info pointer is NULL\n");
1510
1511 return ret_val;
1512 }
1513
1514 static int reshape_array(char *container, int fd, char *devname,
1515 struct supertype *st, struct mdinfo *info,
1516 int force, struct mddev_dev *devlist,
1517 unsigned long long data_offset,
1518 char *backup_file, int verbose, int forked,
1519 int restart, int freeze_reshape);
1520 static int reshape_container(char *container, char *devname,
1521 int mdfd,
1522 struct supertype *st,
1523 struct mdinfo *info,
1524 int force,
1525 char *backup_file, int verbose,
1526 int forked, int restart, int freeze_reshape);
1527
1528 int Grow_reshape(char *devname, int fd,
1529 struct mddev_dev *devlist,
1530 unsigned long long data_offset,
1531 struct context *c, struct shape *s)
1532 {
1533 /* Make some changes in the shape of an array.
1534 * The kernel must support the change.
1535 *
1536 * There are three different changes. Each can trigger
1537 * a resync or recovery so we freeze that until we have
1538 * requested everything (if kernel supports freezing - 2.6.30).
1539 * The steps are:
1540 * - change size (i.e. component_size)
1541 * - change level
1542 * - change layout/chunksize/ndisks
1543 *
1544 * The last can require a reshape. It is different on different
1545 * levels so we need to check the level before actioning it.
1546 * Sometimes the level change needs to be requested after the
1547 * reshape (e.g. raid6->raid5, raid5->raid0)
1548 *
1549 */
1550 struct mdu_array_info_s array;
1551 int rv = 0;
1552 struct supertype *st;
1553 char *subarray = NULL;
1554
1555 int frozen;
1556 int changed = 0;
1557 char *container = NULL;
1558 int cfd = -1;
1559
1560 struct mddev_dev *dv;
1561 int added_disks;
1562
1563 struct mdinfo info;
1564 struct mdinfo *sra;
1565
1566 if (ioctl(fd, GET_ARRAY_INFO, &array) < 0) {
1567 pr_err("%s is not an active md array - aborting\n",
1568 devname);
1569 return 1;
1570 }
1571 if (data_offset != INVALID_SECTORS && array.level != 10
1572 && (array.level < 4 || array.level > 6)) {
1573 pr_err("--grow --data-offset not yet supported\n");
1574 return 1;
1575 }
1576
1577 if (s->size > 0 &&
1578 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1579 pr_err("cannot change component size at the same time as other changes.\n"
1580 " Change size first, then check data is intact before making other changes.\n");
1581 return 1;
1582 }
1583
1584 if (s->raiddisks && s->raiddisks < array.raid_disks && array.level > 1 &&
1585 get_linux_version() < 2006032 &&
1586 !check_env("MDADM_FORCE_FEWER")) {
1587 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1588 " Please use a newer kernel\n");
1589 return 1;
1590 }
1591
1592 st = super_by_fd(fd, &subarray);
1593 if (!st) {
1594 pr_err("Unable to determine metadata format for %s\n", devname);
1595 return 1;
1596 }
1597 if (s->raiddisks > st->max_devs) {
1598 pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs);
1599 return 1;
1600 }
1601 if (s->level == 0 &&
1602 (array.state & (1<<MD_SB_BITMAP_PRESENT)) &&
1603 !(array.state & (1<<MD_SB_CLUSTERED))) {
1604 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
1605 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
1606 pr_err("failed to remove internal bitmap.\n");
1607 return 1;
1608 }
1609 }
1610
1611 /* in the external case we need to check that the requested reshape is
1612 * supported, and perform an initial check that the container holds the
1613 * pre-requisite spare devices (mdmon owns final validation)
1614 */
1615 if (st->ss->external) {
1616 int rv;
1617
1618 if (subarray) {
1619 container = st->container_devnm;
1620 cfd = open_dev_excl(st->container_devnm);
1621 } else {
1622 container = st->devnm;
1623 close(fd);
1624 cfd = open_dev_excl(st->devnm);
1625 fd = cfd;
1626 }
1627 if (cfd < 0) {
1628 pr_err("Unable to open container for %s\n",
1629 devname);
1630 free(subarray);
1631 return 1;
1632 }
1633
1634 rv = st->ss->load_container(st, cfd, NULL);
1635
1636 if (rv) {
1637 pr_err("Cannot read superblock for %s\n",
1638 devname);
1639 free(subarray);
1640 return 1;
1641 }
1642
1643 /* check if operation is supported for metadata handler */
1644 if (st->ss->container_content) {
1645 struct mdinfo *cc = NULL;
1646 struct mdinfo *content = NULL;
1647
1648 cc = st->ss->container_content(st, subarray);
1649 for (content = cc; content ; content = content->next) {
1650 int allow_reshape = 1;
1651
1652 /* check if reshape is allowed based on metadata
1653 * indications stored in content.array.status
1654 */
1655 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
1656 allow_reshape = 0;
1657 if (content->array.state
1658 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE))
1659 allow_reshape = 0;
1660 if (!allow_reshape) {
1661 pr_err("cannot reshape arrays in container with unsupported metadata: %s(%s)\n",
1662 devname, container);
1663 sysfs_free(cc);
1664 free(subarray);
1665 return 1;
1666 }
1667 }
1668 sysfs_free(cc);
1669 }
1670 if (mdmon_running(container))
1671 st->update_tail = &st->updates;
1672 }
1673
1674 added_disks = 0;
1675 for (dv = devlist; dv; dv = dv->next)
1676 added_disks++;
1677 if (s->raiddisks > array.raid_disks &&
1678 array.spare_disks +added_disks < (s->raiddisks - array.raid_disks) &&
1679 !c->force) {
1680 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
1681 " Use --force to over-ride this check.\n",
1682 s->raiddisks - array.raid_disks,
1683 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1684 array.spare_disks + added_disks);
1685 return 1;
1686 }
1687
1688 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS
1689 | GET_STATE | GET_VERSION);
1690 if (sra) {
1691 if (st->ss->external && subarray == NULL) {
1692 array.level = LEVEL_CONTAINER;
1693 sra->array.level = LEVEL_CONTAINER;
1694 }
1695 } else {
1696 pr_err("failed to read sysfs parameters for %s\n",
1697 devname);
1698 return 1;
1699 }
1700 frozen = freeze(st);
1701 if (frozen < -1) {
1702 /* freeze() already spewed the reason */
1703 sysfs_free(sra);
1704 return 1;
1705 } else if (frozen < 0) {
1706 pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname);
1707 sysfs_free(sra);
1708 return 1;
1709 }
1710
1711 /* ========= set size =============== */
1712 if (s->size > 0 && (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1713 unsigned long long orig_size = get_component_size(fd)/2;
1714 unsigned long long min_csize;
1715 struct mdinfo *mdi;
1716 int raid0_takeover = 0;
1717
1718 if (orig_size == 0)
1719 orig_size = (unsigned) array.size;
1720
1721 if (orig_size == 0) {
1722 pr_err("Cannot set device size in this type of array.\n");
1723 rv = 1;
1724 goto release;
1725 }
1726
1727 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1728 devname, APPLY_METADATA_CHANGES, c->verbose > 0)) {
1729 rv = 1;
1730 goto release;
1731 }
1732 sync_metadata(st);
1733 if (st->ss->external) {
1734 /* metadata can have size limitation
1735 * update size value according to metadata information
1736 */
1737 struct mdinfo *sizeinfo =
1738 st->ss->container_content(st, subarray);
1739 if (sizeinfo) {
1740 unsigned long long new_size =
1741 sizeinfo->custom_array_size/2;
1742 int data_disks = get_data_disks(
1743 sizeinfo->array.level,
1744 sizeinfo->array.layout,
1745 sizeinfo->array.raid_disks);
1746 new_size /= data_disks;
1747 dprintf("Metadata size correction from %llu to %llu (%llu)\n", orig_size, new_size,
1748 new_size * data_disks);
1749 s->size = new_size;
1750 sysfs_free(sizeinfo);
1751 }
1752 }
1753
1754 /* Update the size of each member device in case
1755 * they have been resized. This will never reduce
1756 * below the current used-size. The "size" attribute
1757 * understands '0' to mean 'max'.
1758 */
1759 min_csize = 0;
1760 rv = 0;
1761 for (mdi = sra->devs; mdi; mdi = mdi->next) {
1762 if (sysfs_set_num(sra, mdi, "size",
1763 s->size == MAX_SIZE ? 0 : s->size) < 0) {
1764 /* Probably kernel refusing to let us
1765 * reduce the size - not an error.
1766 */
1767 break;
1768 }
1769 if (array.not_persistent == 0 &&
1770 array.major_version == 0 &&
1771 get_linux_version() < 3001000) {
1772 /* Dangerous to allow size to exceed 2TB */
1773 unsigned long long csize;
1774 if (sysfs_get_ll(sra, mdi, "size", &csize) == 0) {
1775 if (csize >= 2ULL*1024*1024*1024)
1776 csize = 2ULL*1024*1024*1024;
1777 if ((min_csize == 0 || (min_csize
1778 > csize)))
1779 min_csize = csize;
1780 }
1781 }
1782 }
1783 if (rv) {
1784 pr_err("Cannot set size on array members.\n");
1785 goto size_change_error;
1786 }
1787 if (min_csize && s->size > min_csize) {
1788 pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n");
1789 rv = 1;
1790 goto size_change_error;
1791 }
1792 if (min_csize && s->size == MAX_SIZE) {
1793 /* Don't let the kernel choose a size - it will get
1794 * it wrong
1795 */
1796 pr_err("Limited v0.90 array to 2TB per device\n");
1797 s->size = min_csize;
1798 }
1799 if (st->ss->external) {
1800 if (sra->array.level == 0) {
1801 rv = sysfs_set_str(sra, NULL, "level",
1802 "raid5");
1803 if (!rv) {
1804 raid0_takeover = 1;
1805 /* get array parameters after takeover
1806 * to change one parameter at time only
1807 */
1808 rv = ioctl(fd, GET_ARRAY_INFO, &array);
1809 }
1810 }
1811 /* make sure mdmon is
1812 * aware of the new level */
1813 if (!mdmon_running(st->container_devnm))
1814 start_mdmon(st->container_devnm);
1815 ping_monitor(container);
1816 if (mdmon_running(st->container_devnm) &&
1817 st->update_tail == NULL)
1818 st->update_tail = &st->updates;
1819 }
1820
1821 if (s->size == MAX_SIZE)
1822 s->size = 0;
1823 array.size = s->size;
1824 if (s->size & ~INT32_MAX) {
1825 /* got truncated to 32bit, write to
1826 * component_size instead
1827 */
1828 if (sra)
1829 rv = sysfs_set_num(sra, NULL,
1830 "component_size", s->size);
1831 else
1832 rv = -1;
1833 } else {
1834 rv = ioctl(fd, SET_ARRAY_INFO, &array);
1835
1836 /* manage array size when it is managed externally
1837 */
1838 if ((rv == 0) && st->ss->external)
1839 rv = set_array_size(st, sra, sra->text_version);
1840 }
1841
1842 if (raid0_takeover) {
1843 /* do not resync non-existent parity,
1844 * we will drop it anyway
1845 */
1846 sysfs_set_str(sra, NULL, "sync_action", "frozen");
1847 /* go back to raid0, drop parity disk
1848 */
1849 sysfs_set_str(sra, NULL, "level", "raid0");
1850 ioctl(fd, GET_ARRAY_INFO, &array);
1851 }
1852
1853 size_change_error:
1854 if (rv != 0) {
1855 int err = errno;
1856
1857 /* restore metadata */
1858 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
1859 UnSet, NULL, devname,
1860 ROLLBACK_METADATA_CHANGES,
1861 c->verbose) == 0)
1862 sync_metadata(st);
1863 pr_err("Cannot set device size for %s: %s\n",
1864 devname, strerror(err));
1865 if (err == EBUSY &&
1866 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
1867 cont_err("Bitmap must be removed before size can be changed\n");
1868 rv = 1;
1869 goto release;
1870 }
1871 if (s->assume_clean) {
1872 /* This will fail on kernels older than 3.0 unless
1873 * a backport has been arranged.
1874 */
1875 if (sra == NULL ||
1876 sysfs_set_str(sra, NULL, "resync_start", "none") < 0)
1877 pr_err("--assume-clean not supported with --grow on this kernel\n");
1878 }
1879 ioctl(fd, GET_ARRAY_INFO, &array);
1880 s->size = get_component_size(fd)/2;
1881 if (s->size == 0)
1882 s->size = array.size;
1883 if (c->verbose >= 0) {
1884 if (s->size == orig_size)
1885 pr_err("component size of %s unchanged at %lluK\n",
1886 devname, s->size);
1887 else
1888 pr_err("component size of %s has been set to %lluK\n",
1889 devname, s->size);
1890 }
1891 changed = 1;
1892 } else if (array.level != LEVEL_CONTAINER) {
1893 s->size = get_component_size(fd)/2;
1894 if (s->size == 0)
1895 s->size = array.size;
1896 }
1897
1898 /* See if there is anything else to do */
1899 if ((s->level == UnSet || s->level == array.level) &&
1900 (s->layout_str == NULL) &&
1901 (s->chunk == 0 || s->chunk == array.chunk_size) &&
1902 data_offset == INVALID_SECTORS &&
1903 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
1904 /* Nothing more to do */
1905 if (!changed && c->verbose >= 0)
1906 pr_err("%s: no change requested\n",
1907 devname);
1908 goto release;
1909 }
1910
1911 /* ========= check for Raid10/Raid1 -> Raid0 conversion ===============
1912 * the current implementation assumes that the following conditions are met:
1913 * - RAID10:
1914 * - far_copies == 1
1915 * - near_copies == 2
1916 */
1917 if ((s->level == 0 && array.level == 10 && sra &&
1918 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
1919 (s->level == 0 && array.level == 1 && sra)) {
1920 int err;
1921 err = remove_disks_for_takeover(st, sra, array.layout);
1922 if (err) {
1923 dprintf("Array cannot be reshaped\n");
1924 if (cfd > -1)
1925 close(cfd);
1926 rv = 1;
1927 goto release;
1928 }
1929 /* Make sure mdmon has seen the device removal
1930 * and updated metadata before we continue with
1931 * level change
1932 */
1933 if (container)
1934 ping_monitor(container);
1935 }
1936
1937 memset(&info, 0, sizeof(info));
1938 info.array = array;
1939 sysfs_init(&info, fd, NULL);
1940 strcpy(info.text_version, sra->text_version);
1941 info.component_size = s->size*2;
1942 info.new_level = s->level;
1943 info.new_chunk = s->chunk * 1024;
1944 if (info.array.level == LEVEL_CONTAINER) {
1945 info.delta_disks = UnSet;
1946 info.array.raid_disks = s->raiddisks;
1947 } else if (s->raiddisks)
1948 info.delta_disks = s->raiddisks - info.array.raid_disks;
1949 else
1950 info.delta_disks = UnSet;
1951 if (s->layout_str == NULL) {
1952 info.new_layout = UnSet;
1953 if (info.array.level == 6 &&
1954 (info.new_level == 6 || info.new_level == UnSet) &&
1955 info.array.layout >= 16) {
1956 pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname);
1957 cont_err("during the reshape, please specify --layout=preserve\n");
1958 cont_err("If you want to change it, specify a layout or use --layout=normalise\n");
1959 rv = 1;
1960 goto release;
1961 }
1962 } else if (strcmp(s->layout_str, "normalise") == 0 ||
1963 strcmp(s->layout_str, "normalize") == 0) {
1964 /* If we have a -6 RAID6 layout, remove the '-6'. */
1965 info.new_layout = UnSet;
1966 if (info.array.level == 6 && info.new_level == UnSet) {
1967 char l[40], *h;
1968 strcpy(l, map_num(r6layout, info.array.layout));
1969 h = strrchr(l, '-');
1970 if (h && strcmp(h, "-6") == 0) {
1971 *h = 0;
1972 info.new_layout = map_name(r6layout, l);
1973 }
1974 } else {
1975 pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str);
1976 rv = 1;
1977 goto release;
1978 }
1979 } else if (strcmp(s->layout_str, "preserve") == 0) {
1980 /* This means that a non-standard RAID6 layout
1981 * is OK.
1982 * In particular:
1983 * - When reshaping a RAID6 (e.g. adding a device)
1984 * which is in a non-standard layout, it is OK
1985 * to preserve that layout.
1986 * - When converting a RAID5 to RAID6, leave it in
1987 * the XXX-6 layout, don't re-layout.
1988 */
1989 if (info.array.level == 6 && info.new_level == UnSet)
1990 info.new_layout = info.array.layout;
1991 else if (info.array.level == 5 && info.new_level == 6) {
1992 char l[40];
1993 strcpy(l, map_num(r5layout, info.array.layout));
1994 strcat(l, "-6");
1995 info.new_layout = map_name(r6layout, l);
1996 } else {
1997 pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str);
1998 rv = 1;
1999 goto release;
2000 }
2001 } else {
2002 int l = info.new_level;
2003 if (l == UnSet)
2004 l = info.array.level;
2005 switch (l) {
2006 case 5:
2007 info.new_layout = map_name(r5layout, s->layout_str);
2008 break;
2009 case 6:
2010 info.new_layout = map_name(r6layout, s->layout_str);
2011 break;
2012 case 10:
2013 info.new_layout = parse_layout_10(s->layout_str);
2014 break;
2015 case LEVEL_FAULTY:
2016 info.new_layout = parse_layout_faulty(s->layout_str);
2017 break;
2018 default:
2019 pr_err("layout not meaningful with this level\n");
2020 rv = 1;
2021 goto release;
2022 }
2023 if (info.new_layout == UnSet) {
2024 pr_err("layout %s not understood for this level\n",
2025 s->layout_str);
2026 rv = 1;
2027 goto release;
2028 }
2029 }
2030
2031 if (array.level == LEVEL_FAULTY) {
2032 if (s->level != UnSet && s->level != array.level) {
2033 pr_err("cannot change level of Faulty device\n");
2034 rv = 1;
2035 }
2036 if (s->chunk) {
2037 pr_err("cannot set chunksize of Faulty device\n");
2038 rv = 1;
2039 }
2040 if (s->raiddisks && s->raiddisks != 1) {
2041 pr_err("cannot set raid_disks of Faulty device\n");
2042 rv = 1;
2043 }
2044 if (s->layout_str) {
2045 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2046 dprintf("Cannot get array information.\n");
2047 goto release;
2048 }
2049 array.layout = info.new_layout;
2050 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2051 pr_err("failed to set new layout\n");
2052 rv = 1;
2053 } else if (c->verbose >= 0)
2054 printf("layout for %s set to %d\n",
2055 devname, array.layout);
2056 }
2057 } else if (array.level == LEVEL_CONTAINER) {
2058 /* This change is to be applied to every array in the
2059 * container. This is only needed when the metadata imposes
2060 * constraints on the various arrays in the container.
2061 * Currently we only know that IMSM requires all arrays
2062 * to have the same number of devices so changing the
2063 * number of devices (On-Line Capacity Expansion) must be
2064 * performed at the level of the container
2065 */
2066 rv = reshape_container(container, devname, -1, st, &info,
2067 c->force, c->backup_file, c->verbose, 0, 0, 0);
2068 frozen = 0;
2069 } else {
2070 /* get spare devices from external metadata
2071 */
2072 if (st->ss->external) {
2073 struct mdinfo *info2;
2074
2075 info2 = st->ss->container_content(st, subarray);
2076 if (info2) {
2077 info.array.spare_disks =
2078 info2->array.spare_disks;
2079 sysfs_free(info2);
2080 }
2081 }
2082
2083 /* Impose these changes on a single array. First
2084 * check that the metadata is OK with the change. */
2085
2086 if (reshape_super(st, 0, info.new_level,
2087 info.new_layout, info.new_chunk,
2088 info.array.raid_disks, info.delta_disks,
2089 c->backup_file, devname, APPLY_METADATA_CHANGES,
2090 c->verbose)) {
2091 rv = 1;
2092 goto release;
2093 }
2094 sync_metadata(st);
2095 rv = reshape_array(container, fd, devname, st, &info, c->force,
2096 devlist, data_offset, c->backup_file, c->verbose,
2097 0, 0, 0);
2098 frozen = 0;
2099 }
2100 release:
2101 sysfs_free(sra);
2102 if (frozen > 0)
2103 unfreeze(st);
2104 return rv;
2105 }
2106
2107 /* verify_reshape_position()
2108 * Checks that the reshape position recorded in the metadata is not
2109 * ahead of the position reported by md.
2110 * Return value:
2111 * 0 : no valid sysfs entry; this can happen when the reshape has
2112 * not been started yet (reshape_array() should start it) or when
2113 * a raid0 array is still awaiting takeover
2114 * -1 : error, the reshape position is obviously wrong
2115 * 1 : success, reshape progress is correct or has been updated
2116 */
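/* Illustrative example (made-up numbers): if sysfs sync_max reads 262144
 * sectors per device and the array has 4 data disks, md's array-wide
 * position is 262144 * 4 = 1048576 sectors; a smaller reshape_progress
 * in the metadata is simply brought up to that value, while a larger one
 * means the array was not properly frozen and is treated as fatal.
 */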
2117 static int verify_reshape_position(struct mdinfo *info, int level)
2118 {
2119 int ret_val = 0;
2120 char buf[40];
2121 int rv;
2122
2123 /* read sync_max, failure can mean raid0 array */
2124 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2125
2126 if (rv > 0) {
2127 char *ep;
2128 unsigned long long position = strtoull(buf, &ep, 0);
2129
2130 dprintf("Read sync_max sysfs entry is: %s\n", buf);
2131 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2132 position *= get_data_disks(level,
2133 info->new_layout,
2134 info->array.raid_disks);
2135 if (info->reshape_progress < position) {
2136 dprintf("Corrected reshape progress (%llu) to md position (%llu)\n",
2137 info->reshape_progress, position);
2138 info->reshape_progress = position;
2139 ret_val = 1;
2140 } else if (info->reshape_progress > position) {
2141 pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n",
2142 position, info->reshape_progress);
2143 ret_val = -1;
2144 } else {
2145 dprintf("Reshape position in md and metadata are the same;");
2146 ret_val = 1;
2147 }
2148 }
2149 } else if (rv == 0) {
2150 /* a zero-length read from a valid sysfs entry
2151 * is treated as an error
2152 */
2153 ret_val = -1;
2154 }
2155
2156 return ret_val;
2157 }
2158
2159 static unsigned long long choose_offset(unsigned long long lo,
2160 unsigned long long hi,
2161 unsigned long long min,
2162 unsigned long long max)
2163 {
2164 /* Choose a new offset between hi and lo.
2165 * It must be between min and max, but
2166 * we would prefer something near the middle of hi/lo, and also
2167 * prefer to be aligned to a big power of 2.
2168 *
2169 * So we start with the middle, then for each bit,
2170 * starting at '1' and increasing, if it is set, we either
2171 * add it or subtract it if possible, preferring the option
2172 * which is furthest from the boundary.
2173 *
2174 * We stop once we get a 1MB alignment. As units are in sectors,
2175 * 1MB = 2*1024 sectors.
2176 */
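/* Worked example (illustrative numbers): with lo = 1000, hi = 9000 and
 * min/max equal to lo/hi, the midpoint 5000 is nudged to 4992, then
 * 5120, then 4096; at that point no bit below 2048 is set, so the
 * result is 1MB-aligned and still well inside both bounds.
 */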
2177 unsigned long long choice = (lo + hi) / 2;
2178 unsigned long long bit = 1;
2179
2180 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2181 unsigned long long bigger, smaller;
2182 if (! (bit & choice))
2183 continue;
2184 bigger = choice + bit;
2185 smaller = choice - bit;
2186 if (bigger > max && smaller < min)
2187 break;
2188 if (bigger > max)
2189 choice = smaller;
2190 else if (smaller < min)
2191 choice = bigger;
2192 else if (hi - bigger > smaller - lo)
2193 choice = bigger;
2194 else
2195 choice = smaller;
2196 }
2197 return choice;
2198 }
2199
2200 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2201 char *devname, int delta_disks,
2202 unsigned long long data_offset,
2203 unsigned long long min,
2204 int can_fallback)
2205 {
2206 struct mdinfo *sd;
2207 int dir = 0;
2208 int err = 0;
2209 unsigned long long before, after;
2210
2211 /* Need to find min space before and after so same is used
2212 * on all devices
2213 */
2214 before = UINT64_MAX;
2215 after = UINT64_MAX;
2216 for (sd = sra->devs; sd; sd = sd->next) {
2217 char *dn;
2218 int dfd;
2219 int rv;
2220 struct supertype *st2;
2221 struct mdinfo info2;
2222
2223 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2224 continue;
2225 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2226 dfd = dev_open(dn, O_RDONLY);
2227 if (dfd < 0) {
2228 pr_err("%s: cannot open component %s\n",
2229 devname, dn ? dn : "-unknown-");
2230 goto release;
2231 }
2232 st2 = dup_super(st);
2233 rv = st2->ss->load_super(st2, dfd, NULL);
2234 close(dfd);
2235 if (rv) {
2236 free(st2);
2237 pr_err("%s: cannot get superblock from %s\n",
2238 devname, dn);
2239 goto release;
2240 }
2241 st2->ss->getinfo_super(st2, &info2, NULL);
2242 st2->ss->free_super(st2);
2243 free(st2);
2244 if (info2.space_before == 0 &&
2245 info2.space_after == 0) {
2246 /* Metadata doesn't support data_offset changes */
2247 if (!can_fallback)
2248 pr_err("%s: Metadata version doesn't support data_offset changes\n",
2249 devname);
2250 goto fallback;
2251 }
2252 if (before > info2.space_before)
2253 before = info2.space_before;
2254 if (after > info2.space_after)
2255 after = info2.space_after;
2256
2257 if (data_offset != INVALID_SECTORS) {
2258 if (dir == 0) {
2259 if (info2.data_offset == data_offset) {
2260 pr_err("%s: already has that data_offset\n",
2261 dn);
2262 goto release;
2263 }
2264 if (data_offset < info2.data_offset)
2265 dir = -1;
2266 else
2267 dir = 1;
2268 } else if ((data_offset <= info2.data_offset && dir == 1) ||
2269 (data_offset >= info2.data_offset && dir == -1)) {
2270 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2271 dn);
2272 goto release;
2273 }
2274 }
2275 }
2276 if (before == UINT64_MAX)
2277 /* impossible really, there must be no devices */
2278 return 1;
2279
2280 for (sd = sra->devs; sd; sd = sd->next) {
2281 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2282 unsigned long long new_data_offset;
2283
2284 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2285 continue;
2286 if (delta_disks < 0) {
2287 /* Don't need any space as array is shrinking
2288 * just move data_offset up by min
2289 */
2290 if (data_offset == INVALID_SECTORS)
2291 new_data_offset = sd->data_offset + min;
2292 else {
2293 if (data_offset < sd->data_offset + min) {
2294 pr_err("--data-offset too small for %s\n",
2295 dn);
2296 goto release;
2297 }
2298 new_data_offset = data_offset;
2299 }
2300 } else if (delta_disks > 0) {
2301 /* need space before */
2302 if (before < min) {
2303 if (can_fallback)
2304 goto fallback;
2305 pr_err("Insufficient head-space for reshape on %s\n",
2306 dn);
2307 goto release;
2308 }
2309 if (data_offset == INVALID_SECTORS)
2310 new_data_offset = sd->data_offset - min;
2311 else {
2312 if (data_offset > sd->data_offset - min) {
2313 pr_err("--data-offset too large for %s\n",
2314 dn);
2315 goto release;
2316 }
2317 new_data_offset = data_offset;
2318 }
2319 } else {
2320 if (dir == 0) {
2321 /* can move up or down. If 'data_offset'
2322 * was set we would have already decided,
2323 * so just choose direction with most space.
2324 */
2325 if (before > after)
2326 dir = -1;
2327 else
2328 dir = 1;
2329 }
2330 sysfs_set_str(sra, NULL, "reshape_direction",
2331 dir == 1 ? "backwards" : "forwards");
2332 if (dir > 0) {
2333 /* Increase data offset */
2334 if (after < min) {
2335 if (can_fallback)
2336 goto fallback;
2337 pr_err("Insufficient tail-space for reshape on %s\n",
2338 dn);
2339 goto release;
2340 }
2341 if (data_offset != INVALID_SECTORS &&
2342 data_offset < sd->data_offset + min) {
2343 pr_err("--data-offset too small on %s\n",
2344 dn);
2345 goto release;
2346 }
2347 if (data_offset != INVALID_SECTORS)
2348 new_data_offset = data_offset;
2349 else
2350 new_data_offset = choose_offset(sd->data_offset,
2351 sd->data_offset + after,
2352 sd->data_offset + min,
2353 sd->data_offset + after);
2354 } else {
2355 /* Decrease data offset */
2356 if (before < min) {
2357 if (can_fallback)
2358 goto fallback;
2359 pr_err("insufficient head-room on %s\n",
2360 dn);
2361 goto release;
2362 }
2363 if (data_offset != INVALID_SECTORS &&
2364 data_offset < sd->data_offset - min) {
2365 pr_err("--data-offset too small on %s\n",
2366 dn);
2367 goto release;
2368 }
2369 if (data_offset != INVALID_SECTORS)
2370 new_data_offset = data_offset;
2371 else
2372 new_data_offset = choose_offset(sd->data_offset - before,
2373 sd->data_offset,
2374 sd->data_offset - before,
2375 sd->data_offset - min);
2376 }
2377 }
2378 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2379 if (err < 0 && errno == E2BIG) {
2380 /* try again after increasing data size to max */
2381 err = sysfs_set_num(sra, sd, "size", 0);
2382 if (err < 0 && errno == EINVAL &&
2383 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2384 /* some kernels have a bug where you cannot
2385 * use '0' on spare devices. */
2386 sysfs_set_num(sra, sd, "size",
2387 (sra->component_size + after)/2);
2388 }
2389 err = sysfs_set_num(sra, sd, "new_offset",
2390 new_data_offset);
2391 }
2392 if (err < 0) {
2393 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2394 pr_err("data-offset is too big for %s\n",
2395 dn);
2396 goto release;
2397 }
2398 if (sd == sra->devs &&
2399 (errno == ENOENT || errno == E2BIG))
2400 /* Early kernel, no 'new_offset' file,
2401 * or kernel doesn't like us.
2402 * For RAID5/6 this is not fatal
2403 */
2404 return 1;
2405 pr_err("Cannot set new_offset for %s\n",
2406 dn);
2407 break;
2408 }
2409 }
2410 return err;
2411 release:
2412 return -1;
2413 fallback:
2414 /* Just use a backup file */
2415 return 1;
2416 }
2417
2418 static int raid10_reshape(char *container, int fd, char *devname,
2419 struct supertype *st, struct mdinfo *info,
2420 struct reshape *reshape,
2421 unsigned long long data_offset,
2422 int force, int verbose)
2423 {
2424 /* Changing raid_disks, layout, chunksize or possibly
2425 * just data_offset for a RAID10.
2426 * We must always change data_offset. We change by at least
2427 * ->min_offset_change which is the largest of the old and new
2428 * chunk sizes.
2429 * If raid_disks is increasing, then data_offset must decrease
2430 * by at least this copy size.
2431 * If raid_disks is unchanged, data_offset must increase or
2432 * decrease by at least min_offset_change but preferably by much more.
2433 * We choose half of the available space.
2434 * If raid_disks is decreasing, data_offset must increase by
2435 * at least min_offset_change. To allow for this, component_size
2436 * must be decreased by the same amount.
2437 *
2438 * So we calculate the required minimum and direction, possibly
2439 * reduce the component_size, then iterate through the devices
2440 * and set the new_data_offset.
2441 * If that all works, we set chunk_size, layout, raid_disks, and start
2442 * 'reshape'
2443 */
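/* Illustrative example (made-up sizes): if the larger of the old and new
 * chunks is 1M, min_offset_change is 1M (2048 sectors), so every device's
 * data_offset moves by at least that much; with raid_disks unchanged the
 * direction is simply whichever side of the data has more free space, and
 * when raid_disks decreases, component_size is first reduced by the same
 * amount to make room.
 */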
2444 struct mdinfo *sra;
2445 unsigned long long min;
2446 int err = 0;
2447
2448 sra = sysfs_read(fd, NULL,
2449 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2450 );
2451 if (!sra) {
2452 pr_err("%s: Cannot get array details from sysfs\n",
2453 devname);
2454 goto release;
2455 }
2456 min = reshape->min_offset_change;
2457
2458 if (info->delta_disks)
2459 sysfs_set_str(sra, NULL, "reshape_direction",
2460 info->delta_disks < 0 ? "backwards" : "forwards");
2461 if (info->delta_disks < 0 &&
2462 info->space_after < min) {
2463 int rv = sysfs_set_num(sra, NULL, "component_size",
2464 (sra->component_size -
2465 min)/2);
2466 if (rv) {
2467 pr_err("cannot reduce component size\n");
2468 goto release;
2469 }
2470 }
2471 err = set_new_data_offset(sra, st, devname, info->delta_disks, data_offset,
2472 min, 0);
2473 if (err == 1) {
2474 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2475 cont_err("supported on this kernel\n");
2476 err = -1;
2477 }
2478 if (err < 0)
2479 goto release;
2480
2481 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2482 err = errno;
2483 if (!err && sysfs_set_num(sra, NULL, "layout", reshape->after.layout) < 0)
2484 err = errno;
2485 if (!err && sysfs_set_num(sra, NULL, "raid_disks",
2486 info->array.raid_disks + info->delta_disks) < 0)
2487 err = errno;
2488 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2489 err = errno;
2490 if (err) {
2491 pr_err("Cannot set array shape for %s\n",
2492 devname);
2493 if (err == EBUSY &&
2494 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2495 cont_err(" Bitmap must be removed before shape can be changed\n");
2496 goto release;
2497 }
2498 sysfs_free(sra);
2499 return 0;
2500 release:
2501 sysfs_free(sra);
2502 return 1;
2503 }
2504
2505 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2506 {
2507 struct mdinfo *sra, *sd;
2508 /* Initialisation to silence compiler warning */
2509 unsigned long long min_space_before = 0, min_space_after = 0;
2510 int first = 1;
2511
2512 sra = sysfs_read(fd, NULL, GET_DEVS);
2513 if (!sra)
2514 return;
2515 for (sd = sra->devs; sd; sd = sd->next) {
2516 char *dn;
2517 int dfd;
2518 struct supertype *st2;
2519 struct mdinfo info2;
2520
2521 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2522 continue;
2523 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2524 dfd = dev_open(dn, O_RDONLY);
2525 if (dfd < 0)
2526 break;
2527 st2 = dup_super(st);
2528 if (st2->ss->load_super(st2, dfd, NULL)) {
2529 close(dfd);
2530 free(st2);
2531 break;
2532 }
2533 close(dfd);
2534 st2->ss->getinfo_super(st2, &info2, NULL);
2535 st2->ss->free_super(st2);
2536 free(st2);
2537 if (first ||
2538 min_space_before > info2.space_before)
2539 min_space_before = info2.space_before;
2540 if (first ||
2541 min_space_after > info2.space_after)
2542 min_space_after = info2.space_after;
2543 first = 0;
2544 }
2545 if (sd == NULL && !first) {
2546 info->space_after = min_space_after;
2547 info->space_before = min_space_before;
2548 }
2549 sysfs_free(sra);
2550 }
2551
2552 static void update_cache_size(char *container, struct mdinfo *sra,
2553 struct mdinfo *info,
2554 int disks, unsigned long long blocks)
2555 {
2556 /* Check that the internal stripe cache is
2557 * large enough, or it won't work.
2558 * It must hold at least 4 stripes of the larger
2559 * chunk size
2560 */
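/* Worked example (illustrative numbers): with a larger chunk of 512K,
 * four stripes are 2M = 4096 sectors; if 'blocks' is 8192 sectors spread
 * over 4 disks, the 16 + blocks/disks floor (2064) is smaller, so the
 * value stays 4096 sectors, i.e. 512 pages, and stripe_cache_size is
 * raised to at least that if it is currently lower.
 */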
2561 unsigned long cache;
2562 cache = max(info->array.chunk_size, info->new_chunk);
2563 cache *= 4; /* 4 stripes minimum */
2564 cache /= 512; /* convert to sectors */
2565 /* make sure there is room for 'blocks' with a bit to spare */
2566 if (cache < 16 + blocks / disks)
2567 cache = 16 + blocks / disks;
2568 cache /= (4096/512); /* Convert from sectors to pages */
2569
2570 if (sra->cache_size < cache)
2571 subarray_set_num(container, sra, "stripe_cache_size",
2572 cache+1);
2573 }
2574
2575 static int impose_reshape(struct mdinfo *sra,
2576 struct mdinfo *info,
2577 struct supertype *st,
2578 int fd,
2579 int restart,
2580 char *devname, char *container,
2581 struct reshape *reshape)
2582 {
2583 struct mdu_array_info_s array;
2584
2585 sra->new_chunk = info->new_chunk;
2586
2587 if (restart) {
2588 /* For external metadata the checkpoint saved by mdmon can be lost
2589 * or missed (due to e.g. a crash). Check whether md has progressed
2590 * further into the restart than the metadata indicates.
2591 * If so, the metadata information is obsolete.
2592 */
2593 if (st->ss->external)
2594 verify_reshape_position(info, reshape->level);
2595 sra->reshape_progress = info->reshape_progress;
2596 } else {
2597 sra->reshape_progress = 0;
2598 if (reshape->after.data_disks < reshape->before.data_disks)
2599 /* start from the end of the new array */
2600 sra->reshape_progress = (sra->component_size
2601 * reshape->after.data_disks);
2602 }
2603
2604 ioctl(fd, GET_ARRAY_INFO, &array);
2605 if (info->array.chunk_size == info->new_chunk &&
2606 reshape->before.layout == reshape->after.layout &&
2607 st->ss->external == 0) {
2608 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2609 array.raid_disks = reshape->after.data_disks + reshape->parity;
2610 if (!restart &&
2611 ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2612 int err = errno;
2613
2614 pr_err("Cannot set device shape for %s: %s\n",
2615 devname, strerror(errno));
2616
2617 if (err == EBUSY &&
2618 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2619 cont_err("Bitmap must be removed before shape can be changed\n");
2620
2621 goto release;
2622 }
2623 } else if (!restart) {
2624 /* set them all just in case some old 'new_*' value
2625 * persists from some earlier problem.
2626 */
2627 int err = 0;
2628 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2629 err = errno;
2630 if (!err && sysfs_set_num(sra, NULL, "layout",
2631 reshape->after.layout) < 0)
2632 err = errno;
2633 if (!err && subarray_set_num(container, sra, "raid_disks",
2634 reshape->after.data_disks +
2635 reshape->parity) < 0)
2636 err = errno;
2637 if (err) {
2638 pr_err("Cannot set device shape for %s\n",
2639 devname);
2640
2641 if (err == EBUSY &&
2642 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2643 cont_err("Bitmap must be removed before shape can be changed\n");
2644 goto release;
2645 }
2646 }
2647 return 0;
2648 release:
2649 return -1;
2650 }
2651
2652 static int impose_level(int fd, int level, char *devname, int verbose)
2653 {
2654 char *c;
2655 struct mdu_array_info_s array;
2656 struct mdinfo info;
2657 sysfs_init(&info, fd, NULL);
2658
2659 ioctl(fd, GET_ARRAY_INFO, &array);
2660 if (level == 0 &&
2661 (array.level >= 4 && array.level <= 6)) {
2662 /* To convert to RAID0 we need to fail and
2663 * remove any non-data devices. */
2664 int found = 0;
2665 int d;
2666 int data_disks = array.raid_disks - 1;
2667 if (array.level == 6)
2668 data_disks -= 1;
2669 if (array.level == 5 &&
2670 array.layout != ALGORITHM_PARITY_N)
2671 return -1;
2672 if (array.level == 6 &&
2673 array.layout != ALGORITHM_PARITY_N_6)
2674 return -1;
2675 sysfs_set_str(&info, NULL,"sync_action", "idle");
2676 /* First remove any spares so no recovery starts */
2677 for (d = 0, found = 0;
2678 d < MAX_DISKS && found < array.nr_disks;
2679 d++) {
2680 mdu_disk_info_t disk;
2681 disk.number = d;
2682 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2683 continue;
2684 if (disk.major == 0 && disk.minor == 0)
2685 continue;
2686 found++;
2687 if ((disk.state & (1 << MD_DISK_ACTIVE))
2688 && disk.raid_disk < data_disks)
2689 /* keep this */
2690 continue;
2691 ioctl(fd, HOT_REMOVE_DISK,
2692 makedev(disk.major, disk.minor));
2693 }
2694 /* Now fail anything left */
2695 ioctl(fd, GET_ARRAY_INFO, &array);
2696 for (d = 0, found = 0;
2697 d < MAX_DISKS && found < array.nr_disks;
2698 d++) {
2699 int cnt;
2700 mdu_disk_info_t disk;
2701 disk.number = d;
2702 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2703 continue;
2704 if (disk.major == 0 && disk.minor == 0)
2705 continue;
2706 found++;
2707 if ((disk.state & (1 << MD_DISK_ACTIVE))
2708 && disk.raid_disk < data_disks)
2709 /* keep this */
2710 continue;
2711 ioctl(fd, SET_DISK_FAULTY,
2712 makedev(disk.major, disk.minor));
2713 cnt = 5;
2714 while (ioctl(fd, HOT_REMOVE_DISK,
2715 makedev(disk.major, disk.minor)) < 0
2716 && errno == EBUSY
2717 && cnt--) {
2718 usleep(10000);
2719 }
2720 }
2721 }
2722 c = map_num(pers, level);
2723 if (c) {
2724 int err = sysfs_set_str(&info, NULL, "level", c);
2725 if (err) {
2726 err = errno;
2727 pr_err("%s: could not set level to %s\n",
2728 devname, c);
2729 if (err == EBUSY &&
2730 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2731 cont_err("Bitmap must be removed before level can be changed\n");
2732 return err;
2733 }
2734 if (verbose >= 0)
2735 pr_err("level of %s changed to %s\n",
2736 devname, c);
2737 }
2738 return 0;
2739 }
2740
2741 int sigterm = 0;
2742 static void catch_term(int sig)
2743 {
2744 sigterm = 1;
2745 }
2746
2747 static int continue_via_systemd(char *devnm)
2748 {
2749 int skipped, i, pid, status;
2750 char pathbuf[1024];
2751 /* In a systemd/udev world, it is best to get systemd to
2752 * run "mdadm --grow --continue" rather than running in the
2753 * background.
2754 */
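/* For example, for a hypothetical array md127 this amounts to running
 *	systemctl start mdadm-grow-continue@md127.service
 * from a short-lived child; only if that fails does the caller fall
 * back to monitoring the reshape from a forked mdadm.
 */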
2755 switch(fork()) {
2756 case 0:
2757 /* FIXME yuk. CLOSE_EXEC?? */
2758 skipped = 0;
2759 for (i = 3; skipped < 20; i++)
2760 if (close(i) < 0)
2761 skipped++;
2762 else
2763 skipped = 0;
2764
2765 /* Don't want to see error messages from
2766 * systemctl. If the service doesn't exist,
2767 * we fork ourselves.
2768 */
2769 close(2);
2770 open("/dev/null", O_WRONLY);
2771 snprintf(pathbuf, sizeof(pathbuf), "mdadm-grow-continue@%s.service",
2772 devnm);
2773 status = execl("/usr/bin/systemctl", "systemctl",
2774 "start",
2775 pathbuf, NULL);
2776 status = execl("/bin/systemctl", "systemctl", "start",
2777 pathbuf, NULL);
2778 exit(1);
2779 case -1: /* Just do it ourselves. */
2780 break;
2781 default: /* parent - good */
2782 pid = wait(&status);
2783 if (pid >= 0 && status == 0)
2784 return 1;
2785 }
2786 return 0;
2787 }
2788
2789 static int reshape_array(char *container, int fd, char *devname,
2790 struct supertype *st, struct mdinfo *info,
2791 int force, struct mddev_dev *devlist,
2792 unsigned long long data_offset,
2793 char *backup_file, int verbose, int forked,
2794 int restart, int freeze_reshape)
2795 {
2796 struct reshape reshape;
2797 int spares_needed;
2798 char *msg;
2799 int orig_level = UnSet;
2800 int odisks;
2801 int delayed;
2802
2803 struct mdu_array_info_s array;
2804 char *c;
2805
2806 struct mddev_dev *dv;
2807 int added_disks;
2808
2809 int *fdlist = NULL;
2810 unsigned long long *offsets = NULL;
2811 int d;
2812 int nrdisks;
2813 int err;
2814 unsigned long blocks;
2815 unsigned long long array_size;
2816 int done;
2817 struct mdinfo *sra = NULL;
2818 char buf[20];
2819
2820 /* when reshaping a RAID0, the component_size might be zero.
2821 * So try to fix that up.
2822 */
2823 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2824 dprintf("Cannot get array information.\n");
2825 goto release;
2826 }
2827 if (array.level == 0 && info->component_size == 0) {
2828 get_dev_size(fd, NULL, &array_size);
2829 info->component_size = array_size / array.raid_disks;
2830 }
2831
2832 if (array.level == 10)
2833 /* Need space_after info */
2834 get_space_after(fd, st, info);
2835
2836 if (info->reshape_active) {
2837 int new_level = info->new_level;
2838 info->new_level = UnSet;
2839 if (info->delta_disks > 0)
2840 info->array.raid_disks -= info->delta_disks;
2841 msg = analyse_change(devname, info, &reshape);
2842 info->new_level = new_level;
2843 if (info->delta_disks > 0)
2844 info->array.raid_disks += info->delta_disks;
2845 if (!restart)
2846 /* Make sure the array isn't read-only */
2847 ioctl(fd, RESTART_ARRAY_RW, 0);
2848 } else
2849 msg = analyse_change(devname, info, &reshape);
2850 if (msg) {
2851 /* if msg == "", error has already been printed */
2852 if (msg[0])
2853 pr_err("%s\n", msg);
2854 goto release;
2855 }
2856 if (restart &&
2857 (reshape.level != info->array.level ||
2858 reshape.before.layout != info->array.layout ||
2859 reshape.before.data_disks + reshape.parity
2860 != info->array.raid_disks - max(0, info->delta_disks))) {
2861 pr_err("reshape info is not in native format - cannot continue.\n");
2862 goto release;
2863 }
2864
2865 if (st->ss->external && restart && (info->reshape_progress == 0) &&
2866 !((sysfs_get_str(info, NULL, "sync_action", buf, sizeof(buf)) > 0) &&
2867 (strncmp(buf, "reshape", 7) == 0))) {
2868 /* When a reshape is restarted from '0', the very beginning of the
2869 * array, it is possible that for external metadata the reshape and
2870 * array configuration have not happened yet.
2871 * Check whether md agrees that the reshape is restarting from 0.
2872 * If so, this is a regular reshape start, just after the metadata
2873 * has switched the reshape to the next array.
2874 */
2875 if ((verify_reshape_position(info, reshape.level) >= 0) &&
2876 (info->reshape_progress == 0))
2877 restart = 0;
2878 }
2879 if (restart) {
2880 /* reshape already started. just skip to monitoring the reshape */
2881 if (reshape.backup_blocks == 0)
2882 return 0;
2883 if (restart & RESHAPE_NO_BACKUP)
2884 return 0;
2885
2886 /* Need 'sra' down at 'started:' */
2887 sra = sysfs_read(fd, NULL,
2888 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
2889 GET_CACHE);
2890 if (!sra) {
2891 pr_err("%s: Cannot get array details from sysfs\n",
2892 devname);
2893 goto release;
2894 }
2895
2896 if (!backup_file)
2897 backup_file = locate_backup(sra->sys_name);
2898
2899 goto started;
2900 }
2901 /* The container is frozen but the array may not be.
2902 * So freeze the array so spares don't get put to the wrong use
2903 * FIXME there should probably be a cleaner separation between
2904 * freeze_array and freeze_container.
2905 */
2906 sysfs_freeze_array(info);
2907 /* Check we have enough spares to not be degraded */
2908 added_disks = 0;
2909 for (dv = devlist; dv ; dv=dv->next)
2910 added_disks++;
2911 spares_needed = max(reshape.before.data_disks,
2912 reshape.after.data_disks)
2913 + reshape.parity - array.raid_disks;
2914
2915 if (!force &&
2916 info->new_level > 1 && info->array.level > 1 &&
2917 spares_needed > info->array.spare_disks + added_disks) {
2918 pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n"
2919 " Use --force to over-ride this check.\n",
2920 spares_needed,
2921 spares_needed == 1 ? "" : "s",
2922 info->array.spare_disks + added_disks);
2923 goto release;
2924 }
2925 /* Check we have enough spares to not fail */
2926 spares_needed = max(reshape.before.data_disks,
2927 reshape.after.data_disks)
2928 - array.raid_disks;
2929 if ((info->new_level > 1 || info->new_level == 0) &&
2930 spares_needed > info->array.spare_disks + added_disks) {
2931 pr_err("Need %d spare%s to create working array, and only have %d.\n",
2932 spares_needed,
2933 spares_needed == 1 ? "" : "s",
2934 info->array.spare_disks + added_disks);
2935 goto release;
2936 }
2937
2938 if (reshape.level != array.level) {
2939 int err = impose_level(fd, reshape.level, devname, verbose);
2940 if (err)
2941 goto release;
2942 info->new_layout = UnSet; /* after level change,
2943 * layout is meaningless */
2944 orig_level = array.level;
2945 sysfs_freeze_array(info);
2946
2947 if (reshape.level > 0 && st->ss->external) {
2948 /* make sure mdmon is aware of the new level */
2949 if (mdmon_running(container))
2950 flush_mdmon(container);
2951
2952 if (!mdmon_running(container))
2953 start_mdmon(container);
2954 ping_monitor(container);
2955 if (mdmon_running(container) &&
2956 st->update_tail == NULL)
2957 st->update_tail = &st->updates;
2958 }
2959 }
2960 /* ->reshape_super might have chosen some spares from the
2961 * container that it wants to be part of the new array.
2962 * We can collect them with ->container_content and give
2963 * them to the kernel.
2964 */
2965 if (st->ss->reshape_super && st->ss->container_content) {
2966 char *subarray = strchr(info->text_version+1, '/')+1;
2967 struct mdinfo *info2 =
2968 st->ss->container_content(st, subarray);
2969 struct mdinfo *d;
2970
2971 if (info2) {
2972 sysfs_init(info2, fd, st->devnm);
2973 /* When increasing number of devices, we need to set
2974 * new raid_disks before adding these, or they might
2975 * be rejected.
2976 */
2977 if (reshape.backup_blocks &&
2978 reshape.after.data_disks > reshape.before.data_disks)
2979 subarray_set_num(container, info2, "raid_disks",
2980 reshape.after.data_disks +
2981 reshape.parity);
2982 for (d = info2->devs; d; d = d->next) {
2983 if (d->disk.state == 0 &&
2984 d->disk.raid_disk >= 0) {
2985 /* This is a spare that wants to
2986 * be part of the array.
2987 */
2988 add_disk(fd, st, info2, d);
2989 }
2990 }
2991 sysfs_free(info2);
2992 }
2993 }
2994 /* We might have been given some devices to add to the
2995 * array. Now that the array has been changed to the right
2996 * level and frozen, we can safely add them.
2997 */
2998 if (devlist)
2999 Manage_subdevs(devname, fd, devlist, verbose,
3000 0, NULL, 0);
3001
3002 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
3003 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
3004 if (reshape.backup_blocks == 0) {
3005 /* No restriping needed, but we might need to impose
3006 * some more changes: layout, raid_disks, chunk_size
3007 */
3008 /* read current array info */
3009 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
3010 dprintf("Cannot get array information.\n");
3011 goto release;
3012 }
3013 /* compare the current array info with the new values and
3014 * update anything that differs */
3015 if (info->new_layout != UnSet &&
3016 info->new_layout != array.layout) {
3017 array.layout = info->new_layout;
3018 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
3019 pr_err("failed to set new layout\n");
3020 goto release;
3021 } else if (verbose >= 0)
3022 printf("layout for %s set to %d\n",
3023 devname, array.layout);
3024 }
3025 if (info->delta_disks != UnSet &&
3026 info->delta_disks != 0 &&
3027 array.raid_disks != (info->array.raid_disks + info->delta_disks)) {
3028 array.raid_disks += info->delta_disks;
3029 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
3030 pr_err("failed to set raid disks\n");
3031 goto release;
3032 } else if (verbose >= 0) {
3033 printf("raid_disks for %s set to %d\n",
3034 devname, array.raid_disks);
3035 }
3036 }
3037 if (info->new_chunk != 0 &&
3038 info->new_chunk != array.chunk_size) {
3039 if (sysfs_set_num(info, NULL,
3040 "chunk_size", info->new_chunk) != 0) {
3041 pr_err("failed to set chunk size\n");
3042 goto release;
3043 } else if (verbose >= 0)
3044 printf("chunk size for %s set to %d\n",
3045 devname, array.chunk_size);
3046 }
3047 unfreeze(st);
3048 return 0;
3049 }
3050
3051 /*
3052 * There are three possibilities.
3053 * 1/ The array will shrink.
3054 * We need to ensure the reshape will pause before reaching
3055 * the 'critical section'. We also need to fork and wait for
3056 * that to happen. When it does we
3057 * suspend/backup/complete/unfreeze
3058 *
3059 * 2/ The array will not change size.
3060 * This requires that we keep a backup of a sliding window
3061 * so that we can restore data after a crash. So we need
3062 * to fork and monitor progress.
3063 * In future we will allow the data_offset to change, so
3064 * a sliding backup becomes unnecessary.
3065 *
3066 * 3/ The array will grow. This is relatively easy.
3067 * However the kernel's restripe routines will cheerfully
3068 * overwrite some early data before it is safe. So we
3069 * need to make a backup of the early parts of the array
3070 * and be ready to restore it if rebuild aborts very early.
3071 * For externally managed metadata, we still need a forked
3072 * child to monitor the reshape and suspend IO over the region
3073 * that is being reshaped.
3074 *
3075 * We backup data by writing it to one spare, or to a
3076 * file which was given on command line.
3077 *
3078 * In each case, we first make sure that storage is available
3079 * for the required backup.
3080 * Then we:
3081 * - request the shape change.
3082 * - fork to handle backup etc.
3083 */
3084 /* Check that we can hold all the data */
3085 get_dev_size(fd, NULL, &array_size);
3086 if (reshape.new_size < (array_size/512)) {
3087 pr_err("this change will reduce the size of the array.\n"
3088 " use --grow --array-size first to truncate array.\n"
3089 " e.g. mdadm --grow %s --array-size %llu\n",
3090 devname, reshape.new_size/2);
3091 goto release;
3092 }
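/* e.g. for a hypothetical /dev/md0 being reduced from 4 to 3 data disks,
 * the array would first be truncated with
 *	mdadm --grow /dev/md0 --array-size <size printed above>
 * and only then would the disk-count change be requested, so no live
 * data remains beyond the post-reshape size.
 */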
3093
3094 if (array.level == 10) {
3095 /* Reshaping RAID10 does not require any data backup by
3096 * user-space. Instead it requires that the data_offset
3097 * is changed to avoid the need for backup.
3098 * So this is handled very separately
3099 */
3100 if (restart)
3101 /* Nothing to do. */
3102 return 0;
3103 return raid10_reshape(container, fd, devname, st, info,
3104 &reshape, data_offset,
3105 force, verbose);
3106 }
3107 sra = sysfs_read(fd, NULL,
3108 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3109 GET_CACHE);
3110 if (!sra) {
3111 pr_err("%s: Cannot get array details from sysfs\n",
3112 devname);
3113 goto release;
3114 }
3115
3116 if (!backup_file)
3117 switch(set_new_data_offset(sra, st, devname,
3118 reshape.after.data_disks - reshape.before.data_disks,
3119 data_offset,
3120 reshape.min_offset_change, 1)) {
3121 case -1:
3122 goto release;
3123 case 0:
3124 /* Updated data_offset, so it's easy now */
3125 update_cache_size(container, sra, info,
3126 min(reshape.before.data_disks,
3127 reshape.after.data_disks),
3128 reshape.backup_blocks);
3129
3130 /* Right, everything seems fine. Let's kick things off.
3131 */
3132 sync_metadata(st);
3133
3134 if (impose_reshape(sra, info, st, fd, restart,
3135 devname, container, &reshape) < 0)
3136 goto release;
3137 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3138 struct mdinfo *sd;
3139 if (errno != EINVAL) {
3140 pr_err("Failed to initiate reshape!\n");
3141 goto release;
3142 }
3143 /* revert data_offset and try the old way */
3144 for (sd = sra->devs; sd; sd = sd->next) {
3145 sysfs_set_num(sra, sd, "new_offset",
3146 sd->data_offset);
3147 sysfs_set_str(sra, NULL, "reshape_direction",
3148 "forwards");
3149 }
3150 break;
3151 }
3152 if (info->new_level == reshape.level)
3153 return 0;
3154 /* need to adjust level when reshape completes */
3155 switch(fork()) {
3156 case -1: /* ignore error, but don't wait */
3157 return 0;
3158 default: /* parent */
3159 return 0;
3160 case 0:
3161 map_fork();
3162 break;
3163 }
3164 close(fd);
3165 wait_reshape(sra);
3166 fd = open_dev(sra->sys_name);
3167 if (fd >= 0)
3168 impose_level(fd, info->new_level, devname, verbose);
3169 return 0;
3170 case 1: /* Couldn't set data_offset, try the old way */
3171 if (data_offset != INVALID_SECTORS) {
3172 pr_err("Cannot update data_offset on this array\n");
3173 goto release;
3174 }
3175 break;
3176 }
3177
3178 started:
3179 /* Decide how many blocks (sectors) for a reshape
3180 * unit. The number we have so far is just a minimum
3181 */
3182 blocks = reshape.backup_blocks;
3183 if (reshape.before.data_disks ==
3184 reshape.after.data_disks) {
3185 /* Make 'blocks' bigger for better throughput, but
3186 * not so big that we reject it below.
3187 * Try for 16 megabytes
3188 */
3189 while (blocks * 32 < sra->component_size &&
3190 blocks < 16*1024*2)
3191 blocks *= 2;
3192 } else
3193 pr_err("Need to backup %luK of critical section..\n", blocks/2);
3194
3195 if (blocks >= sra->component_size/2) {
3196 pr_err("%s: Something wrong - reshape aborted\n",
3197 devname);
3198 goto release;
3199 }
3200
3201 /* Now we need to open all these devices so we can read/write.
3202 */
3203 nrdisks = max(reshape.before.data_disks,
3204 reshape.after.data_disks) + reshape.parity
3205 + sra->array.spare_disks;
3206 fdlist = xcalloc((1+nrdisks), sizeof(int));
3207 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3208
3209 odisks = reshape.before.data_disks + reshape.parity;
3210 d = reshape_prepare_fdlist(devname, sra, odisks,
3211 nrdisks, blocks, backup_file,
3212 fdlist, offsets);
3213 if (d < odisks) {
3214 goto release;
3215 }
3216 if ((st->ss->manage_reshape == NULL) ||
3217 (st->ss->recover_backup == NULL)) {
3218 if (backup_file == NULL) {
3219 if (reshape.after.data_disks <=
3220 reshape.before.data_disks) {
3221 pr_err("%s: Cannot grow - need backup-file\n",
3222 devname);
3223 pr_err(" Please provide one with \"--backup=...\"\n");
3224 goto release;
3225 } else if (d == odisks) {
3226 pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname);
3227 goto release;
3228 }
3229 } else {
3230 if (!reshape_open_backup_file(backup_file, fd, devname,
3231 (signed)blocks,
3232 fdlist+d, offsets+d,
3233 sra->sys_name,
3234 restart)) {
3235 goto release;
3236 }
3237 d++;
3238 }
3239 }
3240
3241 update_cache_size(container, sra, info,
3242 min(reshape.before.data_disks, reshape.after.data_disks),
3243 blocks);
3244
3245 /* Right, everything seems fine. Let's kick things off.
3246 * If only changing raid_disks, use ioctl, else use
3247 * sysfs.
3248 */
3249 sync_metadata(st);
3250
3251 if (impose_reshape(sra, info, st, fd, restart,
3252 devname, container, &reshape) < 0)
3253 goto release;
3254
3255 err = start_reshape(sra, restart, reshape.before.data_disks,
3256 reshape.after.data_disks);
3257 if (err) {
3258 pr_err("Cannot %s reshape for %s\n",
3259 restart ? "continue" : "start",
3260 devname);
3261 goto release;
3262 }
3263 if (restart)
3264 sysfs_set_str(sra, NULL, "array_state", "active");
3265 if (freeze_reshape) {
3266 free(fdlist);
3267 free(offsets);
3268 sysfs_free(sra);
3269 pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n",
3270 sra->reshape_progress);
3271 return 1;
3272 }
3273
3274 if (!forked && !check_env("MDADM_NO_SYSTEMCTL"))
3275 if (continue_via_systemd(container ?: sra->sys_name)) {
3276 free(fdlist);
3277 free(offsets);
3278 sysfs_free(sra);
3279 return 0;
3280 }
3281
3282 /* Now we just need to kick off the reshape and watch, while
3283 * handling backups of the data...
3284 * This is all done by a forked background process.
3285 */
3286 switch(forked ? 0 : fork()) {
3287 case -1:
3288 pr_err("Cannot run child to monitor reshape: %s\n",
3289 strerror(errno));
3290 abort_reshape(sra);
3291 goto release;
3292 default:
3293 free(fdlist);
3294 free(offsets);
3295 sysfs_free(sra);
3296 return 0;
3297 case 0:
3298 map_fork();
3299 break;
3300 }
3301
3302 /* If another array on the same devices is busy, the
3303 * reshape will wait for them. This would mean that
3304 * the first section that we suspend will stay suspended
3305 * for a long time. So check on that possibility
3306 * by looking for "DELAYED" in /proc/mdstat, and if found,
3307 * wait a while
3308 */
3309 do {
3310 struct mdstat_ent *mds, *m;
3311 delayed = 0;
3312 mds = mdstat_read(1, 0);
3313 for (m = mds; m; m = m->next)
3314 if (strcmp(m->devnm, sra->sys_name) == 0) {
3315 if (m->resync &&
3316 m->percent == RESYNC_DELAYED)
3317 delayed = 1;
3318 if (m->resync == 0)
3319 /* Haven't started the reshape thread
3320 * yet, wait a bit
3321 */
3322 delayed = 2;
3323 break;
3324 }
3325 free_mdstat(mds);
3326 if (delayed == 1 && get_linux_version() < 3007000) {
3327 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3328 " You might experience problems until other reshapes complete.\n");
3329 delayed = 0;
3330 }
3331 if (delayed)
3332 mdstat_wait(30 - (delayed-1) * 25);
3333 } while (delayed);
3334 mdstat_close();
3335 close(fd);
3336 if (check_env("MDADM_GROW_VERIFY"))
3337 fd = open(devname, O_RDONLY | O_DIRECT);
3338 else
3339 fd = -1;
3340 mlockall(MCL_FUTURE);
3341
3342 signal(SIGTERM, catch_term);
3343
3344 if (st->ss->external) {
3345 /* metadata handler takes it from here */
3346 done = st->ss->manage_reshape(
3347 fd, sra, &reshape, st, blocks,
3348 fdlist, offsets,
3349 d - odisks, fdlist+odisks,
3350 offsets+odisks);
3351 } else
3352 done = child_monitor(
3353 fd, sra, &reshape, st, blocks,
3354 fdlist, offsets,
3355 d - odisks, fdlist+odisks,
3356 offsets+odisks);
3357
3358 free(fdlist);
3359 free(offsets);
3360
3361 if (backup_file && done) {
3362 char *bul;
3363 bul = make_backup(sra->sys_name);
3364 if (bul) {
3365 char buf[1024];
3366 int l = readlink(bul, buf, sizeof(buf) - 1);
3367 if (l > 0) {
3368 buf[l]=0;
3369 unlink(buf);
3370 }
3371 unlink(bul);
3372 free(bul);
3373 }
3374 unlink(backup_file);
3375 }
3376 if (!done) {
3377 abort_reshape(sra);
3378 goto out;
3379 }
3380
3381 if (!st->ss->external &&
3382 !(reshape.before.data_disks != reshape.after.data_disks
3383 && info->custom_array_size) &&
3384 info->new_level == reshape.level &&
3385 !forked) {
3386 /* no need to wait for the reshape to finish as
3387 * there is nothing more to do.
3388 */
3389 sysfs_free(sra);
3390 exit(0);
3391 }
3392 wait_reshape(sra);
3393
3394 if (st->ss->external) {
3395 /* Re-load the metadata as much could have changed */
3396 int cfd = open_dev(st->container_devnm);
3397 if (cfd >= 0) {
3398 flush_mdmon(container);
3399 st->ss->free_super(st);
3400 st->ss->load_container(st, cfd, container);
3401 close(cfd);
3402 }
3403 }
3404
3405 /* set the new array size if required; custom_array_size is used
3406 * by this metadata.
3407 */
3408 if (reshape.before.data_disks !=
3409 reshape.after.data_disks &&
3410 info->custom_array_size)
3411 set_array_size(st, info, info->text_version);
3412
3413 if (info->new_level != reshape.level) {
3414 if (fd < 0)
3415 fd = open(devname, O_RDONLY);
3416 impose_level(fd, info->new_level, devname, verbose);
3417 close(fd);
3418 if (info->new_level == 0)
3419 st->update_tail = NULL;
3420 }
3421 out:
3422 sysfs_free(sra);
3423 if (forked)
3424 return 0;
3425 unfreeze(st);
3426 exit(0);
3427
3428 release:
3429 free(fdlist);
3430 free(offsets);
3431 if (orig_level != UnSet && sra) {
3432 c = map_num(pers, orig_level);
3433 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3434 pr_err("aborting level change\n");
3435 }
3436 sysfs_free(sra);
3437 if (!forked)
3438 unfreeze(st);
3439 return 1;
3440 }
3441
3442 /* mdfd handle is passed to be closed in child process (after fork).
3443 */
3444 int reshape_container(char *container, char *devname,
3445 int mdfd,
3446 struct supertype *st,
3447 struct mdinfo *info,
3448 int force,
3449 char *backup_file, int verbose,
3450 int forked, int restart, int freeze_reshape)
3451 {
3452 struct mdinfo *cc = NULL;
3453 int rv = restart;
3454 char last_devnm[32] = "";
3455
3456 /* component_size is not meaningful for a container,
3457 * so pass '0' meaning 'no change'
3458 */
3459 if (!restart &&
3460 reshape_super(st, 0, info->new_level,
3461 info->new_layout, info->new_chunk,
3462 info->array.raid_disks, info->delta_disks,
3463 backup_file, devname, APPLY_METADATA_CHANGES,
3464 verbose)) {
3465 unfreeze(st);
3466 return 1;
3467 }
3468
3469 sync_metadata(st);
3470
3471 /* ping monitor to be sure that update is on disk
3472 */
3473 ping_monitor(container);
3474
3475 if (!forked && !freeze_reshape && !check_env("MDADM_NO_SYSTEMCTL"))
3476 if (continue_via_systemd(container))
3477 return 0;
3478
3479 switch (forked ? 0 : fork()) {
3480 case -1: /* error */
3481 perror("Cannot fork to complete reshape\n");
3482 unfreeze(st);
3483 return 1;
3484 default: /* parent */
3485 if (!freeze_reshape)
3486 printf("%s: multi-array reshape continues in background\n", Name);
3487 return 0;
3488 case 0: /* child */
3489 map_fork();
3490 break;
3491 }
3492
3493 /* close unused handle in child process
3494 */
3495 if (mdfd > -1)
3496 close(mdfd);
3497
3498 while(1) {
3499 /* For each member array with reshape_active,
3500 * we need to perform the reshape.
3501 * We pick the first array that needs reshaping and
3502 * reshape it. reshape_array() will re-read the metadata
3503 * so the next time through a different array should be
3504 * ready for reshape.
3505 * It is possible that the 'different' array will not
3506 * be assembled yet. In that case we simply exit.
3507 * When it is assembled, the mdadm which assembles it
3508 * will take over the reshape.
3509 */
3510 struct mdinfo *content;
3511 int fd;
3512 struct mdstat_ent *mdstat;
3513 char *adev;
3514 int devid;
3515
3516 sysfs_free(cc);
3517
3518 cc = st->ss->container_content(st, NULL);
3519
3520 for (content = cc; content ; content = content->next) {
3521 char *subarray;
3522 if (!content->reshape_active)
3523 continue;
3524
3525 subarray = strchr(content->text_version+1, '/')+1;
3526 mdstat = mdstat_by_subdev(subarray, container);
3527 if (!mdstat)
3528 continue;
3529 if (mdstat->active == 0) {
3530 pr_err("Skipping inactive array %s.\n",
3531 mdstat->devnm);
3532 free_mdstat(mdstat);
3533 mdstat = NULL;
3534 continue;
3535 }
3536 break;
3537 }
3538 if (!content)
3539 break;
3540
3541 devid = devnm2devid(mdstat->devnm);
3542 adev = map_dev(major(devid), minor(devid), 0);
3543 if (!adev)
3544 adev = content->text_version;
3545
3546 fd = open_dev(mdstat->devnm);
3547 if (fd < 0) {
3548 pr_err("Device %s cannot be opened for reshape.\n", adev);
3549 break;
3550 }
3551
3552 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3553 /* Do not allow multiple reshape_array() calls for
3554 * the same array.
3555 * This can happen when reshape_array() returns without
3556 * error while the reshape is not finished (wrong reshape
3557 * starting/continuation conditions). Mdmon then doesn't
3558 * switch to the next array in the container, and the
3559 * same array is picked up again.
3560 * This is possibly an interim measure until the
3561 * behaviour of reshape_array() is resolved.
3562 */
3563 printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev);
3564 close(fd);
3565 break;
3566 }
3567 strcpy(last_devnm, mdstat->devnm);
3568
3569 sysfs_init(content, fd, mdstat->devnm);
3570
3571 if (mdmon_running(container))
3572 flush_mdmon(container);
3573
3574 rv = reshape_array(container, fd, adev, st,
3575 content, force, NULL, INVALID_SECTORS,
3576 backup_file, verbose, 1, restart,
3577 freeze_reshape);
3578 close(fd);
3579
3580 if (freeze_reshape) {
3581 sysfs_free(cc);
3582 exit(0);
3583 }
3584
3585 restart = 0;
3586 if (rv)
3587 break;
3588
3589 if (mdmon_running(container))
3590 flush_mdmon(container);
3591 }
3592 if (!rv)
3593 unfreeze(st);
3594 sysfs_free(cc);
3595 exit(0);
3596 }
3597
3598 /*
3599 * We run a child process in the background which performs the following
3600 * steps:
3601 * - wait for resync to reach a certain point
3602 * - suspend io to the following section
3603 * - backup that section
3604 * - allow resync to proceed further
3605 * - resume io
3606 * - discard the backup.
3607 *
3608 * These are combined in slightly different ways in the three cases.
3609 * Grow:
3610 * - suspend/backup/allow/wait/resume/discard
3611 * Shrink:
3612 * - allow/wait/suspend/backup/allow/wait/resume/discard
3613 * same-size:
3614 * - wait/resume/discard/suspend/backup/allow
3615 *
3616 * suspend/backup/allow always come together
3617 * wait/resume/discard do too.
3618 * For the same-size case we have two backups to improve flow.
3619 *
3620 */
3621
3622 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3623 unsigned long long backup_point,
3624 unsigned long long wait_point,
3625 unsigned long long *suspend_point,
3626 unsigned long long *reshape_completed, int *frozen)
3627 {
3628 /* This function is called repeatedly by the reshape manager.
3629 * It determines how much progress can safely be made and allows
3630 * that progress.
3631 * - 'info' identifies the array and particularly records in
3632 * ->reshape_progress the metadata's knowledge of progress
3633 * This is a sector offset from the start of the array
3634 * of the next array block to be relocated. This number
3635 * may increase from 0 or decrease from array_size, depending
3636 * on the type of reshape that is happening.
3637 * Note that in contrast, 'sync_completed' is a block count of the
3638 * reshape so far. It gives the distance between the start point
3639 * (head or tail of device) and the next place that data will be
3640 * written. It always increases.
3641 * - 'reshape' is the structure created by analyse_change
3642 * - 'backup_point' shows how much the metadata manager has backed-up
3643 * data. For reshapes with increasing progress, it is the next address
3644 * to be backed up, previous addresses have been backed-up. For
3645 * decreasing progress, it is the earliest address that has been
3646 * backed up - later addresses are also backed up.
3647 * So addresses between reshape_progress and backup_point are
3648 * backed up, provided they are in the 'correct' order.
3649 * - 'wait_point' is an array address. When reshape_completed
3650 * passes this point, progress_reshape should return. It might
3651 * return earlier if it determines that ->reshape_progress needs
3652 * to be updated or further backup is needed.
3653 * - suspend_point is maintained by progress_reshape and the caller
3654 * should not touch it except to initialise to zero.
3655 * It is an array address and it only increases in 2.6.37 and earlier.
3656 * This makes it difficult to handle reducing reshapes with
3657 * external metadata.
3658 * However: it is similar to backup_point in that it records the
3659 * other end of a suspended region from reshape_progress.
3660 * it is moved to extend the region that is safe to backup and/or
3661 * reshape
3662 * - reshape_completed is read from sysfs and returned. The caller
3663 * should copy this into ->reshape_progress when it has reason to
3664 * believe that the metadata knows this, and any backup outside this
3665 * has been erased.
3666 *
3667 * Return value is:
3668 * 1 if more data from backup_point - but only as far as suspend_point,
3669 * should be backed up
3670 * 0 if things are progressing smoothly
3671 * -1 if the reshape is finished because it is all done,
3672 * -2 if the reshape is finished due to an error.
3673 */
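/* A minimal caller sketch (illustrative only -- callers such as
 * child_monitor() drive it in a loop along these lines):
 *
 *	unsigned long long suspend = 0, completed = 0;
 *	int frozen = 0, rv;
 *	do {
 *		rv = progress_reshape(info, reshape, backup_point,
 *				      wait_point, &suspend, &completed,
 *				      &frozen);
 *		if (rv == 1)
 *			back up more data, from backup_point towards suspend;
 *		else
 *			record 'completed' in the metadata and copy it into
 *			info->reshape_progress;
 *	} while (rv >= 0);
 */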
3674
3675 int advancing = (reshape->after.data_disks
3676 >= reshape->before.data_disks);
3677 unsigned long long need_backup; /* All data between start of array and
3678 * here will at some point need to
3679 * be backed up.
3680 */
3681 unsigned long long read_offset, write_offset;
3682 unsigned long long write_range;
3683 unsigned long long max_progress, target, completed;
3684 unsigned long long array_size = (info->component_size
3685 * reshape->before.data_disks);
3686 int fd;
3687 char buf[20];
3688
3689 /* First, we unsuspend any region that is now known to be safe.
3690 * If suspend_point is on the 'wrong' side of reshape_progress, then
3691 * we don't have or need suspension at the moment. This is true for
3692 * native metadata when we don't need to back-up.
3693 */
3694 if (advancing) {
3695 if (info->reshape_progress <= *suspend_point)
3696 sysfs_set_num(info, NULL, "suspend_lo",
3697 info->reshape_progress);
3698 } else {
3699 /* Note: this won't work in 2.6.37 and before.
3700 * Something somewhere should make sure we don't need it!
3701 */
3702 if (info->reshape_progress >= *suspend_point)
3703 sysfs_set_num(info, NULL, "suspend_hi",
3704 info->reshape_progress);
3705 }
3706
3707 /* Now work out how far it is safe to progress.
3708 * If the read_offset for ->reshape_progress is less than
3709 * 'blocks' beyond the write_offset, we can only progress as far
3710 * as a backup.
3711 * Otherwise we can progress until the write_offset for the new location
3712 * reaches (within 'blocks' of) the read_offset at the current location.
3713 * However that region must be suspended unless we are using native
3714 * metadata.
3715 * If we need to suspend more, we limit it to 128M per device, which is
3716 * rather arbitrary and should be some time-based calculation.
3717 */
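/* For instance (made-up numbers), when growing from 3 to 4 data disks
 * with reshape_progress at 1200 sectors, read_offset is 1200/3 = 400 and
 * write_offset is 1200/4 = 300; with a 64K (128-sector) new chunk,
 * read_offset is still within write_offset + write_range (428), so
 * progress is limited to what has already been backed up.
 */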
3718 read_offset = info->reshape_progress / reshape->before.data_disks;
3719 write_offset = info->reshape_progress / reshape->after.data_disks;
3720 write_range = info->new_chunk/512;
3721 if (reshape->before.data_disks == reshape->after.data_disks)
3722 need_backup = array_size;
3723 else
3724 need_backup = reshape->backup_blocks;
3725 if (advancing) {
3726 if (read_offset < write_offset + write_range)
3727 max_progress = backup_point;
3728 else
3729 max_progress =
3730 read_offset *
3731 reshape->after.data_disks;
3732 } else {
3733 if (read_offset > write_offset - write_range)
3734 /* Can only progress as far as has been backed up,
3735 * which must be suspended */
3736 max_progress = backup_point;
3737 else if (info->reshape_progress <= need_backup)
3738 max_progress = backup_point;
3739 else {
3740 if (info->array.major_version >= 0)
3741 /* Can progress until backup is needed */
3742 max_progress = need_backup;
3743 else {
3744 /* Can progress until metadata update is required */
3745 max_progress =
3746 read_offset *
3747 reshape->after.data_disks;
3748 /* but data must be suspended */
3749 if (max_progress < *suspend_point)
3750 max_progress = *suspend_point;
3751 }
3752 }
3753 }
3754
3755 /* We know it is safe to progress to 'max_progress' providing
3756 * it is suspended or we are using native metadata.
3757 * Consider extending suspend_point 128M per device if it
3758 * is less than 64M per device beyond reshape_progress.
3759 * But always do a multiple of 'blocks'
3760 * FIXME this is too big - it takes too long to complete
3761 * this much.
3762 */
3763 target = 64*1024*2 * min(reshape->before.data_disks,
3764 reshape->after.data_disks);
3765 target /= reshape->backup_blocks;
3766 if (target < 2)
3767 target = 2;
3768 target *= reshape->backup_blocks;
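/* Illustrative numbers: with min(before, after) of 3 data disks and
 * backup_blocks of 3072 sectors, target becomes 64*1024*2*3 = 393216
 * sectors (64M per device), already a multiple of backup_blocks, and
 * suspend_hi is then advanced in steps of 2*target, i.e. roughly 128M
 * per device as described above.
 */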
3769
3770 /* For externally managed metadata we always need to suspend IO to
3771 * the area being reshaped so we regularly push suspend_point forward.
3772 * For native metadata we only need the suspend if we are going to do
3773 * a backup.
3774 */
3775 if (advancing) {
3776 if ((need_backup > info->reshape_progress
3777 || info->array.major_version < 0) &&
3778 *suspend_point < info->reshape_progress + target) {
3779 if (need_backup < *suspend_point + 2 * target)
3780 *suspend_point = need_backup;
3781 else if (*suspend_point + 2 * target < array_size)
3782 *suspend_point += 2 * target;
3783 else
3784 *suspend_point = array_size;
3785 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
3786 if (max_progress > *suspend_point)
3787 max_progress = *suspend_point;
3788 }
3789 } else {
3790 if (info->array.major_version >= 0) {
3791 /* Only need to suspend when about to backup */
3792 if (info->reshape_progress < need_backup * 2 &&
3793 *suspend_point > 0) {
3794 *suspend_point = 0;
3795 sysfs_set_num(info, NULL, "suspend_lo", 0);
3796 sysfs_set_num(info, NULL, "suspend_hi", need_backup);
3797 }
3798 } else {
3799 /* Need to suspend continually */
3800 if (info->reshape_progress < *suspend_point)
3801 *suspend_point = info->reshape_progress;
3802 if (*suspend_point + target < info->reshape_progress)
3803 /* No need to move suspend region yet */;
3804 else {
3805 if (*suspend_point >= 2 * target)
3806 *suspend_point -= 2 * target;
3807 else
3808 *suspend_point = 0;
3809 sysfs_set_num(info, NULL, "suspend_lo",
3810 *suspend_point);
3811 }
3812 if (max_progress < *suspend_point)
3813 max_progress = *suspend_point;
3814 }
3815 }
3816
3817 /* now set sync_max to allow that progress. sync_max, like
3818 * sync_completed is a count of sectors written per device, so
3819 * we find the difference between max_progress and the start point,
3820 * and divide that by after.data_disks to get a sync_max
3821 * number.
3822 * At the same time we convert wait_point to a similar number
3823 * for comparing against sync_completed.
3824 */
3825 /* scale down max_progress to per_disk */
3826 max_progress /= reshape->after.data_disks;
3827 /* Round to chunk size as some kernels give an erroneously high number */
3828 max_progress /= info->new_chunk/512;
3829 max_progress *= info->new_chunk/512;
3830 /* And round to old chunk size as the kernel wants that */
3831 max_progress /= info->array.chunk_size/512;
3832 max_progress *= info->array.chunk_size/512;
3833 /* Limit progress to the whole device */
3834 if (max_progress > info->component_size)
3835 max_progress = info->component_size;
3836 wait_point /= reshape->after.data_disks;
3837 if (!advancing) {
3838 /* switch from 'device offset' to 'processed block count' */
3839 max_progress = info->component_size - max_progress;
3840 wait_point = info->component_size - wait_point;
3841 }
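/* sync_max and sync_completed always count forward from the start of the
 * sync, so for a backwards (shrinking) reshape the per-device offsets are
 * mirrored around component_size here. */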
3842
3843 if (!*frozen)
3844 sysfs_set_num(info, NULL, "sync_max", max_progress);
3845
3846 /* Now wait. If we have already reached the point that we were
3847 * asked to wait to, don't wait at all, else wait for any change.
3848 * We need to select on 'sync_completed' as that is the place that
3849 * notifications happen, but we are really interested in
3850 * 'reshape_position'
3851 */
3852 fd = sysfs_get_fd(info, NULL, "sync_completed");
3853 if (fd < 0)
3854 goto check_progress;
3855
3856 if (sysfs_fd_get_ll(fd, &completed) < 0)
3857 goto check_progress;
3858
3859 while (completed < max_progress && completed < wait_point) {
3860 /* Check that sync_action is still 'reshape' to avoid
3861 * waiting forever on a dead array
3862 */
3863 char action[20];
3864 if (sysfs_get_str(info, NULL, "sync_action",
3865 action, 20) <= 0 ||
3866 strncmp(action, "reshape", 7) != 0)
3867 break;
3868 /* Some kernels reset 'sync_completed' to zero
3869 * before setting 'sync_action' to 'idle'.
3870 * So we need these extra tests.
3871 */
3872 if (completed == 0 && advancing
3873 && strncmp(action, "idle", 4) == 0
3874 && info->reshape_progress > 0)
3875 break;
3876 if (completed == 0 && !advancing
3877 && strncmp(action, "idle", 4) == 0
3878 && info->reshape_progress < (info->component_size
3879 * reshape->after.data_disks))
3880 break;
3881 sysfs_wait(fd, NULL);
3882 if (sysfs_fd_get_ll(fd, &completed) < 0)
3883 goto check_progress;
3884 }
3885 /* Some kernels reset 'sync_completed' to zero, but we need the
3886 * real position that md has reached.
3887 * So in that case, read 'reshape_position' from sysfs.
3888 */
3889 if (completed == 0) {
3890 unsigned long long reshapep;
3891 char action[20];
3892 if (sysfs_get_str(info, NULL, "sync_action",
3893 action, 20) > 0 &&
3894 strncmp(action, "idle", 4) == 0 &&
3895 sysfs_get_ll(info, NULL,
3896 "reshape_position", &reshapep) == 0)
3897 *reshape_completed = reshapep;
3898 } else {
3899 /* some kernels can give an incorrectly high
3900 * 'completed' number, so round down */
3901 completed /= (info->new_chunk/512);
3902 completed *= (info->new_chunk/512);
3903 /* Convert 'completed' back into a 'progress' number */
3904 completed *= reshape->after.data_disks;
3905 if (!advancing)
3906 completed = (info->component_size
3907 * reshape->after.data_disks
3908 - completed);
3909 *reshape_completed = completed;
3910 }
3911
3912 close(fd);
3913
3914 /* We return the need_backup flag. Caller will decide
3915 * how much - a multiple of ->backup_blocks up to *suspend_point
3916 */
3917 if (advancing)
3918 return need_backup > info->reshape_progress;
3919 else
3920 return need_backup >= info->reshape_progress;
3921
3922 check_progress:
3923 /* if we couldn't read a number from sync_completed, then
3924 * either the reshape did complete, or it aborted.
3925 * We can tell which by checking for 'none' in reshape_position.
3926 * If it did abort, then it might immediately restart if it
3927 * was just a device failure that leaves us degraded but
3928 * functioning.
3929 */
3930 if (sysfs_get_str(info, NULL, "reshape_position", buf, sizeof(buf)) < 0
3931 || strncmp(buf, "none", 4) != 0) {
3932 /* The abort might only be temporary. Wait up to 10
3933 * seconds for fd to contain a valid number again.
3934 */
3935 int wait = 10000;
3936 int rv = -2;
3937 unsigned long long new_sync_max;
3938 while (fd >= 0 && rv < 0 && wait > 0) {
3939 if (sysfs_wait(fd, &wait) != 1)
3940 break;
3941 switch (sysfs_fd_get_ll(fd, &completed)) {
3942 case 0:
3943 /* all good again */
3944 rv = 1;
3945 /* If "sync_max" is no longer max_progress
3946 * we need to freeze things
3947 */
3948 sysfs_get_ll(info, NULL, "sync_max", &new_sync_max);
3949 *frozen = (new_sync_max != max_progress);
3950 break;
3951 case -2: /* read error - abort */
3952 wait = 0;
3953 break;
3954 }
3955 }
3956 if (fd >= 0)
3957 close(fd);
3958 return rv; /* abort */
3959 } else {
3960 /* Maybe racing with array shutdown - check state */
3961 if (fd >= 0)
3962 close(fd);
3963 if (sysfs_get_str(info, NULL, "array_state", buf, sizeof(buf)) < 0
3964 || strncmp(buf, "inactive", 8) == 0
3965 || strncmp(buf, "clear",5) == 0)
3966 return -2; /* abort */
3967 return -1; /* complete */
3968 }
3969 }
3970
3971 /* FIXME return status is never checked */
3972 static int grow_backup(struct mdinfo *sra,
3973 unsigned long long offset, /* per device */
3974 unsigned long stripes, /* per device, in old chunks */
3975 int *sources, unsigned long long *offsets,
3976 int disks, int chunk, int level, int layout,
3977 int dests, int *destfd, unsigned long long *destoffsets,
3978 int part, int *degraded,
3979 char *buf)
3980 {
3981 /* Back up 'stripes' old-layout chunks, starting at 'offset' on each device of the array,
3982 * to storage 'destfd' (offset 'destoffsets'), after first
3983 * suspending IO. Then allow resync to continue
3984 * over the suspended section.
3985 * Use part 'part' of the backup-super-block.
3986 */
3987 int odata = disks;
3988 int rv = 0;
3989 int i;
3990 unsigned long long ll;
3991 int new_degraded;
3992 //printf("offset %llu\n", offset);
3993 if (level >= 4)
3994 odata--;
3995 if (level == 6)
3996 odata--;
3997
3998 /* Check that the array hasn't become degraded, else we might back up the wrong data */
3999 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
4000 return -1; /* FIXME this error is ignored */
4001 new_degraded = (int)ll;
4002 if (new_degraded != *degraded) {
4003 /* check each device to ensure it is still working */
4004 struct mdinfo *sd;
4005 for (sd = sra->devs ; sd ; sd = sd->next) {
4006 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4007 continue;
4008 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
4009 char sbuf[20];
4010 if (sysfs_get_str(sra, sd, "state", sbuf, 20) < 0 ||
4011 strstr(sbuf, "faulty") ||
4012 strstr(sbuf, "in_sync") == NULL) {
4013 /* this device is dead */
4014 sd->disk.state = (1<<MD_DISK_FAULTY);
4015 if (sd->disk.raid_disk >= 0 &&
4016 sources[sd->disk.raid_disk] >= 0) {
4017 close(sources[sd->disk.raid_disk]);
4018 sources[sd->disk.raid_disk] = -1;
4019 }
4020 }
4021 }
4022 }
4023 *degraded = new_degraded;
4024 }
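/* Record the range being backed up in slot 0 or slot 1 of the backup
 * superblock. arraystart/length are in array sectors: per-device offset
 * and size scaled by the number of data disks (odata). */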
4025 if (part) {
4026 bsb.arraystart2 = __cpu_to_le64(offset * odata);
4027 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
4028 } else {
4029 bsb.arraystart = __cpu_to_le64(offset * odata);
4030 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
4031 }
4032 if (part)
4033 bsb.magic[15] = '2';
4034 for (i = 0; i < dests; i++)
4035 if (part)
4036 lseek64(destfd[i], destoffsets[i] + __le64_to_cpu(bsb.devstart2)*512, 0);
4037 else
4038 lseek64(destfd[i], destoffsets[i], 0);
4039
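/* Each destination is now positioned at the start of the chosen slot;
 * save_stripes() copies the suspended stripes there. */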
4040 rv = save_stripes(sources, offsets,
4041 disks, chunk, level, layout,
4042 dests, destfd,
4043 offset*512*odata, stripes * chunk * odata,
4044 buf);
4045
4046 if (rv)
4047 return rv;
4048 bsb.mtime = __cpu_to_le64(time(0));
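/* Write the backup superblock 4KiB below the data area and, where the
 * destination layout allows (destoffset > 4096), a duplicate copy just
 * past the backed-up data. */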
4049 for (i = 0; i < dests; i++) {
4050 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4051
4052 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4053 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4054 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4055 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4056
4057 rv = -1;
4058 if ((unsigned long long)lseek64(destfd[i], destoffsets[i] - 4096, 0)
4059 != destoffsets[i] - 4096)
4060 break;
4061 if (write(destfd[i], &bsb, 512) != 512)
4062 break;
4063 if (destoffsets[i] > 4096) {
4064 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
4065 destoffsets[i]+stripes*chunk*odata)
4066 break;
4067 if (write(destfd[i], &bsb, 512) != 512)
4068 break;
4069 }
4070 fsync(destfd[i]);
4071 rv = 0;
4072 }
4073
4074 return rv;
4075 }
4076
4077 /* In 2.6.30, the value reported by sync_completed can be
4078 * less than it should be by one stripe.
4079 * This only happens when reshape hits sync_max and pauses.
4080 * So allow wait_backup to either extend sync_max further
4081 * than strictly necessary, or return before the
4082 * sync has got quite as far as we would really like.
4083 * This is what 'blocks2' is for.
4084 * The various callers give appropriate values so that
4085 * everything works.
4086 */
4087 /* FIXME return value is often ignored */
4088 static int forget_backup(int dests, int *destfd,
4089 unsigned long long *destoffsets,
4090 int part)
4091 {
4092 /*
4093 * Erase backup 'part' (which is 0 or 1)
4094 */
4095 int i;
4096 int rv;
4097
4098 if (part) {
4099 bsb.arraystart2 = __cpu_to_le64(0);
4100 bsb.length2 = __cpu_to_le64(0);
4101 } else {
4102 bsb.arraystart = __cpu_to_le64(0);
4103 bsb.length = __cpu_to_le64(0);
4104 }
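/* A zero length marks the slot unused, so a later restart will not try
 * to restore stale data from it. */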
4105 bsb.mtime = __cpu_to_le64(time(0));
4106 rv = 0;
4107 for (i = 0; i < dests; i++) {
4108 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4109 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4110 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4111 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4112 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4113 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4114 destoffsets[i]-4096)
4115 rv = -1;
4116 if (rv == 0 &&
4117 write(destfd[i], &bsb, 512) != 512)
4118 rv = -1;
4119 fsync(destfd[i]);
4120 }
4121 return rv;
4122 }
4123
4124 static void fail(char *msg)
4125 {
4126 int rv;
4127 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4128 rv |= (write(2, "\n", 1) != 1);
4129 exit(rv ? 1 : 2);
4130 }
4131
4132 static char *abuf, *bbuf;
4133 static unsigned long long abuflen;
4134 static void validate(int afd, int bfd, unsigned long long offset)
4135 {
4136 /* Check the data in the backup against the array.
4137 * This is only used for regression testing and should not
4138 * be used while the array is active
4139 */
4140 if (afd < 0)
4141 return;
4142 lseek64(bfd, offset - 4096, 0);
4143 if (read(bfd, &bsb2, 512) != 512)
4144 fail("cannot read bsb");
4145 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4146 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4147 fail("first csum bad");
4148 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4149 fail("magic is bad");
4150 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4151 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4152 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4153 fail("second csum bad");
4154
4155 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4156 fail("devstart is wrong");
4157
4158 if (bsb2.length) {
4159 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4160
4161 if (abuflen < len) {
4162 free(abuf);
4163 free(bbuf);
4164 abuflen = len;
4165 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4166 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4167 abuflen = 0;
4168 /* just stop validating on mem-alloc failure */
4169 return;
4170 }
4171 }
4172
4173 lseek64(bfd, offset, 0);
4174 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4175 //printf("len %llu\n", len);
4176 fail("read first backup failed");
4177 }
4178 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4179 if ((unsigned long long)read(afd, abuf, len) != len)
4180 fail("read first from array failed");
4181 if (memcmp(bbuf, abuf, len) != 0) {
4182 #if 0
4183 int i;
4184 printf("offset=%llu len=%llu\n",
4185 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4186 for (i=0; i<len; i++)
4187 if (bbuf[i] != abuf[i]) {
4188 printf("first diff byte %d\n", i);
4189 break;
4190 }
4191 #endif
4192 fail("data1 compare failed");
4193 }
4194 }
4195 if (bsb2.length2) {
4196 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4197
4198 if (abuflen < len) {
4199 free(abuf);
4200 free(bbuf);
4201 abuflen = len;
4202 abuf = xmalloc(abuflen);
4203 bbuf = xmalloc(abuflen);
4204 }
4205
4206 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4207 if ((unsigned long long)read(bfd, bbuf, len) != len)
4208 fail("read second backup failed");
4209 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4210 if ((unsigned long long)read(afd, abuf, len) != len)
4211 fail("read second from array failed");
4212 if (memcmp(bbuf, abuf, len) != 0)
4213 fail("data2 compare failed");
4214 }
4215 }
4216
4217 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4218 struct supertype *st, unsigned long blocks,
4219 int *fds, unsigned long long *offsets,
4220 int dests, int *destfd, unsigned long long *destoffsets)
4221 {
4222 /* Monitor a reshape where backup is being performed using
4223 * 'native' mechanism - either to a backup file, or
4224 * to some space in a spare.
4225 */
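/* The main loop below alternates between letting the reshape advance
 * (progress_reshape) and filling the two backup slots ahead of it,
 * releasing each slot (forget_backup) once the reshape has moved past
 * the region it covers. */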
4226 char *buf;
4227 int degraded = -1;
4228 unsigned long long speed;
4229 unsigned long long suspend_point, array_size;
4230 unsigned long long backup_point, wait_point;
4231 unsigned long long reshape_completed;
4232 int done = 0;
4233 int increasing = reshape->after.data_disks >= reshape->before.data_disks;
4234 int part = 0; /* The next part of the backup area to fill. It may already
4235 * be full, so we need to check */
4236 int level = reshape->level;
4237 int layout = reshape->before.layout;
4238 int data = reshape->before.data_disks;
4239 int disks = reshape->before.data_disks + reshape->parity;
4240 int chunk = sra->array.chunk_size;
4241 struct mdinfo *sd;
4242 unsigned long stripes;
4243 int uuid[4];
4244 int frozen = 0;
4245
4246 /* set up the backup-super-block. This requires the
4247 * uuid from the array.
4248 */
4249 /* Find a superblock */
4250 for (sd = sra->devs; sd; sd = sd->next) {
4251 char *dn;
4252 int devfd;
4253 int ok;
4254 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4255 continue;
4256 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4257 devfd = dev_open(dn, O_RDONLY);
4258 if (devfd < 0)
4259 continue;
4260 ok = st->ss->load_super(st, devfd, NULL);
4261 close(devfd);
4262 if (ok == 0)
4263 break;
4264 }
4265 if (!sd) {
4266 pr_err("Cannot find a superblock\n");
4267 return 0;
4268 }
4269
4270 memset(&bsb, 0, 512);
4271 memcpy(bsb.magic, "md_backup_data-1", 16);
4272 st->ss->uuid_from_super(st, uuid);
4273 memcpy(bsb.set_uuid, uuid, 16);
4274 bsb.mtime = __cpu_to_le64(time(0));
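/* The second backup slot starts 'blocks' sectors into each backup area;
 * each slot covers up to 'blocks' sectors of array data. */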
4275 bsb.devstart2 = blocks;
4276
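/* Number of old-layout chunks per device that one backup slot covers. */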
4277 stripes = blocks / (sra->array.chunk_size/512) /
4278 reshape->before.data_disks;
4279
4280 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4281 /* Don't start the 'reshape' */
4282 return 0;
4283 if (reshape->before.data_disks == reshape->after.data_disks) {
4284 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4285 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4286 }
4287
4288 if (increasing) {
4289 array_size = sra->component_size * reshape->after.data_disks;
4290 backup_point = sra->reshape_progress;
4291 suspend_point = 0;
4292 } else {
4293 array_size = sra->component_size * reshape->before.data_disks;
4294 backup_point = reshape->backup_blocks;
4295 suspend_point = array_size;
4296 }
4297
4298 while (!done) {
4299 int rv;
4300
4301 /* Want to return as soon as the oldest backup slot can
4302 * be released as that allows us to start backing up
4303 * some more, providing suspend_point has been
4304 * advanced, which it should have.
4305 */
4306 if (increasing) {
4307 wait_point = array_size;
4308 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4309 wait_point = (__le64_to_cpu(bsb.arraystart) +
4310 __le64_to_cpu(bsb.length));
4311 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4312 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4313 __le64_to_cpu(bsb.length2));
4314 } else {
4315 wait_point = 0;
4316 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4317 wait_point = __le64_to_cpu(bsb.arraystart);
4318 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4319 wait_point = __le64_to_cpu(bsb.arraystart2);
4320 }
4321
4322 reshape_completed = sra->reshape_progress;
4323 rv = progress_reshape(sra, reshape,
4324 backup_point, wait_point,
4325 &suspend_point, &reshape_completed,
4326 &frozen);
4327 /* external metadata would need to ping_monitor here */
4328 sra->reshape_progress = reshape_completed;
4329
4330 /* Clear any backup region that is before 'here' */
4331 if (increasing) {
4332 if (__le64_to_cpu(bsb.length) > 0 &&
4333 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4334 __le64_to_cpu(bsb.length)))
4335 forget_backup(dests, destfd,
4336 destoffsets, 0);
4337 if (__le64_to_cpu(bsb.length2) > 0 &&
4338 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4339 __le64_to_cpu(bsb.length2)))
4340 forget_backup(dests, destfd,
4341 destoffsets, 1);
4342 } else {
4343 if (__le64_to_cpu(bsb.length) > 0 &&
4344 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4345 forget_backup(dests, destfd,
4346 destoffsets, 0);
4347 if (__le64_to_cpu(bsb.length2) > 0 &&
4348 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4349 forget_backup(dests, destfd,
4350 destoffsets, 1);
4351 }
4352 if (sigterm)
4353 rv = -2;
4354 if (rv < 0) {
4355 if (rv == -1)
4356 done = 1;
4357 break;
4358 }
4359 if (rv == 0 && increasing && !st->ss->external) {
4360 /* No longer need to monitor this reshape */
4361 sysfs_set_str(sra, NULL, "sync_max", "max");
4362 done = 1;
4363 break;
4364 }
4365
4366 while (rv) {
4367 unsigned long long offset;
4368 unsigned long actual_stripes;
4369 /* Need to back up some data.
4370 * If 'part' is not used and the desired
4371 * backup size is suspended, do a backup,
4372 * then consider the next part.
4373 */
4374 /* Check that 'part' is unused */
4375 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4376 break;
4377 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4378 break;
4379
4380 offset = backup_point / data;
4381 actual_stripes = stripes;
4382 if (increasing) {
4383 if (offset + actual_stripes * (chunk/512) >
4384 sra->component_size)
4385 actual_stripes = ((sra->component_size - offset)
4386 / (chunk/512));
4387 if (offset + actual_stripes * (chunk/512) >
4388 suspend_point/data)
4389 break;
4390 } else {
4391 if (offset < actual_stripes * (chunk/512))
4392 actual_stripes = offset / (chunk/512);
4393 offset -= actual_stripes * (chunk/512);
4394 if (offset < suspend_point/data)
4395 break;
4396 }
4397 if (actual_stripes == 0)
4398 break;
4399 grow_backup(sra, offset, actual_stripes,
4400 fds, offsets,
4401 disks, chunk, level, layout,
4402 dests, destfd, destoffsets,
4403 part, &degraded, buf);
4404 validate(afd, destfd[0], destoffsets[0]);
4405 /* record where 'part' is up to */
4406 part = !part;
4407 if (increasing)
4408 backup_point += actual_stripes * (chunk/512) * data;
4409 else
4410 backup_point -= actual_stripes * (chunk/512) * data;
4411 }
4412 }
4413
4414 /* FIXME maybe call progress_reshape one more time instead */
4415 /* remove any remaining suspension */
4416 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
4417 sysfs_set_num(sra, NULL, "suspend_hi", 0);
4418 sysfs_set_num(sra, NULL, "suspend_lo", 0);
4419 sysfs_set_num(sra, NULL, "sync_min", 0);
4420
4421 if (reshape->before.data_disks == reshape->after.data_disks)
4422 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4423 free(buf);
4424 return done;
4425 }
4426
4427 /*
4428 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4429 * write that data into the array and update the super blocks with
4430 * the new reshape_progress
4431 */
4432 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, int cnt,
4433 char *backup_file, int verbose)
4434 {
4435 int i, j;
4436 int old_disks;
4437 unsigned long long *offsets;
4438 unsigned long long nstripe, ostripe;
4439 int ndata, odata;
4440
4441 odata = info->array.raid_disks - info->delta_disks - 1;
4442 if (info->array.level == 6) odata--; /* number of data disks */
4443 ndata = info->array.raid_disks - 1;
4444 if (info->new_level == 6) ndata--;
4445
4446 old_disks = info->array.raid_disks - info->delta_disks;
4447
4448 if (info->delta_disks <= 0)
4449 /* Didn't grow, so the backup file must have
4450 * been used
4451 */
4452 old_disks = cnt;
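/* Scan the spare slots (plus, when a backup file was given, the slot just
 * before them which holds the backup file) for saved critical-section data. */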
4453 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4454 struct mdinfo dinfo;
4455 int fd;
4456 int bsbsize;
4457 char *devname, namebuf[20];
4458 unsigned long long lo, hi;
4459
4460 /* This was a spare and may have some saved data on it.
4461 * Load the superblock, find and load the
4462 * backup_super_block.
4463 * If either fails, go on to the next device.
4464 * If the backup contains no new info, skip it,
4465 * else restore the data and update all superblocks
4466 */
4467 if (i == old_disks-1) {
4468 fd = open(backup_file, O_RDONLY);
4469 if (fd<0) {
4470 pr_err("backup file %s inaccessible: %s\n",
4471 backup_file, strerror(errno));
4472 continue;
4473 }
4474 devname = backup_file;
4475 } else {
4476 fd = fdlist[i];
4477 if (fd < 0)
4478 continue;
4479 if (st->ss->load_super(st, fd, NULL))
4480 continue;
4481
4482 st->ss->getinfo_super(st, &dinfo, NULL);
4483 st->ss->free_super(st);
4484
4485 if (lseek64(fd,
4486 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4487 0) < 0) {
4488 pr_err("Cannot seek on device %d\n", i);
4489 continue; /* Cannot seek */
4490 }
4491 sprintf(namebuf, "device-%d", i);
4492 devname = namebuf;
4493 }
4494 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4495 if (verbose)
4496 pr_err("Cannot read from %s\n", devname);
4497 continue; /* Cannot read */
4498 }
4499 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4500 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4501 if (verbose)
4502 pr_err("No backup metadata on %s\n", devname);
4503 continue;
4504 }
4505 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4506 if (verbose)
4507 pr_err("Bad backup-metadata checksum on %s\n", devname);
4508 continue; /* bad checksum */
4509 }
4510 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4511 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4512 if (verbose)
4513 pr_err("Bad backup-metadata checksum2 on %s\n", devname);
4514 continue; /* Bad second checksum */
4515 }
4516 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4517 if (verbose)
4518 pr_err("Wrong uuid on backup-metadata on %s\n", devname);
4519 continue; /* Wrong uuid */
4520 }
4521
4522 /* array utime and backup-mtime should be updated at much the same time, but it seems that
4523 * sometimes they aren't... So allow considerable flexibility in matching, and allow
4524 * this test to be overridden by an environment variable.
4525 */
4526 if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
4527 time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
4528 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4529 pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
4530 (unsigned long)__le64_to_cpu(bsb.mtime),
4531 (unsigned long)info->array.utime);
4532 } else {
4533 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4534 pr_err("If you think it should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4535 continue; /* time stamp is too bad */
4536 }
4537 }
4538
4539 if (bsb.magic[15] == '1') {
4540 if (bsb.length == 0)
4541 continue;
4542 if (info->delta_disks >= 0) {
4543 /* reshape_progress is increasing */
4544 if (__le64_to_cpu(bsb.arraystart)
4545 + __le64_to_cpu(bsb.length)
4546 < info->reshape_progress) {
4547 nonew:
4548 if (verbose)
4549 pr_err("backup-metadata found on %s but is not needed\n", devname);
4550 continue; /* No new data here */
4551 }
4552 } else {
4553 /* reshape_progress is decreasing */
4554 if (__le64_to_cpu(bsb.arraystart) >=
4555 info->reshape_progress)
4556 goto nonew; /* No new data here */
4557 }
4558 } else {
4559 if (bsb.length == 0 && bsb.length2 == 0)
4560 continue;
4561 if (info->delta_disks >= 0) {
4562 /* reshape_progress is increasing */
4563 if ((__le64_to_cpu(bsb.arraystart)
4564 + __le64_to_cpu(bsb.length)
4565 < info->reshape_progress)
4566 &&
4567 (__le64_to_cpu(bsb.arraystart2)
4568 + __le64_to_cpu(bsb.length2)
4569 < info->reshape_progress))
4570 goto nonew; /* No new data here */
4571 } else {
4572 /* reshape_progress is decreasing */
4573 if (__le64_to_cpu(bsb.arraystart) >=
4574 info->reshape_progress &&
4575 __le64_to_cpu(bsb.arraystart2) >=
4576 info->reshape_progress)
4577 goto nonew; /* No new data here */
4578 }
4579 }
4580 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4581 second_fail:
4582 if (verbose)
4583 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4584 devname);
4585 continue; /* Cannot seek */
4586 }
4587 /* There should be a duplicate backup superblock 4k before here */
4588 if (lseek64(fd, -4096, 1) < 0 ||
4589 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4590 goto second_fail; /* Cannot find leading superblock */
4591 if (bsb.magic[15] == '1')
4592 bsbsize = offsetof(struct mdp_backup_super, pad1);
4593 else
4594 bsbsize = offsetof(struct mdp_backup_super, pad);
4595 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4596 goto second_fail; /* Cannot find leading superblock */
4597
4598 /* Now need the data offsets for all devices. */
4599 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4600 for(j=0; j<info->array.raid_disks; j++) {
4601 if (fdlist[j] < 0)
4602 continue;
4603 if (st->ss->load_super(st, fdlist[j], NULL))
4604 /* FIXME should this be an error? */
4605 continue;
4606 st->ss->getinfo_super(st, &dinfo, NULL);
4607 st->ss->free_super(st);
4608 offsets[j] = dinfo.data_offset * 512;
4609 }
4610 printf("%s: restoring critical section\n", Name);
4611
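/* restore_stripes() rewrites the saved data into the array using the new
 * geometry (new_chunk/new_level/new_layout), which is the layout that was
 * being written when the reshape stopped. */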
4612 if (restore_stripes(fdlist, offsets,
4613 info->array.raid_disks,
4614 info->new_chunk,
4615 info->new_level,
4616 info->new_layout,
4617 fd, __le64_to_cpu(bsb.devstart)*512,
4618 __le64_to_cpu(bsb.arraystart)*512,
4619 __le64_to_cpu(bsb.length)*512, NULL)) {
4620 /* didn't succeed, so give up */
4621 if (verbose)
4622 pr_err("Error restoring backup from %s\n",
4623 devname);
4624 free(offsets);
4625 return 1;
4626 }
4627
4628 if (bsb.magic[15] == '2' &&
4629 restore_stripes(fdlist, offsets,
4630 info->array.raid_disks,
4631 info->new_chunk,
4632 info->new_level,
4633 info->new_layout,
4634 fd, __le64_to_cpu(bsb.devstart)*512 +
4635 __le64_to_cpu(bsb.devstart2)*512,
4636 __le64_to_cpu(bsb.arraystart2)*512,
4637 __le64_to_cpu(bsb.length2)*512, NULL)) {
4638 /* didn't succeed, so give up */
4639 if (verbose)
4640 pr_err("Error restoring second backup from %s\n",
4641 devname);
4642 free(offsets);
4643 return 1;
4644 }
4645
4646 free(offsets);
4647
4648 /* Ok, so the data is restored. Let's update those superblocks. */
4649
4650 lo = hi = 0;
4651 if (bsb.length) {
4652 lo = __le64_to_cpu(bsb.arraystart);
4653 hi = lo + __le64_to_cpu(bsb.length);
4654 }
4655 if (bsb.magic[15] == '2' && bsb.length2) {
4656 unsigned long long lo1, hi1;
4657 lo1 = __le64_to_cpu(bsb.arraystart2);
4658 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4659 if (lo == hi) {
4660 lo = lo1;
4661 hi = hi1;
4662 } else if (lo < lo1)
4663 hi = hi1;
4664 else
4665 lo = lo1;
4666 }
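/* lo..hi now spans the region covered by the backup. That region has just
 * been rewritten in the new layout, so if reshape_progress lies inside it,
 * move it to the far edge of the region. */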
4667 if (lo < hi &&
4668 (info->reshape_progress < lo ||
4669 info->reshape_progress > hi))
4670 /* backup does not affect reshape_progress */ ;
4671 else if (info->delta_disks >= 0) {
4672 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4673 __le64_to_cpu(bsb.length);
4674 if (bsb.magic[15] == '2') {
4675 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2) +
4676 __le64_to_cpu(bsb.length2);
4677 if (p2 > info->reshape_progress)
4678 info->reshape_progress = p2;
4679 }
4680 } else {
4681 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4682 if (bsb.magic[15] == '2') {
4683 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2);
4684 if (p2 < info->reshape_progress)
4685 info->reshape_progress = p2;
4686 }
4687 }
4688 for (j=0; j<info->array.raid_disks; j++) {
4689 if (fdlist[j] < 0)
4690 continue;
4691 if (st->ss->load_super(st, fdlist[j], NULL))
4692 continue;
4693 st->ss->getinfo_super(st, &dinfo, NULL);
4694 dinfo.reshape_progress = info->reshape_progress;
4695 st->ss->update_super(st, &dinfo,
4696 "_reshape_progress",
4697 NULL,0, 0, NULL);
4698 st->ss->store_super(st, fdlist[j]);
4699 st->ss->free_super(st);
4700 }
4701 return 0;
4702 }
4703 /* Didn't find any backup data, try to see if any
4704 * was needed.
4705 */
4706 if (info->delta_disks < 0) {
4707 /* When shrinking, the critical section is at the end.
4708 * So see if we are before the critical section.
4709 */
4710 unsigned long long first_block;
4711 nstripe = ostripe = 0;
4712 first_block = 0;
4713 while (ostripe >= nstripe) {
4714 ostripe += info->array.chunk_size / 512;
4715 first_block = ostripe * odata;
4716 nstripe = first_block / ndata / (info->new_chunk/512) *
4717 (info->new_chunk/512);
4718 }
4719
4720 if (info->reshape_progress >= first_block)
4721 return 0;
4722 }
4723 if (info->delta_disks > 0) {
4724 /* See if we are beyond the critical section. */
4725 unsigned long long last_block;
4726 nstripe = ostripe = 0;
4727 last_block = 0;
4728 while (nstripe >= ostripe) {
4729 nstripe += info->new_chunk / 512;
4730 last_block = nstripe * ndata;
4731 ostripe = last_block / odata / (info->array.chunk_size/512) *
4732 (info->array.chunk_size/512);
4733 }
4734
4735 if (info->reshape_progress >= last_block)
4736 return 0;
4737 }
4738 /* needed to recover critical section! */
4739 if (verbose)
4740 pr_err("Failed to find backup of critical section\n");
4741 return 1;
4742 }
4743
4744 int Grow_continue_command(char *devname, int fd,
4745 char *backup_file, int verbose)
4746 {
4747 int ret_val = 0;
4748 struct supertype *st = NULL;
4749 struct mdinfo *content = NULL;
4750 struct mdinfo array;
4751 char *subarray = NULL;
4752 struct mdinfo *cc = NULL;
4753 struct mdstat_ent *mdstat = NULL;
4754 int cfd = -1;
4755 int fd2 = -1;
4756
4757 dprintf("Grow continue from command line called for %s\n",
4758 devname);
4759
4760 st = super_by_fd(fd, &subarray);
4761 if (!st || !st->ss) {
4762 pr_err("Unable to determine metadata format for %s\n",
4763 devname);
4764 return 1;
4765 }
4766 dprintf("Grow continue is run for ");
4767 if (st->ss->external == 0) {
4768 int d;
4769 dprintf_cont("native array (%s)\n", devname);
4770 if (ioctl(fd, GET_ARRAY_INFO, &array.array) < 0) {
4771 pr_err("%s is not an active md array - aborting\n", devname);
4772 ret_val = 1;
4773 goto Grow_continue_command_exit;
4774 }
4775 content = &array;
4776 /* Need to load a superblock.
4777 * FIXME we should really get what we need from
4778 * sysfs
4779 */
4780 for (d = 0; d < MAX_DISKS; d++) {
4781 mdu_disk_info_t disk;
4782 char *dv;
4783 int err;
4784 disk.number = d;
4785 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
4786 continue;
4787 if (disk.major == 0 && disk.minor == 0)
4788 continue;
4789 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
4790 continue;
4791 dv = map_dev(disk.major, disk.minor, 1);
4792 if (!dv)
4793 continue;
4794 fd2 = dev_open(dv, O_RDONLY);
4795 if (fd2 < 0)
4796 continue;
4797 err = st->ss->load_super(st, fd2, NULL);
4798 close(fd2);
4799 /* invalidate fd2 to avoid possible double close() */
4800 fd2 = -1;
4801 if (err)
4802 continue;
4803 break;
4804 }
4805 if (d == MAX_DISKS) {
4806 pr_err("Unable to load metadata for %s\n",
4807 devname);
4808 ret_val = 1;
4809 goto Grow_continue_command_exit;
4810 }
4811 st->ss->getinfo_super(st, content, NULL);
4812 } else {
4813 char *container;
4814
4815 if (subarray) {
4816 dprintf_cont("subarray (%s)\n", subarray);
4817 container = st->container_devnm;
4818 cfd = open_dev_excl(st->container_devnm);
4819 } else {
4820 container = st->devnm;
4821 close(fd);
4822 cfd = open_dev_excl(st->devnm);
4823 dprintf_cont("container (%s)\n", container);
4824 fd = cfd;
4825 }
4826 if (cfd < 0) {
4827 pr_err("Unable to open container for %s\n", devname);
4828 ret_val = 1;
4829 goto Grow_continue_command_exit;
4830 }
4831
4832 /* find the array under reshape in this container
4833 */
4834 ret_val = st->ss->load_container(st, cfd, NULL);
4835 if (ret_val) {
4836 pr_err("Cannot read superblock for %s\n",
4837 devname);
4838 ret_val = 1;
4839 goto Grow_continue_command_exit;
4840 }
4841
4842 cc = st->ss->container_content(st, subarray);
4843 for (content = cc; content ; content = content->next) {
4844 char *array;
4845 int allow_reshape = 1;
4846
4847 if (content->reshape_active == 0)
4848 continue;
4849 /* The decision about array or container wide
4850 * reshape is taken in Grow_continue based on
4851 * content->reshape_active state, therefore we
4852 * need to check_reshape based on
4853 * reshape_active and subarray name
4854 */
4855 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
4856 allow_reshape = 0;
4857 if (content->reshape_active == CONTAINER_RESHAPE &&
4858 (content->array.state
4859 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
4860 allow_reshape = 0;
4861
4862 if (!allow_reshape) {
4863 pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
4864 devname, container);
4865 ret_val = 1;
4866 goto Grow_continue_command_exit;
4867 }
4868
4869 array = strchr(content->text_version+1, '/')+1;
4870 mdstat = mdstat_by_subdev(array, container);
4871 if (!mdstat)
4872 continue;
4873 if (mdstat->active == 0) {
4874 pr_err("Skipping inactive array %s.\n",
4875 mdstat->devnm);
4876 free_mdstat(mdstat);
4877 mdstat = NULL;
4878 continue;
4879 }
4880 break;
4881 }
4882 if (!content) {
4883 pr_err("Unable to determine reshaped array for %s\n", devname);
4884 ret_val = 1;
4885 goto Grow_continue_command_exit;
4886 }
4887 fd2 = open_dev(mdstat->devnm);
4888 if (fd2 < 0) {
4889 pr_err("cannot open (%s)\n", mdstat->devnm);
4890 ret_val = 1;
4891 goto Grow_continue_command_exit;
4892 }
4893
4894 sysfs_init(content, fd2, mdstat->devnm);
4895
4896 close(fd2);
4897 fd2 = -1;
4898
4899 /* start mdmon in case it is not running
4900 */
4901 if (!mdmon_running(container))
4902 start_mdmon(container);
4903 ping_monitor(container);
4904
4905 if (mdmon_running(container))
4906 st->update_tail = &st->updates;
4907 else {
4908 pr_err("No mdmon found. Grow cannot continue.\n");
4909 ret_val = 1;
4910 goto Grow_continue_command_exit;
4911 }
4912 }
4913
4914 /* verify that the array under reshape is started from the
4915 * correct position
4916 */
4917 if (verify_reshape_position(content, content->array.level) < 0) {
4918 ret_val = 1;
4919 goto Grow_continue_command_exit;
4920 }
4921
4922 /* continue reshape
4923 */
4924 ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);
4925
4926 Grow_continue_command_exit:
4927 if (fd2 > -1)
4928 close(fd2);
4929 if (cfd > -1)
4930 close(cfd);
4931 st->ss->free_super(st);
4932 free_mdstat(mdstat);
4933 sysfs_free(cc);
4934 free(subarray);
4935
4936 return ret_val;
4937 }
4938
4939 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
4940 char *backup_file, int forked, int freeze_reshape)
4941 {
4942 int ret_val = 2;
4943
4944 if (!info->reshape_active)
4945 return ret_val;
4946
4947 if (st->ss->external) {
4948 int cfd = open_dev(st->container_devnm);
4949
4950 if (cfd < 0)
4951 return 1;
4952
4953 st->ss->load_container(st, cfd, st->container_devnm);
4954 close(cfd);
4955 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
4956 st, info, 0, backup_file,
4957 0, forked,
4958 1 | info->reshape_active,
4959 freeze_reshape);
4960 } else
4961 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
4962 NULL, INVALID_SECTORS,
4963 backup_file, 0, forked,
4964 1 | info->reshape_active,
4965 freeze_reshape);
4966
4967 return ret_val;
4968 }
4969
4970 char *make_backup(char *name)
4971 {
4972 char *base = "backup_file-";
4973 int len;
4974 char *fname;
4975
4976 len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1;
4977 fname = xmalloc(len);
4978 sprintf(fname, "%s/%s%s", MAP_DIR, base, name);
4979 return fname;
4980 }
4981
4982 char *locate_backup(char *name)
4983 {
4984 char *fl = make_backup(name);
4985 struct stat stb;
4986
4987 if (stat(fl, &stb) == 0 &&
4988 S_ISREG(stb.st_mode))
4989 return fl;
4990
4991 free(fl);
4992 return NULL;
4993 }