1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2012 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stdint.h>
28
29 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
30 #error no endian defined
31 #endif
32 #include "md_u.h"
33 #include "md_p.h"
34
35 #ifndef offsetof
36 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
37 #endif
38
39 int restore_backup(struct supertype *st,
40 struct mdinfo *content,
41 int working_disks,
42 int next_spare,
43 char *backup_file,
44 int verbose)
45 {
46 int i;
47 int *fdlist;
48 struct mdinfo *dev;
49 int err;
50 int disk_count = next_spare + working_disks;
51
52 dprintf("Called restore_backup()\n");
53 fdlist = xmalloc(sizeof(int) * disk_count);
54
55 enable_fds(next_spare);
56 for (i = 0; i < next_spare; i++)
57 fdlist[i] = -1;
58 for (dev = content->devs; dev; dev = dev->next) {
59 char buf[22];
60 int fd;
61 sprintf(buf, "%d:%d",
62 dev->disk.major,
63 dev->disk.minor);
64 fd = dev_open(buf, O_RDWR);
65
66 if (dev->disk.raid_disk >= 0)
67 fdlist[dev->disk.raid_disk] = fd;
68 else
69 fdlist[next_spare++] = fd;
70 }
71
72 if (st->ss->external && st->ss->recover_backup)
73 err = st->ss->recover_backup(st, content);
74 else
75 err = Grow_restart(st, content, fdlist, next_spare,
76 backup_file, verbose > 0);
77
78 while (next_spare > 0) {
79 next_spare--;
80 if (fdlist[next_spare] >= 0)
81 close(fdlist[next_spare]);
82 }
83 free(fdlist);
84 if (err) {
85 pr_err("Failed to restore critical"
86 " section for reshape - sorry.\n");
87 if (!backup_file)
88 pr_err("Possibly you need"
89 " to specify a --backup-file\n");
90 return 1;
91 }
92
93 dprintf("restore_backup() returns status OK.\n");
94 return 0;
95 }
96
97 int Grow_Add_device(char *devname, int fd, char *newdev)
98 {
99 /* Add a device to an active array.
100 * Currently, just extend a linear array.
101 * This requires writing a new superblock on the
102 * new device, calling the kernel to add the device,
103 * and if that succeeds, update the superblock on
104 * all other devices.
105 * This means that we need to *find* all other devices.
106 */
107 struct mdinfo info;
108
109 struct stat stb;
110 int nfd, fd2;
111 int d, nd;
112 struct supertype *st = NULL;
113 char *subarray = NULL;
114
115 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
116 pr_err("cannot get array info for %s\n", devname);
117 return 1;
118 }
119
120 if (info.array.level != -1) {
121 pr_err("can only add devices to linear arrays\n");
122 return 1;
123 }
124
125 st = super_by_fd(fd, &subarray);
126 if (!st) {
127 pr_err("cannot handle arrays with superblock version %d\n",
128 info.array.major_version);
129 return 1;
130 }
131
132 if (subarray) {
133 pr_err("Cannot grow linear sub-arrays yet\n");
134 free(subarray);
135 free(st);
136 return 1;
137 }
138
139 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
140 if (nfd < 0) {
141 pr_err("cannot open %s\n", newdev);
142 free(st);
143 return 1;
144 }
145 fstat(nfd, &stb);
146 if ((stb.st_mode & S_IFMT) != S_IFBLK) {
147 pr_err("%s is not a block device!\n", newdev);
148 close(nfd);
149 free(st);
150 return 1;
151 }
152 /* now check out all the devices and make sure we can read the
153 * superblock */
154 for (d=0 ; d < info.array.raid_disks ; d++) {
155 mdu_disk_info_t disk;
156 char *dv;
157
158 st->ss->free_super(st);
159
160 disk.number = d;
161 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
162 pr_err("cannot get device detail for device %d\n",
163 d);
164 close(nfd);
165 free(st);
166 return 1;
167 }
168 dv = map_dev(disk.major, disk.minor, 1);
169 if (!dv) {
170 pr_err("cannot find device file for device %d\n",
171 d);
172 close(nfd);
173 free(st);
174 return 1;
175 }
176 fd2 = dev_open(dv, O_RDWR);
177 if (fd2 < 0) {
178 pr_err("cannot open device file %s\n", dv);
179 close(nfd);
180 free(st);
181 return 1;
182 }
183
184 if (st->ss->load_super(st, fd2, NULL)) {
185 pr_err("cannot find super block on %s\n", dv);
186 close(nfd);
187 close(fd2);
188 free(st);
189 return 1;
190 }
191 close(fd2);
192 }
193 /* Ok, looks good. Let's update the superblock and write it out to
194 * newdev.
195 */
196
197 info.disk.number = d;
198 info.disk.major = major(stb.st_rdev);
199 info.disk.minor = minor(stb.st_rdev);
200 info.disk.raid_disk = d;
201 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
202 st->ss->update_super(st, &info, "linear-grow-new", newdev,
203 0, 0, NULL);
204
205 if (st->ss->store_super(st, nfd)) {
206 pr_err("Cannot store new superblock on %s\n",
207 newdev);
208 close(nfd);
209 return 1;
210 }
211 close(nfd);
212
213 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
214 pr_err("Cannot add new disk to this array\n");
215 return 1;
216 }
217 /* Well, that seems to have worked.
218 * Now go through and update all superblocks
219 */
220
221 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
222 pr_err("cannot get array info for %s\n", devname);
223 return 1;
224 }
225
226 nd = d;
227 for (d=0 ; d < info.array.raid_disks ; d++) {
228 mdu_disk_info_t disk;
229 char *dv;
230
231 disk.number = d;
232 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
233 pr_err("cannot get device detail for device %d\n",
234 d);
235 return 1;
236 }
237 dv = map_dev(disk.major, disk.minor, 1);
238 if (!dv) {
239 pr_err("cannot find device file for device %d\n",
240 d);
241 return 1;
242 }
243 fd2 = dev_open(dv, O_RDWR);
244 if (fd2 < 0) {
245 pr_err("cannot open device file %s\n", dv);
246 return 1;
247 }
248 if (st->ss->load_super(st, fd2, NULL)) {
249 pr_err("cannot find super block on %s\n", dv);
250 close(fd2);
251 return 1;
252 }
253 info.array.raid_disks = nd+1;
254 info.array.nr_disks = nd+1;
255 info.array.active_disks = nd+1;
256 info.array.working_disks = nd+1;
257
258 st->ss->update_super(st, &info, "linear-grow-update", dv,
259 0, 0, NULL);
260
261 if (st->ss->store_super(st, fd2)) {
262 pr_err("Cannot store new superblock on %s\n", dv);
263 close(fd2);
264 return 1;
265 }
266 close(fd2);
267 }
268
269 return 0;
270 }
271
272 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
273 {
274 /*
275 * First check that array doesn't have a bitmap
276 * Then create the bitmap
277 * Then add it
278 *
279 * For internal bitmaps, we need to check the version,
280 * find all the active devices, and write the bitmap block
281 * to all devices
282 */
283 mdu_bitmap_file_t bmf;
284 mdu_array_info_t array;
285 struct supertype *st;
286 char *subarray = NULL;
287 int major = BITMAP_MAJOR_HI;
288 int vers = md_get_version(fd);
289 unsigned long long bitmapsize, array_size;
290
291 if (vers < 9003) {
292 major = BITMAP_MAJOR_HOSTENDIAN;
293 pr_err("Warning - bitmaps created on this kernel"
294 " are not portable\n"
295 " between different architectures. Consider upgrading"
296 " the Linux kernel.\n");
297 }
298
299 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
300 if (errno == ENOMEM)
301 pr_err("Memory allocation failure.\n");
302 else
303 pr_err("bitmaps not supported by this kernel.\n");
304 return 1;
305 }
306 if (bmf.pathname[0]) {
307 if (strcmp(s->bitmap_file,"none")==0) {
308 if (ioctl(fd, SET_BITMAP_FILE, -1)!= 0) {
309 pr_err("failed to remove bitmap %s\n",
310 bmf.pathname);
311 return 1;
312 }
313 return 0;
314 }
315 pr_err("%s already has a bitmap (%s)\n",
316 devname, bmf.pathname);
317 return 1;
318 }
319 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
320 pr_err("cannot get array status for %s\n", devname);
321 return 1;
322 }
323 if (array.state & (1<<MD_SB_BITMAP_PRESENT)) {
324 if (strcmp(s->bitmap_file, "none")==0) {
325 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
326 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
327 pr_err("failed to remove internal bitmap.\n");
328 return 1;
329 }
330 return 0;
331 }
332 pr_err("Internal bitmap already present on %s\n",
333 devname);
334 return 1;
335 }
336
337 if (strcmp(s->bitmap_file, "none") == 0) {
338 pr_err("no bitmap found on %s\n", devname);
339 return 1;
340 }
341 if (array.level <= 0) {
342 pr_err("Bitmaps not meaningful with level %s\n",
343 map_num(pers, array.level)?:"of this array");
344 return 1;
345 }
346 bitmapsize = array.size;
347 bitmapsize <<= 1;
348 if (get_dev_size(fd, NULL, &array_size) &&
349 array_size > (0x7fffffffULL<<9)) {
350 /* Array is big enough that we cannot trust array.size
351 * try other approaches
352 */
353 bitmapsize = get_component_size(fd);
354 }
355 if (bitmapsize == 0) {
356 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
357 return 1;
358 }
359
360 if (array.level == 10) {
361 int ncopies = (array.layout&255)*((array.layout>>8)&255);
362 bitmapsize = bitmapsize * array.raid_disks / ncopies;
363 }
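/* Illustrative example (numbers are assumptions, not from the original
 * source): for a 4-disk RAID10 with layout 0x102 (near=2, far=1),
 * ncopies = 2 * 1 = 2, so bitmapsize becomes bitmapsize * 4 / 2, i.e. the
 * bitmap covers the amount of array data rather than the size of a single
 * member device.
 */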
364
365 st = super_by_fd(fd, &subarray);
366 if (!st) {
367 pr_err("Cannot understand version %d.%d\n",
368 array.major_version, array.minor_version);
369 return 1;
370 }
371 if (subarray) {
372 pr_err("Cannot add bitmaps to sub-arrays yet\n");
373 free(subarray);
374 free(st);
375 return 1;
376 }
377 if (strcmp(s->bitmap_file, "internal") == 0) {
378 int rv;
379 int d;
380 int offset_setable = 0;
381 struct mdinfo *mdi;
382 if (st->ss->add_internal_bitmap == NULL) {
383 pr_err("Internal bitmaps not supported "
384 "with %s metadata\n", st->ss->name);
385 return 1;
386 }
387 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
388 if (mdi)
389 offset_setable = 1;
390 for (d=0; d< st->max_devs; d++) {
391 mdu_disk_info_t disk;
392 char *dv;
393 disk.number = d;
394 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
395 continue;
396 if (disk.major == 0 &&
397 disk.minor == 0)
398 continue;
399 if ((disk.state & (1<<MD_DISK_SYNC))==0)
400 continue;
401 dv = map_dev(disk.major, disk.minor, 1);
402 if (dv) {
403 int fd2 = dev_open(dv, O_RDWR);
404 if (fd2 < 0)
405 continue;
406 if (st->ss->load_super(st, fd2, NULL)==0) {
407 if (st->ss->add_internal_bitmap(
408 st,
409 &s->bitmap_chunk, c->delay, s->write_behind,
410 bitmapsize, offset_setable,
411 major)
412 )
413 st->ss->write_bitmap(st, fd2);
414 else {
415 pr_err("failed to create internal bitmap"
416 " - chunksize problem.\n");
417 close(fd2);
418 return 1;
419 }
420 }
421 close(fd2);
422 }
423 }
424 if (offset_setable) {
425 st->ss->getinfo_super(st, mdi, NULL);
426 sysfs_init(mdi, fd, NULL);
427 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
428 mdi->bitmap_offset);
429 } else {
430 array.state |= (1<<MD_SB_BITMAP_PRESENT);
431 rv = ioctl(fd, SET_ARRAY_INFO, &array);
432 }
433 if (rv < 0) {
434 if (errno == EBUSY)
435 pr_err("Cannot add bitmap while array is"
436 " resyncing or reshaping etc.\n");
437 pr_err("failed to set internal bitmap.\n");
438 return 1;
439 }
440 } else {
441 int uuid[4];
442 int bitmap_fd;
443 int d;
444 int max_devs = st->max_devs;
445
446 /* try to load a superblock */
447 for (d = 0; d < max_devs; d++) {
448 mdu_disk_info_t disk;
449 char *dv;
450 int fd2;
451 disk.number = d;
452 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
453 continue;
454 if ((disk.major==0 && disk.minor==0) ||
455 (disk.state & (1<<MD_DISK_REMOVED)))
456 continue;
457 dv = map_dev(disk.major, disk.minor, 1);
458 if (!dv)
459 continue;
460 fd2 = dev_open(dv, O_RDONLY);
461 if (fd2 >= 0) {
462 if (st->ss->load_super(st, fd2, NULL) == 0) {
463 close(fd2);
464 st->ss->uuid_from_super(st, uuid);
465 break;
466 }
467 close(fd2);
468 }
469 }
470 if (d == max_devs) {
471 pr_err("cannot find UUID for array!\n");
472 return 1;
473 }
474 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid, s->bitmap_chunk,
475 c->delay, s->write_behind, bitmapsize, major)) {
476 return 1;
477 }
478 bitmap_fd = open(s->bitmap_file, O_RDWR);
479 if (bitmap_fd < 0) {
480 pr_err("weird: %s cannot be opened\n",
481 s->bitmap_file);
482 return 1;
483 }
484 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
485 int err = errno;
486 if (errno == EBUSY)
487 pr_err("Cannot add bitmap while array is"
488 " resyncing or reshaping etc.\n");
489 pr_err("Cannot set bitmap file for %s: %s\n",
490 devname, strerror(err));
491 return 1;
492 }
493 }
494
495 return 0;
496 }
497
498 /*
499 * When reshaping an array we might need to back up some data.
500 * This is written to all spares with a 'super_block' describing it.
501 * The superblock goes 4K from the end of the used space on the
502 * device.
503 * It is written after the backup is complete.
504 * It has the following structure.
505 */
506
507 static struct mdp_backup_super {
508 char magic[16]; /* md_backup_data-1 or -2 */
509 __u8 set_uuid[16];
510 __u64 mtime;
511 /* start/sizes in 512byte sectors */
512 __u64 devstart; /* address on backup device/file of data */
513 __u64 arraystart;
514 __u64 length;
515 __u32 sb_csum; /* csum of preceding bytes. */
516 __u32 pad1;
517 __u64 devstart2; /* offset in to data of second section */
518 __u64 arraystart2;
519 __u64 length2;
520 __u32 sb_csum2; /* csum of preceding bytes. */
521 __u8 pad[512-68-32];
522 } __attribute__((aligned(512))) bsb, bsb2;
523
524 static __u32 bsb_csum(char *buf, int len)
525 {
526 int i;
527 int csum = 0;
528 for (i = 0; i < len; i++)
529 csum = (csum<<3) + buf[0];
530 return __cpu_to_le32(csum);
531 }
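/* Hedged usage sketch (an assumption about how callers fill the field, not
 * copied from this file): the checksum is expected to cover the bytes that
 * precede it, along the lines of
 *
 *   bsb.sb_csum = bsb_csum((char *)&bsb,
 *                          ((char *)&bsb.sb_csum) - ((char *)&bsb));
 *
 * Note the loop sums buf[0] rather than buf[i]; existing backup images were
 * written with exactly this calculation, so it cannot be "fixed" without
 * breaking compatibility with them.
 */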
532
533 static int check_idle(struct supertype *st)
534 {
535 /* Check that all member arrays for this container, or the
536 * container of this array, are idle
537 */
538 char *container = (st->container_devnm[0]
539 ? st->container_devnm : st->devnm);
540 struct mdstat_ent *ent, *e;
541 int is_idle = 1;
542
543 ent = mdstat_read(0, 0);
544 for (e = ent ; e; e = e->next) {
545 if (!is_container_member(e, container))
546 continue;
547 if (e->percent >= 0) {
548 is_idle = 0;
549 break;
550 }
551 }
552 free_mdstat(ent);
553 return is_idle;
554 }
555
556 static int freeze_container(struct supertype *st)
557 {
558 char *container = (st->container_devnm[0]
559 ? st->container_devnm : st->devnm);
560
561 if (!check_idle(st))
562 return -1;
563
564 if (block_monitor(container, 1)) {
565 pr_err("failed to freeze container\n");
566 return -2;
567 }
568
569 return 1;
570 }
571
572 static void unfreeze_container(struct supertype *st)
573 {
574 char *container = (st->container_devnm[0]
575 ? st->container_devnm : st->devnm);
576
577 unblock_monitor(container, 1);
578 }
579
580 static int freeze(struct supertype *st)
581 {
582 /* Try to freeze resync/rebuild on this array/container.
583 * Return -1 if the array is busy,
584 * return -2 container cannot be frozen,
585 * return 0 if this kernel doesn't support 'frozen'
586 * return 1 if it worked.
587 */
588 if (st->ss->external)
589 return freeze_container(st);
590 else {
591 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
592 int err;
593 char buf[20];
594
595 if (!sra)
596 return -1;
597 /* Need to clear any 'read-auto' status */
598 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
599 strncmp(buf, "read-auto", 9) == 0)
600 sysfs_set_str(sra, NULL, "array_state", "clean");
601
602 err = sysfs_freeze_array(sra);
603 sysfs_free(sra);
604 return err;
605 }
606 }
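/* Hedged usage sketch, mirroring how Grow_reshape() below interprets the
 * return codes (illustrative only):
 *
 *   int frozen = freeze(st);
 *   if (frozen < -1)
 *           return 1;    (container could not be frozen; reason already printed)
 *   else if (frozen < 0)
 *           return 1;    (array is busy with resync/recovery)
 *   ...request the changes...
 *   if (frozen > 0)
 *           unfreeze(st);
 */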
607
608 static void unfreeze(struct supertype *st)
609 {
610 if (st->ss->external)
611 return unfreeze_container(st);
612 else {
613 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
614
615 if (sra)
616 sysfs_set_str(sra, NULL, "sync_action", "idle");
617 sysfs_free(sra);
618 }
619 }
620
621 static void wait_reshape(struct mdinfo *sra)
622 {
623 int fd = sysfs_get_fd(sra, NULL, "sync_action");
624 char action[20];
625
626 if (fd < 0)
627 return;
628
629 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
630 strncmp(action, "reshape", 7) == 0) {
631 fd_set rfds;
632 FD_ZERO(&rfds);
633 FD_SET(fd, &rfds);
634 select(fd+1, NULL, NULL, &rfds, NULL);
635 }
636 close(fd);
637 }
638
639 static int reshape_super(struct supertype *st, unsigned long long size,
640 int level, int layout, int chunksize, int raid_disks,
641 int delta_disks, char *backup_file, char *dev,
642 int direction, int verbose)
643 {
644 /* nothing extra to check in the native case */
645 if (!st->ss->external)
646 return 0;
647 if (!st->ss->reshape_super ||
648 !st->ss->manage_reshape) {
649 pr_err("%s metadata does not support reshape\n",
650 st->ss->name);
651 return 1;
652 }
653
654 return st->ss->reshape_super(st, size, level, layout, chunksize,
655 raid_disks, delta_disks, backup_file, dev,
656 direction, verbose);
657 }
658
659 static void sync_metadata(struct supertype *st)
660 {
661 if (st->ss->external) {
662 if (st->update_tail) {
663 flush_metadata_updates(st);
664 st->update_tail = &st->updates;
665 } else
666 st->ss->sync_metadata(st);
667 }
668 }
669
670 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
671 {
672 /* when dealing with external metadata subarrays we need to be
673 * prepared to handle EAGAIN. The kernel may need to wait for
674 * mdmon to mark the array active so the kernel can handle
675 * allocations/writeback when preparing the reshape action
676 * (md_allow_write()). We temporarily disable safe_mode_delay
677 * to close a race with the array_state going clean before the
678 * next write to raid_disks / stripe_cache_size
679 */
680 char safe[50];
681 int rc;
682
683 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
684 if (!container ||
685 (strcmp(name, "raid_disks") != 0 &&
686 strcmp(name, "stripe_cache_size") != 0))
687 return sysfs_set_num(sra, NULL, name, n);
688
689 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
690 if (rc <= 0)
691 return -1;
692 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
693 rc = sysfs_set_num(sra, NULL, name, n);
694 if (rc < 0 && errno == EAGAIN) {
695 ping_monitor(container);
696 /* if we get EAGAIN here then the monitor is not active
697 * so stop trying
698 */
699 rc = sysfs_set_num(sra, NULL, name, n);
700 }
701 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
702 return rc;
703 }
704
705 int start_reshape(struct mdinfo *sra, int already_running,
706 int before_data_disks, int data_disks)
707 {
708 int err;
709 unsigned long long sync_max_to_set;
710
711 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
712 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
713 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
714 sra->reshape_progress);
715 if (before_data_disks <= data_disks)
716 sync_max_to_set = sra->reshape_progress / data_disks;
717 else
718 sync_max_to_set = (sra->component_size * data_disks
719 - sra->reshape_progress) / data_disks;
720 if (!already_running)
721 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
722 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
723 if (!already_running)
724 err = err ?: sysfs_set_str(sra, NULL, "sync_action", "reshape");
725
726 return err;
727 }
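/* Worked example with illustrative numbers (assumptions, not from the
 * original source): when growing (before_data_disks <= data_disks), a
 * reshape_progress of 10240 sectors with data_disks = 5 gives
 * sync_max = 10240 / 5 = 2048 sectors per device.  When shrinking, the
 * reshape proceeds from the end of the array, so the value written is
 * (component_size * data_disks - reshape_progress) / data_disks instead.
 */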
728
729 void abort_reshape(struct mdinfo *sra)
730 {
731 sysfs_set_str(sra, NULL, "sync_action", "idle");
732 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
733 sysfs_set_num(sra, NULL, "suspend_hi", 0);
734 sysfs_set_num(sra, NULL, "suspend_lo", 0);
735 sysfs_set_num(sra, NULL, "sync_min", 0);
736 sysfs_set_str(sra, NULL, "sync_max", "max");
737 }
738
739 int remove_disks_for_takeover(struct supertype *st,
740 struct mdinfo *sra,
741 int layout)
742 {
743 int nr_of_copies;
744 struct mdinfo *remaining;
745 int slot;
746
747 if (sra->array.level == 10)
748 nr_of_copies = layout & 0xff;
749 else if (sra->array.level == 1)
750 nr_of_copies = sra->array.raid_disks;
751 else
752 return 1;
753
754 remaining = sra->devs;
755 sra->devs = NULL;
756 /* for each 'copy', select one device and remove from the list. */
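/* Illustrative example (assumed configuration): for a 4-disk near-2 RAID10,
 * nr_of_copies = 2, so the slots are grouped as {0,1} and {2,3}; one
 * in-sync device is kept from each group and the rest are failed and
 * removed below, leaving a set suitable for RAID0 takeover.
 */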
757 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
758 struct mdinfo **diskp;
759 int found = 0;
760
761 /* Find a working device to keep */
762 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
763 struct mdinfo *disk = *diskp;
764
765 if (disk->disk.raid_disk < slot)
766 continue;
767 if (disk->disk.raid_disk >= slot + nr_of_copies)
768 continue;
769 if (disk->disk.state & (1<<MD_DISK_REMOVED))
770 continue;
771 if (disk->disk.state & (1<<MD_DISK_FAULTY))
772 continue;
773 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
774 continue;
775
776 /* We have found a good disk to use! */
777 *diskp = disk->next;
778 disk->next = sra->devs;
779 sra->devs = disk;
780 found = 1;
781 break;
782 }
783 if (!found)
784 break;
785 }
786
787 if (slot < sra->array.raid_disks) {
788 /* didn't find all slots */
789 struct mdinfo **e;
790 e = &remaining;
791 while (*e)
792 e = &(*e)->next;
793 *e = sra->devs;
794 sra->devs = remaining;
795 return 1;
796 }
797
798 /* Remove all 'remaining' devices from the array */
799 while (remaining) {
800 struct mdinfo *sd = remaining;
801 remaining = sd->next;
802
803 sysfs_set_str(sra, sd, "state", "faulty");
804 sysfs_set_str(sra, sd, "slot", "none");
805 /* for external metadata disks should be removed in mdmon */
806 if (!st->ss->external)
807 sysfs_set_str(sra, sd, "state", "remove");
808 sd->disk.state |= (1<<MD_DISK_REMOVED);
809 sd->disk.state &= ~(1<<MD_DISK_SYNC);
810 sd->next = sra->devs;
811 sra->devs = sd;
812 }
813 return 0;
814 }
815
816 void reshape_free_fdlist(int *fdlist,
817 unsigned long long *offsets,
818 int size)
819 {
820 int i;
821
822 for (i = 0; i < size; i++)
823 if (fdlist[i] >= 0)
824 close(fdlist[i]);
825
826 free(fdlist);
827 free(offsets);
828 }
829
830 int reshape_prepare_fdlist(char *devname,
831 struct mdinfo *sra,
832 int raid_disks,
833 int nrdisks,
834 unsigned long blocks,
835 char *backup_file,
836 int *fdlist,
837 unsigned long long *offsets)
838 {
839 int d = 0;
840 struct mdinfo *sd;
841
842 enable_fds(nrdisks);
843 for (d = 0; d <= nrdisks; d++)
844 fdlist[d] = -1;
845 d = raid_disks;
846 for (sd = sra->devs; sd; sd = sd->next) {
847 if (sd->disk.state & (1<<MD_DISK_FAULTY))
848 continue;
849 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
850 char *dn = map_dev(sd->disk.major,
851 sd->disk.minor, 1);
852 fdlist[sd->disk.raid_disk]
853 = dev_open(dn, O_RDONLY);
854 offsets[sd->disk.raid_disk] = sd->data_offset*512;
855 if (fdlist[sd->disk.raid_disk] < 0) {
856 pr_err("%s: cannot open component %s\n",
857 devname, dn ? dn : "-unknown-");
858 d = -1;
859 goto release;
860 }
861 } else if (backup_file == NULL) {
862 /* spare */
863 char *dn = map_dev(sd->disk.major,
864 sd->disk.minor, 1);
865 fdlist[d] = dev_open(dn, O_RDWR);
866 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
867 if (fdlist[d] < 0) {
868 pr_err("%s: cannot open component %s\n",
869 devname, dn ? dn : "-unknown-");
870 d = -1;
871 goto release;
872 }
873 d++;
874 }
875 }
876 release:
877 return d;
878 }
879
880 int reshape_open_backup_file(char *backup_file,
881 int fd,
882 char *devname,
883 long blocks,
884 int *fdlist,
885 unsigned long long *offsets,
886 int restart)
887 {
888 /* Return 1 on success, 0 on any form of failure */
889 /* need to check backup file is large enough */
890 char buf[512];
891 struct stat stb;
892 unsigned int dev;
893 int i;
894
895 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
896 S_IRUSR | S_IWUSR);
897 *offsets = 8 * 512;
898 if (*fdlist < 0) {
899 pr_err("%s: cannot create backup file %s: %s\n",
900 devname, backup_file, strerror(errno));
901 return 0;
902 }
903 /* Guard against backup file being on array device.
904 * If array is partitioned or if LVM etc is in the
905 * way this will not notice, but it is better than
906 * nothing.
907 */
908 fstat(*fdlist, &stb);
909 dev = stb.st_dev;
910 fstat(fd, &stb);
911 if (stb.st_rdev == dev) {
912 pr_err("backup file must NOT be"
913 " on the array being reshaped.\n");
914 close(*fdlist);
915 return 0;
916 }
917
918 memset(buf, 0, 512);
919 for (i=0; i < blocks + 8 ; i++) {
920 if (write(*fdlist, buf, 512) != 512) {
921 pr_err("%s: cannot create"
922 " backup file %s: %s\n",
923 devname, backup_file, strerror(errno));
924 return 0;
925 }
926 }
927 if (fsync(*fdlist) != 0) {
928 pr_err("%s: cannot create backup file %s: %s\n",
929 devname, backup_file, strerror(errno));
930 return 0;
931 }
932
933 return 1;
934 }
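/* Illustrative sizing (example numbers only): the file is zero-filled with
 * blocks + 8 sectors, so for blocks = 12288 it occupies (12288 + 8) * 512
 * bytes, roughly 6MiB; the backed-up data itself starts 8 sectors (4K)
 * into the file, which is why *offsets is set to 8 * 512 above.
 */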
935
936 unsigned long GCD(unsigned long a, unsigned long b)
937 {
938 while (a != b) {
939 if (a < b)
940 b -= a;
941 if (b < a)
942 a -= b;
943 }
944 return a;
945 }
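/* This is the subtraction form of Euclid's algorithm, e.g. GCD(3072, 4096):
 * (3072,4096) -> (3072,1024) -> (2048,1024) -> (1024,1024) -> 1024.
 * Both arguments are assumed non-zero here; a zero argument would never
 * terminate.
 */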
946
947 unsigned long compute_backup_blocks(int nchunk, int ochunk,
948 unsigned int ndata, unsigned int odata)
949 {
950 unsigned long a, b, blocks;
951 /* So how much do we need to back up?
952 * We need an amount of data which is both a whole number of
953 * old stripes and a whole number of new stripes.
954 * So we need the LCM of the old and new (chunksize*datadisks).
955 */
956 a = (ochunk/512) * odata;
957 b = (nchunk/512) * ndata;
958 /* Find GCD */
959 a = GCD(a, b);
960 /* LCM == product / GCD */
961 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
962
963 return blocks;
964 }
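/* Worked example with illustrative numbers (assumptions, not from the
 * original source): growing a RAID5 from 4 to 5 devices (odata = 3,
 * ndata = 4) with a 512K chunk before and after gives
 * a = 1024 * 3 = 3072 and b = 1024 * 4 = 4096 sectors, GCD = 1024, so
 * blocks = 1024 * 1024 * 3 * 4 / 1024 = 12288 sectors (6MiB) per
 * backup/restore cycle.
 */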
965
966 char *analyse_change(struct mdinfo *info, struct reshape *re)
967 {
968 /* Based on the current array state in info->array and
969 * the changes in info->new_* etc, determine:
970 * - whether the change is possible
971 * - Intermediate level/raid_disks/layout
972 * - whether a restriping reshape is needed
973 * - number of sectors in minimum change unit. This
974 * will cover a whole number of stripes in 'before' and
975 * 'after'.
976 *
977 * Return message if the change should be rejected
978 * NULL if the change can be achieved
979 *
980 * This can be called as part of starting a reshape, or
981 * when assembling an array that is undergoing reshape.
982 */
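/* Illustrative outcome (example values, not from the original source): for
 * a 4-disk RAID5 grown to 5 disks with chunk and layout unchanged, this
 * function would leave re->level = 5, re->before.data_disks = 3,
 * re->after.data_disks = 4, identical before/after layouts, and
 * re->backup_blocks filled in from compute_backup_blocks() near the end.
 */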
983 int near, far, offset, copies;
984 int new_disks;
985 int old_chunk, new_chunk;
986 /* delta_parity records change in number of devices
987 * caused by level change
988 */
989 int delta_parity = 0;
990
991 memset(re, 0, sizeof(*re));
992
993 /* If a new level not explicitly given, we assume no-change */
994 if (info->new_level == UnSet)
995 info->new_level = info->array.level;
996
997 if (info->new_chunk)
998 switch (info->new_level) {
999 case 0:
1000 case 4:
1001 case 5:
1002 case 6:
1003 case 10:
1004 /* chunk size is meaningful, must divide component_size
1005 * evenly
1006 */
1007 if (info->component_size % (info->new_chunk/512))
1008 return "New chunk size does not"
1009 " divide component size";
1010 break;
1011 default:
1012 return "chunk size not meaningful for this level";
1013 }
1014 else
1015 info->new_chunk = info->array.chunk_size;
1016
1017 switch (info->array.level) {
1018 default:
1019 return "Cannot understand this RAID level";
1020 case 1:
1021 /* RAID1 can convert to RAID1 with different disks, or
1022 * raid5 with 2 disks, or
1023 * raid0 with 1 disk
1024 */
1025 if (info->new_level > 1 &&
1026 (info->component_size & 7))
1027 return "Cannot convert RAID1 of this size - "
1028 "reduce size to multiple of 4K first.";
1029 if (info->new_level == 0) {
1030 if (info->delta_disks != UnSet &&
1031 info->delta_disks != 0)
1032 return "Cannot change number of disks "
1033 "with RAID1->RAID0 conversion";
1034 re->level = 0;
1035 re->before.data_disks = 1;
1036 re->after.data_disks = 1;
1037 return NULL;
1038 }
1039 if (info->new_level == 1) {
1040 if (info->delta_disks == UnSet)
1041 /* Don't know what to do */
1042 return "no change requested for Growing RAID1";
1043 re->level = 1;
1044 return NULL;
1045 }
1046 if (info->array.raid_disks == 2 &&
1047 info->new_level == 5) {
1048
1049 re->level = 5;
1050 re->before.data_disks = 1;
1051 if (info->delta_disks != UnSet &&
1052 info->delta_disks != 0)
1053 re->after.data_disks = 1 + info->delta_disks;
1054 else
1055 re->after.data_disks = 1;
1056 if (re->after.data_disks < 1)
1057 return "Number of disks too small for RAID5";
1058
1059 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1060 info->array.chunk_size = 65536;
1061 break;
1062 }
1063 /* Could do some multi-stage conversions, but leave that to
1064 * later.
1065 */
1066 return "Impossibly level change request for RAID1";
1067
1068 case 10:
1069 /* RAID10 can be converted from near mode to
1070 * RAID0 by removing some devices.
1071 * It can also be reshaped if the kernel supports
1072 * new_data_offset.
1073 */
1074 switch (info->new_level) {
1075 case 0:
1076 if ((info->array.layout & ~0xff) != 0x100)
1077 return "Cannot Grow RAID10 with far/offset layout";
1078 /* number of devices must be multiple of number of copies */
1079 if (info->array.raid_disks % (info->array.layout & 0xff))
1080 return "RAID10 layout too complex for Grow operation";
1081
1082 new_disks = (info->array.raid_disks
1083 / (info->array.layout & 0xff));
1084 if (info->delta_disks == UnSet)
1085 info->delta_disks = (new_disks
1086 - info->array.raid_disks);
1087
1088 if (info->delta_disks != new_disks - info->array.raid_disks)
1089 return "New number of raid-devices impossible for RAID10";
1090 if (info->new_chunk &&
1091 info->new_chunk != info->array.chunk_size)
1092 return "Cannot change chunk-size with RAID10 Grow";
1093
1094 /* looks good */
1095 re->level = 0;
1096 re->before.data_disks = new_disks;
1097 re->after.data_disks = re->before.data_disks;
1098 return NULL;
1099
1100 case 10:
1101 near = info->array.layout & 0xff;
1102 far = (info->array.layout >> 8) & 0xff;
1103 offset = info->array.layout & 0x10000;
1104 if (far > 1 && !offset)
1105 return "Cannot reshape RAID10 in far-mode";
1106 copies = near * far;
1107
1108 old_chunk = info->array.chunk_size * far;
1109
1110 if (info->new_layout == UnSet)
1111 info->new_layout = info->array.layout;
1112 else {
1113 near = info->new_layout & 0xff;
1114 far = (info->new_layout >> 8) & 0xff;
1115 offset = info->new_layout & 0x10000;
1116 if (far > 1 && !offset)
1117 return "Cannot reshape RAID10 to far-mode";
1118 if (near * far != copies)
1119 return "Cannot change number of copies"
1120 " when reshaping RAID10";
1121 }
1122 if (info->delta_disks == UnSet)
1123 info->delta_disks = 0;
1124 new_disks = (info->array.raid_disks +
1125 info->delta_disks);
1126
1127 new_chunk = info->new_chunk * far;
1128
1129 re->level = 10;
1130 re->before.layout = info->array.layout;
1131 re->before.data_disks = info->array.raid_disks;
1132 re->after.layout = info->new_layout;
1133 re->after.data_disks = new_disks;
1134 /* For RAID10 we don't do backup but do allow reshape,
1135 * so set backup_blocks to INVALID_SECTORS rather than
1136 * zero.
1137 * And there is no need to synchronise stripes on both
1138 * 'old' and 'new'. So the important
1139 * number is the minimum data_offset difference
1140 * which is the larger of the old and new (offset copies * chunk).
1141 */
1142 re->backup_blocks = INVALID_SECTORS;
1143 re->min_offset_change = max(old_chunk, new_chunk) / 512;
1144 if (new_disks < re->before.data_disks &&
1145 info->space_after < re->min_offset_change)
1146 /* Reduce component size by one chunk */
1147 re->new_size = (info->component_size -
1148 re->min_offset_change);
1149 else
1150 re->new_size = info->component_size;
1151 re->new_size = re->new_size * new_disks / copies;
1152 return NULL;
1153
1154 default:
1155 return "RAID10 can only be changed to RAID0";
1156 }
1157 case 0:
1158 /* RAID0 can be converted to RAID10, or to RAID456 */
1159 if (info->new_level == 10) {
1160 if (info->new_layout == UnSet && info->delta_disks == UnSet) {
1161 /* Assume near=2 layout */
1162 info->new_layout = 0x102;
1163 info->delta_disks = info->array.raid_disks;
1164 }
1165 if (info->new_layout == UnSet) {
1166 int copies = 1 + (info->delta_disks
1167 / info->array.raid_disks);
1168 if (info->array.raid_disks * (copies-1)
1169 != info->delta_disks)
1170 return "Impossible number of devices"
1171 " for RAID0->RAID10";
1172 info->new_layout = 0x100 + copies;
1173 }
1174 if (info->delta_disks == UnSet) {
1175 int copies = info->new_layout & 0xff;
1176 if (info->new_layout != 0x100 + copies)
1177 return "New layout impossible"
1178 " for RAID0->RAID10";;
1179 info->delta_disks = (copies - 1) *
1180 info->array.raid_disks;
1181 }
1182 if (info->new_chunk &&
1183 info->new_chunk != info->array.chunk_size)
1184 return "Cannot change chunk-size with RAID0->RAID10";
1185 /* looks good */
1186 re->level = 10;
1187 re->before.data_disks = (info->array.raid_disks +
1188 info->delta_disks);
1189 re->after.data_disks = re->before.data_disks;
1190 re->before.layout = info->new_layout;
1191 return NULL;
1192 }
1193
1194 /* RAID0 can also convert to RAID0/4/5/6 by first converting to
1195 * a raid4 style layout of the final level.
1196 */
1197 switch (info->new_level) {
1198 case 4:
1199 delta_parity = 1;
1200 case 0:
1201 re->level = 4;
1202 re->before.layout = 0;
1203 break;
1204 case 5:
1205 delta_parity = 1;
1206 re->level = 5;
1207 re->before.layout = ALGORITHM_PARITY_N;
1208 break;
1209 case 6:
1210 delta_parity = 2;
1211 re->level = 6;
1212 re->before.layout = ALGORITHM_PARITY_N;
1213 break;
1214 default:
1215 return "Impossible level change requested";
1216 }
1217 re->before.data_disks = info->array.raid_disks;
1218 /* determining 'after' layout happens outside this 'switch' */
1219 break;
1220
1221 case 4:
1222 info->array.layout = ALGORITHM_PARITY_N;
1223 case 5:
1224 switch (info->new_level) {
1225 case 0:
1226 delta_parity = -1;
1227 case 4:
1228 re->level = info->array.level;
1229 re->before.data_disks = info->array.raid_disks - 1;
1230 re->before.layout = info->array.layout;
1231 break;
1232 case 5:
1233 re->level = 5;
1234 re->before.data_disks = info->array.raid_disks - 1;
1235 re->before.layout = info->array.layout;
1236 break;
1237 case 6:
1238 delta_parity = 1;
1239 re->level = 6;
1240 re->before.data_disks = info->array.raid_disks - 1;
1241 switch (info->array.layout) {
1242 case ALGORITHM_LEFT_ASYMMETRIC:
1243 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1244 break;
1245 case ALGORITHM_RIGHT_ASYMMETRIC:
1246 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1247 break;
1248 case ALGORITHM_LEFT_SYMMETRIC:
1249 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1250 break;
1251 case ALGORITHM_RIGHT_SYMMETRIC:
1252 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1253 break;
1254 case ALGORITHM_PARITY_0:
1255 re->before.layout = ALGORITHM_PARITY_0_6;
1256 break;
1257 case ALGORITHM_PARITY_N:
1258 re->before.layout = ALGORITHM_PARITY_N_6;
1259 break;
1260 default:
1261 return "Cannot convert an array with this layout";
1262 }
1263 break;
1264 case 1:
1265 if (info->array.raid_disks != 2)
1266 return "Can only convert a 2-device array to RAID1";
1267 if (info->delta_disks != UnSet &&
1268 info->delta_disks != 0)
1269 return "Cannot set raid_disk when "
1270 "converting RAID5->RAID1";
1271 re->level = 1;
1272 info->new_chunk = 0;
1273 return NULL;
1274 default:
1275 return "Impossible level change requested";
1276 }
1277 break;
1278 case 6:
1279 switch (info->new_level) {
1280 case 4:
1281 case 5:
1282 delta_parity = -1;
1283 case 6:
1284 re->level = 6;
1285 re->before.data_disks = info->array.raid_disks - 2;
1286 re->before.layout = info->array.layout;
1287 break;
1288 default:
1289 return "Impossible level change requested";
1290 }
1291 break;
1292 }
1293
1294 /* If we reached here then it looks like a re-stripe is
1295 * happening. We have determined the intermediate level
1296 * and initial raid_disks/layout and stored these in 're'.
1297 *
1298 * We need to deduce the final layout that can be atomically
1299 * converted to the end state.
1300 */
1301 switch (info->new_level) {
1302 case 0:
1303 /* We can only get to RAID0 from RAID4 or RAID5
1304 * with appropriate layout and one extra device
1305 */
1306 if (re->level != 4 && re->level != 5)
1307 return "Cannot covert to RAID0 from this level";
1308
1309 switch (re->level) {
1310 case 4:
1311 re->before.layout = 0;
1312 re->after.layout = 0;
1313 break;
1314 case 5:
1315 re->after.layout = ALGORITHM_PARITY_N;
1316 break;
1317 }
1318 break;
1319
1320 case 4:
1321 /* We can only get to RAID4 from RAID5 */
1322 if (re->level != 4 && re->level != 5)
1323 return "Cannot convert to RAID4 from this level";
1324
1325 switch (re->level) {
1326 case 4:
1327 re->before.layout = 0;
1328 re->after.layout = 0;
1329 break;
1330 case 5:
1331 re->after.layout = ALGORITHM_PARITY_N;
1332 break;
1333 }
1334 break;
1335
1336 case 5:
1337 /* We get to RAID5 from RAID5 or RAID6 */
1338 if (re->level != 5 && re->level != 6)
1339 return "Cannot convert to RAID5 from this level";
1340
1341 switch (re->level) {
1342 case 5:
1343 if (info->new_layout == UnSet)
1344 re->after.layout = re->before.layout;
1345 else
1346 re->after.layout = info->new_layout;
1347 break;
1348 case 6:
1349 if (info->new_layout == UnSet)
1350 info->new_layout = re->before.layout;
1351
1352 /* after.layout needs to be raid6 version of new_layout */
1353 if (info->new_layout == ALGORITHM_PARITY_N)
1354 re->after.layout = ALGORITHM_PARITY_N;
1355 else {
1356 char layout[40];
1357 char *ls = map_num(r5layout, info->new_layout);
1358 int l;
1359 if (ls) {
1360 /* Current RAID6 layout has a RAID5
1361 * equivalent - good
1362 */
1363 strcat(strcpy(layout, ls), "-6");
1364 l = map_name(r6layout, layout);
1365 if (l == UnSet)
1366 return "Cannot find RAID6 layout"
1367 " to convert to";
1368 } else {
1369 /* Current RAID6 has no equivalent.
1370 * If it is already a '-6' layout we
1371 * can leave it unchanged, else we must
1372 * fail
1373 */
1374 ls = map_num(r6layout, info->new_layout);
1375 if (!ls ||
1376 strcmp(ls+strlen(ls)-2, "-6") != 0)
1377 return "Please specify new layout";
1378 l = info->new_layout;
1379 }
1380 re->after.layout = l;
1381 }
1382 }
1383 break;
1384
1385 case 6:
1386 /* We must already be at level 6 */
1387 if (re->level != 6)
1388 return "Impossible level change";
1389 if (info->new_layout == UnSet)
1390 re->after.layout = info->array.layout;
1391 else
1392 re->after.layout = info->new_layout;
1393 break;
1394 default:
1395 return "Impossible level change requested";
1396 }
1397 if (info->delta_disks == UnSet)
1398 info->delta_disks = delta_parity;
1399
1400 re->after.data_disks = (re->before.data_disks
1401 + info->delta_disks
1402 - delta_parity);
1403 switch (re->level) {
1404 case 6: re->parity = 2;
1405 break;
1406 case 4:
1407 case 5: re->parity = 1;
1408 break;
1409 default: re->parity = 0;
1410 break;
1411 }
1412 /* So we have a restripe operation, we need to calculate the number
1413 * of blocks per reshape operation.
1414 */
1415 re->new_size = info->component_size * re->before.data_disks;
1416 if (info->new_chunk == 0)
1417 info->new_chunk = info->array.chunk_size;
1418 if (re->after.data_disks == re->before.data_disks &&
1419 re->after.layout == re->before.layout &&
1420 info->new_chunk == info->array.chunk_size) {
1421 /* Nothing to change, can change level immediately. */
1422 re->level = info->new_level;
1423 re->backup_blocks = 0;
1424 return NULL;
1425 }
1426 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1427 /* chunk and layout changes make no difference */
1428 re->level = info->new_level;
1429 re->backup_blocks = 0;
1430 return NULL;
1431 }
1432
1433 if (re->after.data_disks == re->before.data_disks &&
1434 get_linux_version() < 2006032)
1435 return "in-place reshape is not safe before 2.6.32 - sorry.";
1436
1437 if (re->after.data_disks < re->before.data_disks &&
1438 get_linux_version() < 2006030)
1439 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1440
1441 re->backup_blocks = compute_backup_blocks(
1442 info->new_chunk, info->array.chunk_size,
1443 re->after.data_disks,
1444 re->before.data_disks);
1445 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1446
1447 re->new_size = info->component_size * re->after.data_disks;
1448 return NULL;
1449 }
1450
1451 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1452 char *text_version)
1453 {
1454 struct mdinfo *info;
1455 char *subarray;
1456 int ret_val = -1;
1457
1458 if ((st == NULL) || (sra == NULL))
1459 return ret_val;
1460
1461 if (text_version == NULL)
1462 text_version = sra->text_version;
1463 subarray = strchr(text_version+1, '/')+1;
1464 info = st->ss->container_content(st, subarray);
1465 if (info) {
1466 unsigned long long current_size = 0;
1467 unsigned long long new_size =
1468 info->custom_array_size/2;
1469
1470 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1471 new_size > current_size) {
1472 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1473 < 0)
1474 dprintf("Error: Cannot set array size");
1475 else {
1476 ret_val = 0;
1477 dprintf("Array size changed");
1478 }
1479 dprintf(" from %llu to %llu.\n",
1480 current_size, new_size);
1481 }
1482 sysfs_free(info);
1483 } else
1484 dprintf("Error: set_array_size(): info pointer in NULL\n");
1485
1486 return ret_val;
1487 }
1488
1489 static int reshape_array(char *container, int fd, char *devname,
1490 struct supertype *st, struct mdinfo *info,
1491 int force, struct mddev_dev *devlist,
1492 unsigned long long data_offset,
1493 char *backup_file, int verbose, int forked,
1494 int restart, int freeze_reshape);
1495 static int reshape_container(char *container, char *devname,
1496 int mdfd,
1497 struct supertype *st,
1498 struct mdinfo *info,
1499 int force,
1500 char *backup_file,
1501 int verbose, int restart, int freeze_reshape);
1502
1503 int Grow_reshape(char *devname, int fd,
1504 struct mddev_dev *devlist,
1505 unsigned long long data_offset,
1506 struct context *c, struct shape *s)
1507 {
1508 /* Make some changes in the shape of an array.
1509 * The kernel must support the change.
1510 *
1511 * There are three different changes. Each can trigger
1512 * a resync or recovery so we freeze that until we have
1513 * requested everything (if kernel supports freezing - 2.6.30).
1514 * The steps are:
1515 * - change size (i.e. component_size)
1516 * - change level
1517 * - change layout/chunksize/ndisks
1518 *
1519 * The last can require a reshape. It is different on different
1520 * levels so we need to check the level before actioning it.
1521 * Sometimes the level change needs to be requested after the
1522 * reshape (e.g. raid6->raid5, raid5->raid0)
1523 *
1524 */
1525 struct mdu_array_info_s array;
1526 int rv = 0;
1527 struct supertype *st;
1528 char *subarray = NULL;
1529
1530 int frozen;
1531 int changed = 0;
1532 char *container = NULL;
1533 int cfd = -1;
1534
1535 struct mddev_dev *dv;
1536 int added_disks;
1537
1538 struct mdinfo info;
1539 struct mdinfo *sra;
1540
1541 if (ioctl(fd, GET_ARRAY_INFO, &array) < 0) {
1542 pr_err("%s is not an active md array - aborting\n",
1543 devname);
1544 return 1;
1545 }
1546 if (data_offset != INVALID_SECTORS && array.level != 10
1547 && (array.level < 4 || array.level > 6)) {
1548 pr_err("--grow --data-offset not yet supported\n");
1549 return 1;
1550 }
1551
1552 if (s->size > 0 &&
1553 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1554 pr_err("cannot change component size at the same time "
1555 "as other changes.\n"
1556 " Change size first, then check data is intact before "
1557 "making other changes.\n");
1558 return 1;
1559 }
1560
1561 if (s->raiddisks && s->raiddisks < array.raid_disks && array.level > 1 &&
1562 get_linux_version() < 2006032 &&
1563 !check_env("MDADM_FORCE_FEWER")) {
1564 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1565 " Please use a newer kernel\n");
1566 return 1;
1567 }
1568
1569 st = super_by_fd(fd, &subarray);
1570 if (!st) {
1571 pr_err("Unable to determine metadata format for %s\n", devname);
1572 return 1;
1573 }
1574 if (s->raiddisks > st->max_devs) {
1575 pr_err("Cannot increase raid-disks on this array"
1576 " beyond %d\n", st->max_devs);
1577 return 1;
1578 }
1579
1580 /* in the external case we need to check that the requested reshape is
1581 * supported, and perform an initial check that the container holds the
1582 * pre-requisite spare devices (mdmon owns final validation)
1583 */
1584 if (st->ss->external) {
1585 int rv;
1586
1587 if (subarray) {
1588 container = st->container_devnm;
1589 cfd = open_dev_excl(st->container_devnm);
1590 } else {
1591 container = st->devnm;
1592 close(fd);
1593 cfd = open_dev_excl(st->devnm);
1594 fd = cfd;
1595 }
1596 if (cfd < 0) {
1597 pr_err("Unable to open container for %s\n",
1598 devname);
1599 free(subarray);
1600 return 1;
1601 }
1602
1603 rv = st->ss->load_container(st, cfd, NULL);
1604
1605 if (rv) {
1606 pr_err("Cannot read superblock for %s\n",
1607 devname);
1608 free(subarray);
1609 return 1;
1610 }
1611
1612 /* check if operation is supported for metadata handler */
1613 if (st->ss->container_content) {
1614 struct mdinfo *cc = NULL;
1615 struct mdinfo *content = NULL;
1616
1617 cc = st->ss->container_content(st, subarray);
1618 for (content = cc; content ; content = content->next) {
1619 int allow_reshape = 1;
1620
1621 /* check if reshape is allowed based on metadata
1622 * indications stored in content.array.status
1623 */
1624 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
1625 allow_reshape = 0;
1626 if (content->array.state
1627 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE))
1628 allow_reshape = 0;
1629 if (!allow_reshape) {
1630 pr_err("cannot reshape arrays in"
1631 " container with unsupported"
1632 " metadata: %s(%s)\n",
1633 devname, container);
1634 sysfs_free(cc);
1635 free(subarray);
1636 return 1;
1637 }
1638 }
1639 sysfs_free(cc);
1640 }
1641 if (mdmon_running(container))
1642 st->update_tail = &st->updates;
1643 }
1644
1645 added_disks = 0;
1646 for (dv = devlist; dv; dv = dv->next)
1647 added_disks++;
1648 if (s->raiddisks > array.raid_disks &&
1649 array.spare_disks +added_disks < (s->raiddisks - array.raid_disks) &&
1650 !c->force) {
1651 pr_err("Need %d spare%s to avoid degraded array,"
1652 " and only have %d.\n"
1653 " Use --force to over-ride this check.\n",
1654 s->raiddisks - array.raid_disks,
1655 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1656 array.spare_disks + added_disks);
1657 return 1;
1658 }
1659
1660 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS
1661 | GET_STATE | GET_VERSION);
1662 if (sra) {
1663 if (st->ss->external && subarray == NULL) {
1664 array.level = LEVEL_CONTAINER;
1665 sra->array.level = LEVEL_CONTAINER;
1666 }
1667 } else {
1668 pr_err("failed to read sysfs parameters for %s\n",
1669 devname);
1670 return 1;
1671 }
1672 frozen = freeze(st);
1673 if (frozen < -1) {
1674 /* freeze() already spewed the reason */
1675 sysfs_free(sra);
1676 return 1;
1677 } else if (frozen < 0) {
1678 pr_err("%s is performing resync/recovery and cannot"
1679 " be reshaped\n", devname);
1680 sysfs_free(sra);
1681 return 1;
1682 }
1683
1684 /* ========= set size =============== */
1685 if (s->size > 0 && (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1686 unsigned long long orig_size = get_component_size(fd)/2;
1687 unsigned long long min_csize;
1688 struct mdinfo *mdi;
1689 int raid0_takeover = 0;
1690
1691 if (orig_size == 0)
1692 orig_size = (unsigned) array.size;
1693
1694 if (orig_size == 0) {
1695 pr_err("Cannot set device size in this type of array.\n");
1696 rv = 1;
1697 goto release;
1698 }
1699
1700 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1701 devname, APPLY_METADATA_CHANGES, c->verbose > 0)) {
1702 rv = 1;
1703 goto release;
1704 }
1705 sync_metadata(st);
1706 if (st->ss->external) {
1707 /* metadata can have size limitation
1708 * update size value according to metadata information
1709 */
1710 struct mdinfo *sizeinfo =
1711 st->ss->container_content(st, subarray);
1712 if (sizeinfo) {
1713 unsigned long long new_size =
1714 sizeinfo->custom_array_size/2;
1715 int data_disks = get_data_disks(
1716 sizeinfo->array.level,
1717 sizeinfo->array.layout,
1718 sizeinfo->array.raid_disks);
1719 new_size /= data_disks;
1720 dprintf("Metadata size correction from %llu to "
1721 "%llu (%llu)\n", orig_size, new_size,
1722 new_size * data_disks);
1723 s->size = new_size;
1724 sysfs_free(sizeinfo);
1725 }
1726 }
1727
1728 /* Update the size of each member device in case
1729 * they have been resized. This will never reduce
1730 * below the current used-size. The "size" attribute
1731 * understands '0' to mean 'max'.
1732 */
1733 min_csize = 0;
1734 rv = 0;
1735 for (mdi = sra->devs; mdi; mdi = mdi->next) {
1736 if (sysfs_set_num(sra, mdi, "size",
1737 s->size == MAX_SIZE ? 0 : s->size) < 0) {
1738 /* Probably kernel refusing to let us
1739 * reduce the size - not an error.
1740 */
1741 break;
1742 }
1743 if (array.not_persistent == 0 &&
1744 array.major_version == 0 &&
1745 get_linux_version() < 3001000) {
1746 /* Dangerous to allow size to exceed 2TB */
1747 unsigned long long csize;
1748 if (sysfs_get_ll(sra, mdi, "size", &csize) == 0) {
1749 if (csize >= 2ULL*1024*1024*1024)
1750 csize = 2ULL*1024*1024*1024;
1751 if ((min_csize == 0 || (min_csize
1752 > csize)))
1753 min_csize = csize;
1754 }
1755 }
1756 }
1757 if (rv) {
1758 pr_err("Cannot set size on "
1759 "array members.\n");
1760 goto size_change_error;
1761 }
1762 if (min_csize && s->size > min_csize) {
1763 pr_err("Cannot safely make this array "
1764 "use more than 2TB per device on this kernel.\n");
1765 rv = 1;
1766 goto size_change_error;
1767 }
1768 if (min_csize && s->size == MAX_SIZE) {
1769 /* Don't let the kernel choose a size - it will get
1770 * it wrong
1771 */
1772 pr_err("Limited v0.90 array to "
1773 "2TB per device\n");
1774 s->size = min_csize;
1775 }
1776 if (st->ss->external) {
1777 if (sra->array.level == 0) {
1778 rv = sysfs_set_str(sra, NULL, "level",
1779 "raid5");
1780 if (!rv) {
1781 raid0_takeover = 1;
1782 /* get array parameters after takeover
1783 * to change one parameter at a time only
1784 */
1785 rv = ioctl(fd, GET_ARRAY_INFO, &array);
1786 }
1787 }
1788 /* make sure mdmon is
1789 * aware of the new level */
1790 if (!mdmon_running(st->container_devnm))
1791 start_mdmon(st->container_devnm);
1792 ping_monitor(container);
1793 if (mdmon_running(st->container_devnm) &&
1794 st->update_tail == NULL)
1795 st->update_tail = &st->updates;
1796 }
1797
1798 if (s->size == MAX_SIZE)
1799 s->size = 0;
1800 array.size = s->size;
1801 if ((unsigned)array.size != s->size) {
1802 /* got truncated to 32bit, write to
1803 * component_size instead
1804 */
1805 if (sra)
1806 rv = sysfs_set_num(sra, NULL,
1807 "component_size", s->size);
1808 else
1809 rv = -1;
1810 } else {
1811 rv = ioctl(fd, SET_ARRAY_INFO, &array);
1812
1813 /* manage array size when it is managed externally
1814 */
1815 if ((rv == 0) && st->ss->external)
1816 rv = set_array_size(st, sra, sra->text_version);
1817 }
1818
1819 if (raid0_takeover) {
1820 /* do not resync non-existent parity,
1821 * we will drop it anyway
1822 */
1823 sysfs_set_str(sra, NULL, "sync_action", "frozen");
1824 /* go back to raid0, drop parity disk
1825 */
1826 sysfs_set_str(sra, NULL, "level", "raid0");
1827 ioctl(fd, GET_ARRAY_INFO, &array);
1828 }
1829
1830 size_change_error:
1831 if (rv != 0) {
1832 int err = errno;
1833
1834 /* restore metadata */
1835 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
1836 UnSet, NULL, devname,
1837 ROLLBACK_METADATA_CHANGES,
1838 c->verbose) == 0)
1839 sync_metadata(st);
1840 pr_err("Cannot set device size for %s: %s\n",
1841 devname, strerror(err));
1842 if (err == EBUSY &&
1843 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
1844 cont_err("Bitmap must be removed before size can be changed\n");
1845 rv = 1;
1846 goto release;
1847 }
1848 if (s->assume_clean) {
1849 /* This will fail on kernels older than 3.0 unless
1850 * a backport has been arranged.
1851 */
1852 if (sra == NULL ||
1853 sysfs_set_str(sra, NULL, "resync_start", "none") < 0)
1854 pr_err("--assume-clean not supported with --grow on this kernel\n");
1855 }
1856 ioctl(fd, GET_ARRAY_INFO, &array);
1857 s->size = get_component_size(fd)/2;
1858 if (s->size == 0)
1859 s->size = array.size;
1860 if (c->verbose >= 0) {
1861 if (s->size == orig_size)
1862 pr_err("component size of %s "
1863 "unchanged at %lluK\n",
1864 devname, s->size);
1865 else
1866 pr_err("component size of %s "
1867 "has been set to %lluK\n",
1868 devname, s->size);
1869 }
1870 changed = 1;
1871 } else if (array.level != LEVEL_CONTAINER) {
1872 s->size = get_component_size(fd)/2;
1873 if (s->size == 0)
1874 s->size = array.size;
1875 }
1876
1877 /* See if there is anything else to do */
1878 if ((s->level == UnSet || s->level == array.level) &&
1879 (s->layout_str == NULL) &&
1880 (s->chunk == 0 || s->chunk == array.chunk_size) &&
1881 data_offset == INVALID_SECTORS &&
1882 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
1883 /* Nothing more to do */
1884 if (!changed && c->verbose >= 0)
1885 pr_err("%s: no change requested\n",
1886 devname);
1887 goto release;
1888 }
1889
1890 /* ========= check for Raid10/Raid1 -> Raid0 conversion ===============
1891 * current implementation assumes that following conditions must be met:
1892 * - RAID10:
1893 * - far_copies == 1
1894 * - near_copies == 2
1895 */
1896 if ((s->level == 0 && array.level == 10 && sra &&
1897 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
1898 (s->level == 0 && array.level == 1 && sra)) {
1899 int err;
1900 err = remove_disks_for_takeover(st, sra, array.layout);
1901 if (err) {
1902 dprintf(Name": Array cannot be reshaped\n");
1903 if (cfd > -1)
1904 close(cfd);
1905 rv = 1;
1906 goto release;
1907 }
1908 /* Make sure mdmon has seen the device removal
1909 * and updated metadata before we continue with
1910 * level change
1911 */
1912 if (container)
1913 ping_monitor(container);
1914 }
1915
1916 memset(&info, 0, sizeof(info));
1917 info.array = array;
1918 sysfs_init(&info, fd, NULL);
1919 strcpy(info.text_version, sra->text_version);
1920 info.component_size = s->size*2;
1921 info.new_level = s->level;
1922 info.new_chunk = s->chunk * 1024;
1923 if (info.array.level == LEVEL_CONTAINER) {
1924 info.delta_disks = UnSet;
1925 info.array.raid_disks = s->raiddisks;
1926 } else if (s->raiddisks)
1927 info.delta_disks = s->raiddisks - info.array.raid_disks;
1928 else
1929 info.delta_disks = UnSet;
1930 if (s->layout_str == NULL) {
1931 info.new_layout = UnSet;
1932 if (info.array.level == 6 &&
1933 (info.new_level == 6 || info.new_level == UnSet) &&
1934 info.array.layout >= 16) {
1935 pr_err("%s has a non-standard layout. If you"
1936 " wish to preserve this\n", devname);
1937 cont_err("during the reshape, please specify"
1938 " --layout=preserve\n");
1939 cont_err("If you want to change it, specify a"
1940 " layout or use --layout=normalise\n");
1941 rv = 1;
1942 goto release;
1943 }
1944 } else if (strcmp(s->layout_str, "normalise") == 0 ||
1945 strcmp(s->layout_str, "normalize") == 0) {
1946 /* If we have a -6 RAID6 layout, remove the '-6'. */
1947 info.new_layout = UnSet;
1948 if (info.array.level == 6 && info.new_level == UnSet) {
1949 char l[40], *h;
1950 strcpy(l, map_num(r6layout, info.array.layout));
1951 h = strrchr(l, '-');
1952 if (h && strcmp(h, "-6") == 0) {
1953 *h = 0;
1954 info.new_layout = map_name(r6layout, l);
1955 }
1956 } else {
1957 pr_err("%s is only meaningful when reshaping"
1958 " a RAID6 array.\n", s->layout_str);
1959 rv = 1;
1960 goto release;
1961 }
1962 } else if (strcmp(s->layout_str, "preserve") == 0) {
1963 /* This means that a non-standard RAID6 layout
1964 * is OK.
1965 * In particular:
1966 * - When reshape a RAID6 (e.g. adding a device)
1967 * which is in a non-standard layout, it is OK
1968 * to preserve that layout.
1969 * - When converting a RAID5 to RAID6, leave it in
1970 * the XXX-6 layout, don't re-layout.
1971 */
1972 if (info.array.level == 6 && info.new_level == UnSet)
1973 info.new_layout = info.array.layout;
1974 else if (info.array.level == 5 && info.new_level == 6) {
1975 char l[40];
1976 strcpy(l, map_num(r5layout, info.array.layout));
1977 strcat(l, "-6");
1978 info.new_layout = map_name(r6layout, l);
1979 } else {
1980 pr_err("%s in only meaningful when reshaping"
1981 " to RAID6\n", s->layout_str);
1982 rv = 1;
1983 goto release;
1984 }
1985 } else {
1986 int l = info.new_level;
1987 if (l == UnSet)
1988 l = info.array.level;
1989 switch (l) {
1990 case 5:
1991 info.new_layout = map_name(r5layout, s->layout_str);
1992 break;
1993 case 6:
1994 info.new_layout = map_name(r6layout, s->layout_str);
1995 break;
1996 case 10:
1997 info.new_layout = parse_layout_10(s->layout_str);
1998 break;
1999 case LEVEL_FAULTY:
2000 info.new_layout = parse_layout_faulty(s->layout_str);
2001 break;
2002 default:
2003 pr_err("layout not meaningful"
2004 " with this level\n");
2005 rv = 1;
2006 goto release;
2007 }
2008 if (info.new_layout == UnSet) {
2009 pr_err("layout %s not understood"
2010 " for this level\n",
2011 s->layout_str);
2012 rv = 1;
2013 goto release;
2014 }
2015 }
2016
2017 if (array.level == LEVEL_FAULTY) {
2018 if (s->level != UnSet && s->level != array.level) {
2019 pr_err("cannot change level of Faulty device\n");
2020 rv = 1;
2021 }
2022 if (s->chunk) {
2023 pr_err("cannot set chunksize of Faulty device\n");
2024 rv = 1;
2025 }
2026 if (s->raiddisks && s->raiddisks != 1) {
2027 pr_err("cannot set raid_disks of Faulty device\n");
2028 rv = 1;
2029 }
2030 if (s->layout_str) {
2031 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2032 dprintf("Cannot get array information.\n");
2033 goto release;
2034 }
2035 array.layout = info.new_layout;
2036 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2037 pr_err("failed to set new layout\n");
2038 rv = 1;
2039 } else if (c->verbose >= 0)
2040 printf("layout for %s set to %d\n",
2041 devname, array.layout);
2042 }
2043 } else if (array.level == LEVEL_CONTAINER) {
2044 /* This change is to be applied to every array in the
2045 * container. This is only needed when the metadata imposes
2046 * constraints on the various arrays in the container.
2047 * Currently we only know that IMSM requires all arrays
2048 * to have the same number of devices so changing the
2049 * number of devices (On-Line Capacity Expansion) must be
2050 * performed at the level of the container
2051 */
2052 rv = reshape_container(container, devname, -1, st, &info,
2053 c->force, c->backup_file, c->verbose, 0, 0);
2054 frozen = 0;
2055 } else {
2056 /* get spare devices from external metadata
2057 */
2058 if (st->ss->external) {
2059 struct mdinfo *info2;
2060
2061 info2 = st->ss->container_content(st, subarray);
2062 if (info2) {
2063 info.array.spare_disks =
2064 info2->array.spare_disks;
2065 sysfs_free(info2);
2066 }
2067 }
2068
2069 /* Impose these changes on a single array. First
2070 * check that the metadata is OK with the change. */
2071
2072 if (reshape_super(st, 0, info.new_level,
2073 info.new_layout, info.new_chunk,
2074 info.array.raid_disks, info.delta_disks,
2075 c->backup_file, devname, APPLY_METADATA_CHANGES,
2076 c->verbose)) {
2077 rv = 1;
2078 goto release;
2079 }
2080 sync_metadata(st);
2081 rv = reshape_array(container, fd, devname, st, &info, c->force,
2082 devlist, data_offset, c->backup_file, c->verbose,
2083 0, 0, 0);
2084 frozen = 0;
2085 }
2086 release:
2087 sysfs_free(sra);
2088 if (frozen > 0)
2089 unfreeze(st);
2090 return rv;
2091 }
2092
2093 /* verify_reshape_position()
2094 * Checks that the reshape position recorded in the metadata is not
2095 * farther along than the position known to md.
2096 * Return value:
2097 * 0 : no valid sysfs entry;
2098 * this can happen when the reshape has not been started yet
2099 * (reshape_array() will start it) or a raid0 array is still before takeover
2100 * -1 : error, reshape position is obviously wrong
2101 * 1 : success, reshape progress correct or updated
2102 */
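/* For illustration (assumed numbers, not from the original source):
 * on a 5-device RAID5 (4 data disks) with sync_max reporting 1000
 * sectors per device, the md position is 1000 * 4 = 4000 array sectors.
 * A metadata reshape_progress of 3000 would be bumped to 4000
 * (return 1); a value of 5000 would mean the reshape was not properly
 * frozen (return -1).
 */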
2103 static int verify_reshape_position(struct mdinfo *info, int level)
2104 {
2105 int ret_val = 0;
2106 char buf[40];
2107 int rv;
2108
2109 /* read sync_max, failure can mean raid0 array */
2110 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2111
2112 if (rv > 0) {
2113 char *ep;
2114 unsigned long long position = strtoull(buf, &ep, 0);
2115
2116 dprintf(Name": Read sync_max sysfs entry is: %s\n", buf);
2117 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2118 position *= get_data_disks(level,
2119 info->new_layout,
2120 info->array.raid_disks);
2121 if (info->reshape_progress < position) {
2122 dprintf("Corrected reshape progress (%llu) to "
2123 "md position (%llu)\n",
2124 info->reshape_progress, position);
2125 info->reshape_progress = position;
2126 ret_val = 1;
2127 } else if (info->reshape_progress > position) {
2128 pr_err("Fatal error: array "
2129 "reshape was not properly frozen "
2130 "(expected reshape position is %llu, "
2131 "but reshape progress is %llu.\n",
2132 position, info->reshape_progress);
2133 ret_val = -1;
2134 } else {
2135 dprintf("Reshape position in md and metadata "
2136 "are the same;");
2137 ret_val = 1;
2138 }
2139 }
2140 } else if (rv == 0) {
2141 /* for a valid sysfs entry, zero-length content
2142 * should be treated as an error
2143 */
2144 ret_val = -1;
2145 }
2146
2147 return ret_val;
2148 }
2149
2150 static unsigned long long choose_offset(unsigned long long lo,
2151 unsigned long long hi,
2152 unsigned long long min,
2153 unsigned long long max)
2154 {
2155 /* Choose a new offset between hi and lo.
2156 * It must be between min and max, but
2157 * we would prefer something near the middle of hi/lo, and also
2158 * prefer to be aligned to a big power of 2.
2159 *
2160 * So we start with the middle, then for each bit,
2161 * starting at '1' and increasing, if it is set, we either
2162 * add it or subtract it if possible, preferring the option
2163 * which is furthest from the boundary.
2164 *
2165 * We stop once we get a 1MB alignment. As units are in sectors,
2166 * 1MB = 2*1024 sectors.
2167 */
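/* Worked example (illustrative values only, not from the source):
 * choose_offset(10000, 30000, 12000, 28000) starts at (10000+30000)/2
 * = 20000, then walks bits 1..1024: bit 32 moves it down to 19968,
 * bit 512 moves it up to 20480, bit 1024 is clear, so the result is
 * 20480 sectors - a 1MB-aligned offset well inside [12000, 28000].
 */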
2168 unsigned long long choice = (lo + hi) / 2;
2169 unsigned long long bit = 1;
2170
2171 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2172 unsigned long long bigger, smaller;
2173 if (! (bit & choice))
2174 continue;
2175 bigger = choice + bit;
2176 smaller = choice - bit;
2177 if (bigger > max && smaller < min)
2178 break;
2179 if (bigger > max)
2180 choice = smaller;
2181 else if (smaller < min)
2182 choice = bigger;
2183 else if (hi - bigger > smaller - lo)
2184 choice = bigger;
2185 else
2186 choice = smaller;
2187 }
2188 return choice;
2189 }
2190
2191 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2192 char *devname, int delta_disks,
2193 unsigned long long data_offset,
2194 unsigned long long min)
2195 {
2196 struct mdinfo *sd;
2197 int dir = 0;
2198 int err = 0;
2199 unsigned long long before, after;
2200
2201 /* Need to find the minimum space before and after so the same
2202 * value is used on all devices
2203 */
2204 before = UINT64_MAX;
2205 after = UINT64_MAX;
2206 for (sd = sra->devs; sd; sd = sd->next) {
2207 char *dn;
2208 int dfd;
2209 int rv;
2210 struct supertype *st2;
2211 struct mdinfo info2;
2212
2213 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2214 continue;
2215 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2216 dfd = dev_open(dn, O_RDONLY);
2217 if (dfd < 0) {
2218 pr_err("%s: cannot open component %s\n",
2219 devname, dn ? dn : "-unknown-");
2220 goto release;
2221 }
2222 st2 = dup_super(st);
2223 rv = st2->ss->load_super(st2,dfd, NULL);
2224 close(dfd);
2225 if (rv) {
2226 free(st2);
2227 pr_err("%s: cannot get superblock from %s\n",
2228 devname, dn);
2229 goto release;
2230 }
2231 st2->ss->getinfo_super(st2, &info2, NULL);
2232 st2->ss->free_super(st2);
2233 free(st2);
2234 if (info2.space_before == 0 &&
2235 info2.space_after == 0) {
2236 /* Metadata doesn't support data_offset changes */
2237 return 1;
2238 }
2239 if (before > info2.space_before)
2240 before = info2.space_before;
2241 if (after > info2.space_after)
2242 after = info2.space_after;
2243
2244 if (data_offset != INVALID_SECTORS) {
2245 if (dir == 0) {
2246 if (info2.data_offset == data_offset) {
2247 pr_err("%s: already has that data_offset\n",
2248 dn);
2249 goto release;
2250 }
2251 if (data_offset < info2.data_offset)
2252 dir = -1;
2253 else
2254 dir = 1;
2255 } else if ((data_offset <= info2.data_offset && dir == 1) ||
2256 (data_offset >= info2.data_offset && dir == -1)) {
2257 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2258 dn);
2259 goto release;
2260 }
2261 }
2262 }
2263 if (before == UINT64_MAX)
2264 /* impossible really, there must be no devices */
2265 return 1;
2266
2267 for (sd = sra->devs; sd; sd = sd->next) {
2268 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2269 unsigned long long new_data_offset;
2270
2271 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2272 continue;
2273 if (delta_disks < 0) {
2274 /* Don't need any space as the array is shrinking;
2275 * just move data_offset up by min
2276 */
2277 if (data_offset == INVALID_SECTORS)
2278 new_data_offset = sd->data_offset + min;
2279 else {
2280 if (data_offset < sd->data_offset + min) {
2281 pr_err("--data-offset too small for %s\n",
2282 dn);
2283 goto release;
2284 }
2285 new_data_offset = data_offset;
2286 }
2287 } else if (delta_disks > 0) {
2288 /* need space before */
2289 if (before < min) {
2290 pr_err("Insufficient head-space for reshape on %s\n",
2291 dn);
2292 goto release;
2293 }
2294 if (data_offset == INVALID_SECTORS)
2295 new_data_offset = sd->data_offset - min;
2296 else {
2297 if (data_offset > sd->data_offset - min) {
2298 pr_err("--data-offset too large for %s\n",
2299 dn);
2300 goto release;
2301 }
2302 new_data_offset = data_offset;
2303 }
2304 } else {
2305 if (dir == 0) {
2306 /* can move up or down. If 'data_offset'
2307 * was set we would have already decided,
2308 * so just choose direction with most space.
2309 */
2310 if (before > after)
2311 dir = -1;
2312 else
2313 dir = 1;
2314 }
2315 sysfs_set_str(sra, NULL, "reshape_direction",
2316 dir == 1 ? "backwards" : "forwards");
2317 if (dir > 0) {
2318 /* Increase data offset */
2319 if (after < min) {
2320 pr_err("Insufficient tail-space for reshape on %s\n",
2321 dn);
2322 goto release;
2323 }
2324 if (data_offset != INVALID_SECTORS &&
2325 data_offset < sd->data_offset + min) {
2326 pr_err("--data-offset too small on %s\n",
2327 dn);
2328 goto release;
2329 }
2330 if (data_offset != INVALID_SECTORS)
2331 new_data_offset = data_offset;
2332 else
2333 new_data_offset = choose_offset(sd->data_offset,
2334 sd->data_offset + after,
2335 sd->data_offset + min,
2336 sd->data_offset + after);
2337 } else {
2338 /* Decrease data offset */
2339 if (before < min) {
2340 pr_err("insufficient head-room on %s\n",
2341 dn);
2342 goto release;
2343 }
2344 if (data_offset != INVALID_SECTORS &&
2345 data_offset < sd->data_offset - min) {
2346 pr_err("--data-offset too small on %s\n",
2347 dn);
2348 goto release;
2349 }
2350 if (data_offset != INVALID_SECTORS)
2351 new_data_offset = data_offset;
2352 else
2353 new_data_offset = choose_offset(sd->data_offset - before,
2354 sd->data_offset,
2355 sd->data_offset - before,
2356 sd->data_offset - min);
2357 }
2358 }
2359 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2360 if (err < 0 && errno == E2BIG) {
2361 /* try again after increasing data size to max */
2362 err = sysfs_set_num(sra, sd, "size", 0);
2363 if (err < 0 && errno == EINVAL &&
2364 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2365 /* some kernels have a bug where you cannot
2366 * use '0' on spare devices. */
2367 sysfs_set_num(sra, sd, "size",
2368 (sra->component_size + after)/2);
2369 }
2370 err = sysfs_set_num(sra, sd, "new_offset",
2371 new_data_offset);
2372 }
2373 if (err < 0) {
2374 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2375 pr_err("data-offset is too big for %s\n",
2376 dn);
2377 goto release;
2378 }
2379 if (sd == sra->devs &&
2380 (errno == ENOENT || errno == E2BIG))
2381 /* Early kernel, no 'new_offset' file,
2382 * or kernel doesn't like us.
2383 * For RAID5/6 this is not fatal
2384 */
2385 return 1;
2386 pr_err("Cannot set new_offset for %s\n",
2387 dn);
2388 break;
2389 }
2390 }
2391 return err;
2392 release:
2393 return -1;
2394 }
2395
2396 static int raid10_reshape(char *container, int fd, char *devname,
2397 struct supertype *st, struct mdinfo *info,
2398 struct reshape *reshape,
2399 unsigned long long data_offset,
2400 int force, int verbose)
2401 {
2402 /* Changing raid_disks, layout, chunksize or possibly
2403 * just data_offset for a RAID10.
2404 * We must always change data_offset. We change by at least
2405 * ->min_offset_change which is the largest of the old and new
2406 * chunk sizes.
2407 * If raid_disks is increasing, then data_offset must decrease
2408 * by at least this copy size.
2409 * If raid_disks is unchanged, data_offset must increase or
2410 * decrease by at least min_offset_change but preferably by much more.
2411 * We choose half of the available space.
2412 * If raid_disks is decreasing, data_offset must increase by
2413 * at least min_offset_change. To allow for this, component_size
2414 * must be decreased by the same amount.
2415 *
2416 * So we calculate the required minimum and direction, possibly
2417 * reduce the component_size, then iterate through the devices
2418 * and set the new_data_offset.
2419 * If that all works, we set chunk_size, layout, raid_disks, and start
2420 * 'reshape'
2421 */
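/* Illustrative numbers (an assumed case, not from the source): with
 * old/new chunks of 512K and 1024K, min_offset_change would follow the
 * larger chunk, i.e. 1024K = 2048 sectors. Growing the device count
 * then needs data_offset to drop by at least 2048 sectors on every
 * member; shrinking needs it to rise by at least that much, with
 * component_size reduced accordingly.
 */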
2422 struct mdinfo *sra;
2423 unsigned long long min;
2424 int err = 0;
2425
2426 sra = sysfs_read(fd, NULL,
2427 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2428 );
2429 if (!sra) {
2430 pr_err("%s: Cannot get array details from sysfs\n",
2431 devname);
2432 goto release;
2433 }
2434 min = reshape->min_offset_change;
2435
2436 if (info->delta_disks)
2437 sysfs_set_str(sra, NULL, "reshape_direction",
2438 info->delta_disks < 0 ? "backwards" : "forwards");
2439 if (info->delta_disks < 0 &&
2440 info->space_after < min) {
2441 int rv = sysfs_set_num(sra, NULL, "component_size",
2442 (sra->component_size -
2443 min)/2);
2444 if (rv) {
2445 pr_err("cannot reduce component size\n");
2446 goto release;
2447 }
2448 }
2449 err = set_new_data_offset(sra, st, devname, info->delta_disks, data_offset,
2450 min);
2451 if (err == 1) {
2452 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2453 cont_err("supported on this kernel\n");
2454 err = -1;
2455 }
2456 if (err < 0)
2457 goto release;
2458
2459 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2460 err = errno;
2461 if (!err && sysfs_set_num(sra, NULL, "layout", reshape->after.layout) < 0)
2462 err = errno;
2463 if (!err && sysfs_set_num(sra, NULL, "raid_disks",
2464 info->array.raid_disks + info->delta_disks) < 0)
2465 err = errno;
2466 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2467 err = errno;
2468 if (err) {
2469 pr_err("Cannot set array shape for %s\n",
2470 devname);
2471 if (err == EBUSY &&
2472 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2473 cont_err(" Bitmap must be removed before"
2474 " shape can be changed\n");
2475 goto release;
2476 }
2477 sysfs_free(sra);
2478 return 0;
2479 release:
2480 sysfs_free(sra);
2481 return 1;
2482 }
2483
2484 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2485 {
2486 struct mdinfo *sra, *sd;
2487 /* Initialisation to silence compiler warning */
2488 unsigned long long min_space_before = 0, min_space_after = 0;
2489 int first = 1;
2490
2491 sra = sysfs_read(fd, NULL, GET_DEVS);
2492 if (!sra)
2493 return;
2494 for (sd = sra->devs; sd; sd = sd->next) {
2495 char *dn;
2496 int dfd;
2497 struct supertype *st2;
2498 struct mdinfo info2;
2499
2500 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2501 continue;
2502 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2503 dfd = dev_open(dn, O_RDONLY);
2504 if (dfd < 0)
2505 break;
2506 st2 = dup_super(st);
2507 if (st2->ss->load_super(st2,dfd, NULL)) {
2508 close(dfd);
2509 free(st2);
2510 break;
2511 }
2512 close(dfd);
2513 st2->ss->getinfo_super(st2, &info2, NULL);
2514 st2->ss->free_super(st2);
2515 free(st2);
2516 if (first ||
2517 min_space_before > info2.space_before)
2518 min_space_before = info2.space_before;
2519 if (first ||
2520 min_space_after > info2.space_after)
2521 min_space_after = info2.space_after;
2522 first = 0;
2523 }
2524 if (sd == NULL && !first) {
2525 info->space_after = min_space_after;
2526 info->space_before = min_space_before;
2527 }
2528 sysfs_free(sra);
2529 }
2530
2531 static void update_cache_size(char *container, struct mdinfo *sra,
2532 struct mdinfo *info,
2533 int disks, unsigned long long blocks)
2534 {
2535 /* Check that the internal stripe cache is
2536 * large enough, or it won't work.
2537 * It must hold at least 4 stripes of the larger
2538 * chunk size
2539 */
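/* For illustration (assumed 512K chunks and a small 'blocks' value):
 * cache = 524288 * 4 = 2097152 bytes = 4096 sectors; 16 + blocks/disks
 * stays below that, so the cache becomes 4096/8 = 512 pages and
 * stripe_cache_size is raised to 513 when the current value is below 512.
 */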
2540 unsigned long cache;
2541 cache = max(info->array.chunk_size, info->new_chunk);
2542 cache *= 4; /* 4 stripes minimum */
2543 cache /= 512; /* convert to sectors */
2544 /* make sure there is room for 'blocks' with a bit to spare */
2545 if (cache < 16 + blocks / disks)
2546 cache = 16 + blocks / disks;
2547 cache /= (4096/512); /* Convert from sectors to pages */
2548
2549 if (sra->cache_size < cache)
2550 subarray_set_num(container, sra, "stripe_cache_size",
2551 cache+1);
2552 }
2553
2554 static int impose_reshape(struct mdinfo *sra,
2555 struct mdinfo *info,
2556 struct supertype *st,
2557 int fd,
2558 int restart,
2559 char *devname, char *container,
2560 struct reshape *reshape)
2561 {
2562 struct mdu_array_info_s array;
2563
2564 sra->new_chunk = info->new_chunk;
2565
2566 if (restart) {
2567 /* For external metadata the checkpoint saved by mdmon can be lost
2568 * or missed (due to e.g. a crash). Check that md is not, during
2569 * restart, farther along than the metadata indicates.
2570 * If it is, the metadata information is obsolete.
2571 */
2572 if (st->ss->external)
2573 verify_reshape_position(info, reshape->level);
2574 sra->reshape_progress = info->reshape_progress;
2575 } else {
2576 sra->reshape_progress = 0;
2577 if (reshape->after.data_disks < reshape->before.data_disks)
2578 /* start from the end of the new array */
2579 sra->reshape_progress = (sra->component_size
2580 * reshape->after.data_disks);
2581 }
2582
2583 ioctl(fd, GET_ARRAY_INFO, &array);
2584 if (info->array.chunk_size == info->new_chunk &&
2585 reshape->before.layout == reshape->after.layout &&
2586 st->ss->external == 0) {
2587 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2588 array.raid_disks = reshape->after.data_disks + reshape->parity;
2589 if (!restart &&
2590 ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2591 int err = errno;
2592
2593 pr_err("Cannot set device shape for %s: %s\n",
2594 devname, strerror(errno));
2595
2596 if (err == EBUSY &&
2597 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2598 cont_err("Bitmap must be removed before"
2599 " shape can be changed\n");
2600
2601 goto release;
2602 }
2603 } else if (!restart) {
2604 /* set them all just in case some old 'new_*' value
2605 * persists from some earlier problem.
2606 */
2607 int err = 0;
2608 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2609 err = errno;
2610 if (!err && sysfs_set_num(sra, NULL, "layout",
2611 reshape->after.layout) < 0)
2612 err = errno;
2613 if (!err && subarray_set_num(container, sra, "raid_disks",
2614 reshape->after.data_disks +
2615 reshape->parity) < 0)
2616 err = errno;
2617 if (err) {
2618 pr_err("Cannot set device shape for %s\n",
2619 devname);
2620
2621 if (err == EBUSY &&
2622 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2623 cont_err("Bitmap must be removed before"
2624 " shape can be changed\n");
2625 goto release;
2626 }
2627 }
2628 return 0;
2629 release:
2630 return -1;
2631 }
2632
2633 static int impose_level(int fd, int level, char *devname, int verbose)
2634 {
2635 char *c;
2636 struct mdu_array_info_s array;
2637 struct mdinfo info;
2638 sysfs_init(&info, fd, NULL);
2639
2640 ioctl(fd, GET_ARRAY_INFO, &array);
2641 if (level == 0 &&
2642 (array.level >= 4 && array.level <= 6)) {
2643 /* To convert to RAID0 we need to fail and
2644 * remove any non-data devices. */
2645 int found = 0;
2646 int d;
2647 int data_disks = array.raid_disks - 1;
2648 if (array.level == 6)
2649 data_disks -= 1;
2650 if (array.level == 5 &&
2651 array.layout != ALGORITHM_PARITY_N)
2652 return -1;
2653 if (array.level == 6 &&
2654 array.layout != ALGORITHM_PARITY_N_6)
2655 return -1;
2656 sysfs_set_str(&info, NULL,"sync_action", "idle");
2657 /* First remove any spares so no recovery starts */
2658 for (d = 0, found = 0;
2659 d < MAX_DISKS && found < array.nr_disks;
2660 d++) {
2661 mdu_disk_info_t disk;
2662 disk.number = d;
2663 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2664 continue;
2665 if (disk.major == 0 && disk.minor == 0)
2666 continue;
2667 found++;
2668 if ((disk.state & (1 << MD_DISK_ACTIVE))
2669 && disk.raid_disk < data_disks)
2670 /* keep this */
2671 continue;
2672 ioctl(fd, HOT_REMOVE_DISK,
2673 makedev(disk.major, disk.minor));
2674 }
2675 /* Now fail anything left */
2676 ioctl(fd, GET_ARRAY_INFO, &array);
2677 for (d = 0, found = 0;
2678 d < MAX_DISKS && found < array.nr_disks;
2679 d++) {
2680 int cnt;
2681 mdu_disk_info_t disk;
2682 disk.number = d;
2683 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2684 continue;
2685 if (disk.major == 0 && disk.minor == 0)
2686 continue;
2687 found++;
2688 if ((disk.state & (1 << MD_DISK_ACTIVE))
2689 && disk.raid_disk < data_disks)
2690 /* keep this */
2691 continue;
2692 ioctl(fd, SET_DISK_FAULTY,
2693 makedev(disk.major, disk.minor));
2694 cnt = 5;
2695 while (ioctl(fd, HOT_REMOVE_DISK,
2696 makedev(disk.major, disk.minor)) < 0
2697 && errno == EBUSY
2698 && cnt--) {
2699 usleep(10000);
2700 }
2701 }
2702 }
2703 c = map_num(pers, level);
2704 if (c) {
2705 int err = sysfs_set_str(&info, NULL, "level", c);
2706 if (err) {
2707 err = errno;
2708 pr_err("%s: could not set level to %s\n",
2709 devname, c);
2710 if (err == EBUSY &&
2711 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2712 cont_err("Bitmap must be removed"
2713 " before level can be changed\n");
2714 return err;
2715 }
2716 if (verbose >= 0)
2717 pr_err("level of %s changed to %s\n",
2718 devname, c);
2719 }
2720 return 0;
2721 }
2722
2723 static int reshape_array(char *container, int fd, char *devname,
2724 struct supertype *st, struct mdinfo *info,
2725 int force, struct mddev_dev *devlist,
2726 unsigned long long data_offset,
2727 char *backup_file, int verbose, int forked,
2728 int restart, int freeze_reshape)
2729 {
2730 struct reshape reshape;
2731 int spares_needed;
2732 char *msg;
2733 int orig_level = UnSet;
2734 int odisks;
2735 int delayed;
2736
2737 struct mdu_array_info_s array;
2738 char *c;
2739
2740 struct mddev_dev *dv;
2741 int added_disks;
2742
2743 int *fdlist = NULL;
2744 unsigned long long *offsets = NULL;
2745 int d;
2746 int nrdisks;
2747 int err;
2748 unsigned long blocks;
2749 unsigned long long array_size;
2750 int done;
2751 struct mdinfo *sra = NULL;
2752
2753 /* when reshaping a RAID0, the component_size might be zero.
2754 * So try to fix that up.
2755 */
2756 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2757 dprintf("Cannot get array information.\n");
2758 goto release;
2759 }
2760 if (array.level == 0 && info->component_size == 0) {
2761 get_dev_size(fd, NULL, &array_size);
2762 info->component_size = array_size / array.raid_disks;
2763 }
2764
2765 if (array.level == 10)
2766 /* Need space_after info */
2767 get_space_after(fd, st, info);
2768
2769 if (info->reshape_active) {
2770 int new_level = info->new_level;
2771 info->new_level = UnSet;
2772 if (info->delta_disks > 0)
2773 info->array.raid_disks -= info->delta_disks;
2774 msg = analyse_change(info, &reshape);
2775 info->new_level = new_level;
2776 if (info->delta_disks > 0)
2777 info->array.raid_disks += info->delta_disks;
2778 if (!restart)
2779 /* Make sure the array isn't read-only */
2780 ioctl(fd, RESTART_ARRAY_RW, 0);
2781 } else
2782 msg = analyse_change(info, &reshape);
2783 if (msg) {
2784 pr_err("%s\n", msg);
2785 goto release;
2786 }
2787 if (restart &&
2788 (reshape.level != info->array.level ||
2789 reshape.before.layout != info->array.layout ||
2790 reshape.before.data_disks + reshape.parity
2791 != info->array.raid_disks - max(0, info->delta_disks))) {
2792 pr_err("reshape info is not in native format -"
2793 " cannot continue.\n");
2794 goto release;
2795 }
2796
2797 if (st->ss->external && restart && (info->reshape_progress == 0)) {
2798 /* When a reshape is restarted from '0' (the very beginning of
2799 * the array), it is possible that, for external metadata, the
2800 * reshape and array configuration have not happened yet.
2801 * Check whether md has the same opinion and the reshape is restarted
2802 * from 0. If so, this is a regular reshape start after the metadata
2803 * has merely switched the reshape to the next array.
2804 */
2805 if ((verify_reshape_position(info, reshape.level) >= 0) &&
2806 (info->reshape_progress == 0))
2807 restart = 0;
2808 }
2809 if (restart) {
2810 /* reshape already started. just skip to monitoring the reshape */
2811 if (reshape.backup_blocks == 0)
2812 return 0;
2813 if (restart & RESHAPE_NO_BACKUP)
2814 return 0;
2815 goto started;
2816 }
2817 /* The container is frozen but the array may not be.
2818 * So freeze the array so spares don't get put to the wrong use
2819 * FIXME there should probably be a cleaner separation between
2820 * freeze_array and freeze_container.
2821 */
2822 sysfs_freeze_array(info);
2823 /* Check we have enough spares to not be degraded */
2824 added_disks = 0;
2825 for (dv = devlist; dv ; dv=dv->next)
2826 added_disks++;
2827 spares_needed = max(reshape.before.data_disks,
2828 reshape.after.data_disks)
2829 + reshape.parity - array.raid_disks;
2830
2831 if (!force &&
2832 info->new_level > 1 && info->array.level > 1 &&
2833 spares_needed > info->array.spare_disks + added_disks) {
2834 pr_err("Need %d spare%s to avoid degraded array,"
2835 " and only have %d.\n"
2836 " Use --force to over-ride this check.\n",
2837 spares_needed,
2838 spares_needed == 1 ? "" : "s",
2839 info->array.spare_disks + added_disks);
2840 goto release;
2841 }
2842 /* Check we have enough spares to not fail */
2843 spares_needed = max(reshape.before.data_disks,
2844 reshape.after.data_disks)
2845 - array.raid_disks;
2846 if ((info->new_level > 1 || info->new_level == 0) &&
2847 spares_needed > info->array.spare_disks +added_disks) {
2848 pr_err("Need %d spare%s to create working array,"
2849 " and only have %d.\n",
2850 spares_needed,
2851 spares_needed == 1 ? "" : "s",
2852 info->array.spare_disks + added_disks);
2853 goto release;
2854 }
2855
2856 if (reshape.level != array.level) {
2857 int err = impose_level(fd, reshape.level, devname, verbose);
2858 if (err)
2859 goto release;
2860 info->new_layout = UnSet; /* after level change,
2861 * layout is meaningless */
2862 orig_level = array.level;
2863 sysfs_freeze_array(info);
2864
2865 if (reshape.level > 0 && st->ss->external) {
2866 /* make sure mdmon is aware of the new level */
2867 if (mdmon_running(container))
2868 flush_mdmon(container);
2869
2870 if (!mdmon_running(container))
2871 start_mdmon(container);
2872 ping_monitor(container);
2873 if (mdmon_running(container) &&
2874 st->update_tail == NULL)
2875 st->update_tail = &st->updates;
2876 }
2877 }
2878 /* ->reshape_super might have chosen some spares from the
2879 * container that it wants to be part of the new array.
2880 * We can collect them with ->container_content and give
2881 * them to the kernel.
2882 */
2883 if (st->ss->reshape_super && st->ss->container_content) {
2884 char *subarray = strchr(info->text_version+1, '/')+1;
2885 struct mdinfo *info2 =
2886 st->ss->container_content(st, subarray);
2887 struct mdinfo *d;
2888
2889 if (info2) {
2890 sysfs_init(info2, fd, st->devnm);
2891 /* When increasing number of devices, we need to set
2892 * new raid_disks before adding these, or they might
2893 * be rejected.
2894 */
2895 if (reshape.backup_blocks &&
2896 reshape.after.data_disks > reshape.before.data_disks)
2897 subarray_set_num(container, info2, "raid_disks",
2898 reshape.after.data_disks +
2899 reshape.parity);
2900 for (d = info2->devs; d; d = d->next) {
2901 if (d->disk.state == 0 &&
2902 d->disk.raid_disk >= 0) {
2903 /* This is a spare that wants to
2904 * be part of the array.
2905 */
2906 add_disk(fd, st, info2, d);
2907 }
2908 }
2909 sysfs_free(info2);
2910 }
2911 }
2912 /* We might have been given some devices to add to the
2913 * array. Now that the array has been changed to the right
2914 * level and frozen, we can safely add them.
2915 */
2916 if (devlist)
2917 Manage_subdevs(devname, fd, devlist, verbose,
2918 0, NULL, 0);
2919
2920 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
2921 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
2922 if (reshape.backup_blocks == 0) {
2923 /* No restriping needed, but we might need to impose
2924 * some more changes: layout, raid_disks, chunk_size
2925 */
2926 /* read current array info */
2927 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2928 dprintf("Cannot get array information.\n");
2929 goto release;
2930 }
2931 /* compare the current array info with the new values and,
2932 * if different, update to the new values */
2933 if (info->new_layout != UnSet &&
2934 info->new_layout != array.layout) {
2935 array.layout = info->new_layout;
2936 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2937 pr_err("failed to set new layout\n");
2938 goto release;
2939 } else if (verbose >= 0)
2940 printf("layout for %s set to %d\n",
2941 devname, array.layout);
2942 }
2943 if (info->delta_disks != UnSet &&
2944 info->delta_disks != 0 &&
2945 array.raid_disks != (info->array.raid_disks + info->delta_disks)) {
2946 array.raid_disks += info->delta_disks;
2947 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2948 pr_err("failed to set raid disks\n");
2949 goto release;
2950 } else if (verbose >= 0) {
2951 printf("raid_disks for %s set to %d\n",
2952 devname, array.raid_disks);
2953 }
2954 }
2955 if (info->new_chunk != 0 &&
2956 info->new_chunk != array.chunk_size) {
2957 if (sysfs_set_num(info, NULL,
2958 "chunk_size", info->new_chunk) != 0) {
2959 pr_err("failed to set chunk size\n");
2960 goto release;
2961 } else if (verbose >= 0)
2962 printf("chunk size for %s set to %d\n",
2963 devname, array.chunk_size);
2964 }
2965 unfreeze(st);
2966 return 0;
2967 }
2968
2969 /*
2970 * There are three possibilities.
2971 * 1/ The array will shrink.
2972 * We need to ensure the reshape will pause before reaching
2973 * the 'critical section'. We also need to fork and wait for
2974 * that to happen. When it does we
2975 * suspend/backup/complete/unfreeze
2976 *
2977 * 2/ The array will not change size.
2978 * This requires that we keep a backup of a sliding window
2979 * so that we can restore data after a crash. So we need
2980 * to fork and monitor progress.
2981 * In future we will allow the data_offset to change, so
2982 * a sliding backup becomes unnecessary.
2983 *
2984 * 3/ The array will grow. This is relatively easy.
2985 * However the kernel's restripe routines will cheerfully
2986 * overwrite some early data before it is safe. So we
2987 * need to make a backup of the early parts of the array
2988 * and be ready to restore it if rebuild aborts very early.
2989 * For externally managed metadata, we still need a forked
2990 * child to monitor the reshape and suspend IO over the region
2991 * that is being reshaped.
2992 *
2993 * We backup data by writing it to one spare, or to a
2994 * file which was given on command line.
2995 *
2996 * In each case, we first make sure that storage is available
2997 * for the required backup.
2998 * Then we:
2999 * - request the shape change.
3000 * - fork to handle backup etc.
3001 */
3002 /* Check that we can hold all the data */
3003 get_dev_size(fd, NULL, &array_size);
3004 if (reshape.new_size < (array_size/512)) {
3005 pr_err("this change will reduce the size of the array.\n"
3006 " use --grow --array-size first to truncate array.\n"
3007 " e.g. mdadm --grow %s --array-size %llu\n",
3008 devname, reshape.new_size/2);
3009 goto release;
3010 }
3011
3012 if (array.level == 10) {
3013 /* Reshaping RAID10 does not require any data backup by
3014 * user-space. Instead it requires that the data_offset
3015 * is changed to avoid the need for backup.
3016 * So this is handled very separately
3017 */
3018 if (restart)
3019 /* Nothing to do. */
3020 return 0;
3021 return raid10_reshape(container, fd, devname, st, info,
3022 &reshape, data_offset,
3023 force, verbose);
3024 }
3025 sra = sysfs_read(fd, NULL,
3026 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3027 GET_CACHE);
3028 if (!sra) {
3029 pr_err("%s: Cannot get array details from sysfs\n",
3030 devname);
3031 goto release;
3032 }
3033
3034 if (!backup_file)
3035 switch(set_new_data_offset(sra, st, devname,
3036 reshape.after.data_disks - reshape.before.data_disks,
3037 data_offset,
3038 reshape.min_offset_change)) {
3039 case -1:
3040 goto release;
3041 case 0:
3042 /* Updated data_offset, so it's easy now */
3043 update_cache_size(container, sra, info,
3044 min(reshape.before.data_disks,
3045 reshape.after.data_disks),
3046 reshape.backup_blocks);
3047
3048 /* Right, everything seems fine. Let's kick things off.
3049 */
3050 sync_metadata(st);
3051
3052 if (impose_reshape(sra, info, st, fd, restart,
3053 devname, container, &reshape) < 0)
3054 goto release;
3055 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3056 pr_err("Failed to initiate reshape!\n");
3057 goto release;
3058 }
3059 if (info->new_level == reshape.level)
3060 return 0;
3061 /* need to adjust level when reshape completes */
3062 switch(fork()) {
3063 case -1: /* ignore error, but don't wait */
3064 return 0;
3065 default: /* parent */
3066 return 0;
3067 case 0:
3068 map_fork();
3069 break;
3070 }
3071 wait_reshape(sra);
3072 impose_level(fd, info->new_level, devname, verbose);
3073
3074 return 0;
3075 case 1: /* Couldn't set data_offset, try the old way */
3076 if (data_offset != INVALID_SECTORS) {
3077 pr_err("Cannot update data_offset on this array\n");
3078 goto release;
3079 }
3080 break;
3081 }
3082
3083 started:
3084 /* Decide how many blocks (sectors) for a reshape
3085 * unit. The number we have so far is just a minimum
3086 */
3087 blocks = reshape.backup_blocks;
3088 if (reshape.before.data_disks ==
3089 reshape.after.data_disks) {
3090 /* Make 'blocks' bigger for better throughput, but
3091 * not so big that we reject it below.
3092 * Try for 16 megabytes
3093 */
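/* For illustration (assumed values): with backup_blocks = 1024 sectors
 * and component_size = 2000000 sectors, 'blocks' doubles
 * 1024 -> 2048 -> ... -> 32768 and stops at the 16MB cap
 * (16*1024*2 sectors); the blocks*32 < component_size check never
 * triggers in this case.
 */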
3094 while (blocks * 32 < sra->component_size &&
3095 blocks < 16*1024*2)
3096 blocks *= 2;
3097 } else
3098 pr_err("Need to back up %luK of critical "
3099 "section.\n", blocks/2);
3100
3101 if (blocks >= sra->component_size/2) {
3102 pr_err("%s: Something wrong"
3103 " - reshape aborted\n",
3104 devname);
3105 goto release;
3106 }
3107
3108 /* Now we need to open all these devices so we can read/write.
3109 */
3110 nrdisks = max(reshape.before.data_disks,
3111 reshape.after.data_disks) + reshape.parity
3112 + sra->array.spare_disks;
3113 fdlist = xcalloc((1+nrdisks), sizeof(int));
3114 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3115
3116 odisks = reshape.before.data_disks + reshape.parity;
3117 d = reshape_prepare_fdlist(devname, sra, odisks,
3118 nrdisks, blocks, backup_file,
3119 fdlist, offsets);
3120 if (d < 0) {
3121 goto release;
3122 }
3123 if ((st->ss->manage_reshape == NULL) ||
3124 (st->ss->recover_backup == NULL)) {
3125 if (backup_file == NULL) {
3126 if (reshape.after.data_disks <=
3127 reshape.before.data_disks) {
3128 pr_err("%s: Cannot grow - "
3129 "need backup-file\n", devname);
3130 goto release;
3131 } else if (sra->array.spare_disks == 0) {
3132 pr_err("%s: Cannot grow - "
3133 "need a spare or backup-file to backup "
3134 "critical section\n", devname);
3135 goto release;
3136 }
3137 } else {
3138 if (!reshape_open_backup_file(backup_file, fd, devname,
3139 (signed)blocks,
3140 fdlist+d, offsets+d,
3141 restart)) {
3142 goto release;
3143 }
3144 d++;
3145 }
3146 }
3147
3148 update_cache_size(container, sra, info,
3149 min(reshape.before.data_disks, reshape.after.data_disks),
3150 blocks);
3151
3152 /* Right, everything seems fine. Let's kick things off.
3153 * If only changing raid_disks, use ioctl, else use
3154 * sysfs.
3155 */
3156 sync_metadata(st);
3157
3158 if (impose_reshape(sra, info, st, fd, restart,
3159 devname, container, &reshape) < 0)
3160 goto release;
3161
3162 err = start_reshape(sra, restart, reshape.before.data_disks,
3163 reshape.after.data_disks);
3164 if (err) {
3165 pr_err("Cannot %s reshape for %s\n",
3166 restart ? "continue" : "start",
3167 devname);
3168 goto release;
3169 }
3170 if (restart)
3171 sysfs_set_str(sra, NULL, "array_state", "active");
3172 if (freeze_reshape) {
3173 free(fdlist);
3174 free(offsets);
3175 sysfs_free(sra);
3176 pr_err("Reshape has to be continued from"
3177 " location %llu when root filesystem has been mounted.\n",
3178 sra->reshape_progress);
3179 return 1;
3180 }
3181
3182 /* Now we just need to kick off the reshape and watch, while
3183 * handling backups of the data...
3184 * This is all done by a forked background process.
3185 */
3186 switch(forked ? 0 : fork()) {
3187 case -1:
3188 pr_err("Cannot run child to monitor reshape: %s\n",
3189 strerror(errno));
3190 abort_reshape(sra);
3191 goto release;
3192 default:
3193 free(fdlist);
3194 free(offsets);
3195 sysfs_free(sra);
3196 return 0;
3197 case 0:
3198 map_fork();
3199 break;
3200 }
3201
3202 /* If another array on the same devices is busy, the
3203 * reshape will wait for them. This would mean that
3204 * the first section that we suspend will stay suspended
3205 * for a long time. So check on that possibility
3206 * by looking for "DELAYED" in /proc/mdstat, and if found,
3207 * wait a while
3208 */
3209 do {
3210 struct mdstat_ent *mds, *m;
3211 delayed = 0;
3212 mds = mdstat_read(0, 0);
3213 for (m = mds; m; m = m->next)
3214 if (strcmp(m->devnm, sra->sys_name) == 0) {
3215 if (m->resync &&
3216 m->percent == RESYNC_DELAYED)
3217 delayed = 1;
3218 if (m->resync == 0)
3219 /* Haven't started the reshape thread
3220 * yet, wait a bit
3221 */
3222 delayed = 2;
3223 break;
3224 }
3225 free_mdstat(mds);
3226 if (delayed == 1 && get_linux_version() < 3007000) {
3227 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3228 " You might experience problems until other reshapes complete.\n");
3229 delayed = 0;
3230 }
3231 if (delayed)
3232 sleep(30 - (delayed-1) * 25);
3233 } while (delayed);
3234
3235 close(fd);
3236 if (check_env("MDADM_GROW_VERIFY"))
3237 fd = open(devname, O_RDONLY | O_DIRECT);
3238 else
3239 fd = -1;
3240 mlockall(MCL_FUTURE);
3241
3242 if (st->ss->external) {
3243 /* metadata handler takes it from here */
3244 done = st->ss->manage_reshape(
3245 fd, sra, &reshape, st, blocks,
3246 fdlist, offsets,
3247 d - odisks, fdlist+odisks,
3248 offsets+odisks);
3249 } else
3250 done = child_monitor(
3251 fd, sra, &reshape, st, blocks,
3252 fdlist, offsets,
3253 d - odisks, fdlist+odisks,
3254 offsets+odisks);
3255
3256 free(fdlist);
3257 free(offsets);
3258
3259 if (backup_file && done)
3260 unlink(backup_file);
3261 if (!done) {
3262 abort_reshape(sra);
3263 goto out;
3264 }
3265
3266 if (!st->ss->external &&
3267 !(reshape.before.data_disks != reshape.after.data_disks
3268 && info->custom_array_size) &&
3269 info->new_level == reshape.level &&
3270 !forked) {
3271 /* no need to wait for the reshape to finish as
3272 * there is nothing more to do.
3273 */
3274 sysfs_free(sra);
3275 exit(0);
3276 }
3277 wait_reshape(sra);
3278
3279 if (st->ss->external) {
3280 /* Re-load the metadata as much could have changed */
3281 int cfd = open_dev(st->container_devnm);
3282 if (cfd >= 0) {
3283 flush_mdmon(container);
3284 st->ss->free_super(st);
3285 st->ss->load_container(st, cfd, container);
3286 close(cfd);
3287 }
3288 }
3289
3290 /* set the new array size if required; custom_array_size is used
3291 * by this metadata.
3292 */
3293 if (reshape.before.data_disks !=
3294 reshape.after.data_disks &&
3295 info->custom_array_size)
3296 set_array_size(st, info, info->text_version);
3297
3298 if (info->new_level != reshape.level) {
3299 if (fd < 0)
3300 fd = open(devname, O_RDONLY);
3301 impose_level(fd, info->new_level, devname, verbose);
3302 close(fd);
3303 if (info->new_level == 0)
3304 st->update_tail = NULL;
3305 }
3306 out:
3307 sysfs_free(sra);
3308 if (forked)
3309 return 0;
3310 unfreeze(st);
3311 exit(0);
3312
3313 release:
3314 free(fdlist);
3315 free(offsets);
3316 if (orig_level != UnSet && sra) {
3317 c = map_num(pers, orig_level);
3318 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3319 pr_err("aborting level change\n");
3320 }
3321 sysfs_free(sra);
3322 if (!forked)
3323 unfreeze(st);
3324 return 1;
3325 }
3326
3327 /* mdfd handle is passed to be closed in child process (after fork).
3328 */
3329 int reshape_container(char *container, char *devname,
3330 int mdfd,
3331 struct supertype *st,
3332 struct mdinfo *info,
3333 int force,
3334 char *backup_file,
3335 int verbose, int restart, int freeze_reshape)
3336 {
3337 struct mdinfo *cc = NULL;
3338 int rv = restart;
3339 char last_devnm[32] = "";
3340
3341 /* component_size is not meaningful for a container,
3342 * so pass '0' meaning 'no change'
3343 */
3344 if (!restart &&
3345 reshape_super(st, 0, info->new_level,
3346 info->new_layout, info->new_chunk,
3347 info->array.raid_disks, info->delta_disks,
3348 backup_file, devname, APPLY_METADATA_CHANGES,
3349 verbose)) {
3350 unfreeze(st);
3351 return 1;
3352 }
3353
3354 sync_metadata(st);
3355
3356 /* ping monitor to be sure that update is on disk
3357 */
3358 ping_monitor(container);
3359
3360 switch (fork()) {
3361 case -1: /* error */
3362 perror("Cannot fork to complete reshape");
3363 unfreeze(st);
3364 return 1;
3365 default: /* parent */
3366 if (!freeze_reshape)
3367 printf(Name ": multi-array reshape continues"
3368 " in background\n");
3369 return 0;
3370 case 0: /* child */
3371 map_fork();
3372 break;
3373 }
3374
3375 /* close unused handle in child process
3376 */
3377 if (mdfd > -1)
3378 close(mdfd);
3379
3380 while(1) {
3381 /* For each member array with reshape_active,
3382 * we need to perform the reshape.
3383 * We pick the first array that needs reshaping and
3384 * reshape it. reshape_array() will re-read the metadata
3385 * so the next time through a different array should be
3386 * ready for reshape.
3387 * It is possible that the 'different' array will not
3388 * be assembled yet. In that case we simply exit.
3389 * When it is assembled, the mdadm which assembles it
3390 * will take over the reshape.
3391 */
3392 struct mdinfo *content;
3393 int fd;
3394 struct mdstat_ent *mdstat;
3395 char *adev;
3396 int devid;
3397
3398 sysfs_free(cc);
3399
3400 cc = st->ss->container_content(st, NULL);
3401
3402 for (content = cc; content ; content = content->next) {
3403 char *subarray;
3404 if (!content->reshape_active)
3405 continue;
3406
3407 subarray = strchr(content->text_version+1, '/')+1;
3408 mdstat = mdstat_by_subdev(subarray, container);
3409 if (!mdstat)
3410 continue;
3411 if (mdstat->active == 0) {
3412 pr_err("Skipping inactive array %s.\n",
3413 mdstat->devnm);
3414 free_mdstat(mdstat);
3415 mdstat = NULL;
3416 continue;
3417 }
3418 break;
3419 }
3420 if (!content)
3421 break;
3422
3423 devid = devnm2devid(mdstat->devnm);
3424 adev = map_dev(major(devid), minor(devid), 0);
3425 if (!adev)
3426 adev = content->text_version;
3427
3428 fd = open_dev(mdstat->devnm);
3429 if (fd < 0) {
3430 printf(Name ": Device %s cannot be opened for reshape.\n",
3431 adev);
3432 break;
3433 }
3434
3435 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3436 /* Do not allow for multiple reshape_array() calls for
3437 * the same array.
3438 * It can happen when reshape_array() returns without
3439 * error, when reshape is not finished (wrong reshape
3440 * starting/continuation conditions). Mdmon doesn't
3441 * switch to next array in container and reentry
3442 * conditions for the same array occur.
3443 * This is possibly an interim measure until the behaviour of
3444 * reshape_array() is resolved.
3445 */
3446 printf(Name ": Multiple reshape execution detected for "
3447 "device %s.\n", adev);
3448 close(fd);
3449 break;
3450 }
3451 strcpy(last_devnm, mdstat->devnm);
3452
3453 sysfs_init(content, fd, mdstat->devnm);
3454
3455 if (mdmon_running(container))
3456 flush_mdmon(container);
3457
3458 rv = reshape_array(container, fd, adev, st,
3459 content, force, NULL, 0ULL,
3460 backup_file, verbose, 1, restart,
3461 freeze_reshape);
3462 close(fd);
3463
3464 if (freeze_reshape) {
3465 sysfs_free(cc);
3466 exit(0);
3467 }
3468
3469 restart = 0;
3470 if (rv)
3471 break;
3472
3473 if (mdmon_running(container))
3474 flush_mdmon(container);
3475 }
3476 if (!rv)
3477 unfreeze(st);
3478 sysfs_free(cc);
3479 exit(0);
3480 }
3481
3482 /*
3483 * We run a child process in the background which performs the following
3484 * steps:
3485 * - wait for resync to reach a certain point
3486 * - suspend io to the following section
3487 * - backup that section
3488 * - allow resync to proceed further
3489 * - resume io
3490 * - discard the backup.
3491 *
3492 * These steps are combined in slightly different ways in the three cases.
3493 * Grow:
3494 * - suspend/backup/allow/wait/resume/discard
3495 * Shrink:
3496 * - allow/wait/suspend/backup/allow/wait/resume/discard
3497 * same-size:
3498 * - wait/resume/discard/suspend/backup/allow
3499 *
3500 * suspend/backup/allow always come together
3501 * wait/resume/discard do too.
3502 * For the same-size case we have two backups to improve flow.
3503 *
3504 */
3505
3506 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3507 unsigned long long backup_point,
3508 unsigned long long wait_point,
3509 unsigned long long *suspend_point,
3510 unsigned long long *reshape_completed)
3511 {
3512 /* This function is called repeatedly by the reshape manager.
3513 * It determines how much progress can safely be made and allows
3514 * that progress.
3515 * - 'info' identifies the array and particularly records in
3516 * ->reshape_progress the metadata's knowledge of progress
3517 * This is a sector offset from the start of the array
3518 * of the next array block to be relocated. This number
3519 * may increase from 0 or decrease from array_size, depending
3520 * on the type of reshape that is happening.
3521 * Note that in contrast, 'sync_completed' is a block count of the
3522 * reshape so far. It gives the distance between the start point
3523 * (head or tail of device) and the next place that data will be
3524 * written. It always increases.
3525 * - 'reshape' is the structure created by analyse_change
3526 * - 'backup_point' shows how much the metadata manager has backed-up
3527 * data. For reshapes with increasing progress, it is the next address
3528 * to be backed up; previous addresses have been backed up. For
3529 * decreasing progress, it is the earliest address that has been
3530 * backed up - later addresses are also backed up.
3531 * So addresses between reshape_progress and backup_point are
3532 * backed up providing those are in the 'correct' order.
3533 * - 'wait_point' is an array address. When reshape_completed
3534 * passes this point, progress_reshape should return. It might
3535 * return earlier if it determines that ->reshape_progress needs
3536 * to be updated or further backup is needed.
3537 * - suspend_point is maintained by progress_reshape and the caller
3538 * should not touch it except to initialise to zero.
3539 * It is an array address and it only increases in 2.6.37 and earlier.
3540 * This makes it difficult to handle reducing reshapes with
3541 * external metadata.
3542 * However: it is similar to backup_point in that it records the
3543 * other end of a suspended region from reshape_progress.
3544 * it is moved to extend the region that is safe to backup and/or
3545 * reshape
3546 * - reshape_completed is read from sysfs and returned. The caller
3547 * should copy this into ->reshape_progress when it has reason to
3548 * believe that the metadata knows this, and any backup outside this
3549 * has been erased.
3550 *
3551 * Return value is:
3552 * 1 if more data, from backup_point but only as far as suspend_point,
3553 * should be backed up
3554 * 0 if things are progressing smoothly
3555 * -1 if the reshape is finished because it is all done,
3556 * -2 if the reshape is finished due to an error.
3557 */
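/* Rough usage sketch (illustrative only, not a definitive caller):
 * a reshape monitor loop would repeatedly do
 * rv = progress_reshape(info, reshape, backup_point, wait_point,
 * &suspend_point, &completed);
 * backing up more data when rv == 1, recording 'completed' in the
 * metadata (and erasing stale backups) when rv == 0, and stopping
 * on rv < 0 (-1 done, -2 error).
 */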
3558
3559 int advancing = (reshape->after.data_disks
3560 >= reshape->before.data_disks);
3561 unsigned long long need_backup; /* All data between start of array and
3562 * here will at some point need to
3563 * be backed up.
3564 */
3565 unsigned long long read_offset, write_offset;
3566 unsigned long long write_range;
3567 unsigned long long max_progress, target, completed;
3568 unsigned long long array_size = (info->component_size
3569 * reshape->before.data_disks);
3570 int fd;
3571 char buf[20];
3572
3573 /* First, we unsuspend any region that is now known to be safe.
3574 * If suspend_point is on the 'wrong' side of reshape_progress, then
3575 * we don't have or need suspension at the moment. This is true for
3576 * native metadata when we don't need to back-up.
3577 */
3578 if (advancing) {
3579 if (info->reshape_progress <= *suspend_point)
3580 sysfs_set_num(info, NULL, "suspend_lo",
3581 info->reshape_progress);
3582 } else {
3583 /* Note: this won't work in 2.6.37 and before.
3584 * Something somewhere should make sure we don't need it!
3585 */
3586 if (info->reshape_progress >= *suspend_point)
3587 sysfs_set_num(info, NULL, "suspend_hi",
3588 info->reshape_progress);
3589 }
3590
3591 /* Now work out how far it is safe to progress.
3592 * If the read_offset for ->reshape_progress is less than
3593 * 'blocks' beyond the write_offset, we can only progress as far
3594 * as a backup.
3595 * Otherwise we can progress until the write_offset for the new location
3596 * reaches (within 'blocks' of) the read_offset at the current location.
3597 * However that region must be suspended unless we are using native
3598 * metadata.
3599 * If we need to suspend more, we limit it to 128M per device, which is
3600 * rather arbitrary and should be some time-based calculation.
3601 */
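/* For illustration (assumed numbers): growing 4 -> 5 data disks with a
 * 512K new chunk (write_range = 1024 sectors) and reshape_progress of
 * 1000000 array sectors gives read_offset = 250000 and
 * write_offset = 200000 per device; read_offset is beyond
 * write_offset + write_range, so progress can continue up to
 * read_offset * 5 = 1250000 rather than only to the backup_point.
 */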
3602 read_offset = info->reshape_progress / reshape->before.data_disks;
3603 write_offset = info->reshape_progress / reshape->after.data_disks;
3604 write_range = info->new_chunk/512;
3605 if (reshape->before.data_disks == reshape->after.data_disks)
3606 need_backup = array_size;
3607 else
3608 need_backup = reshape->backup_blocks;
3609 if (advancing) {
3610 if (read_offset < write_offset + write_range)
3611 max_progress = backup_point;
3612 else
3613 max_progress =
3614 read_offset *
3615 reshape->after.data_disks;
3616 } else {
3617 if (read_offset > write_offset - write_range)
3618 /* Can only progress as far as has been backed up,
3619 * which must be suspended */
3620 max_progress = backup_point;
3621 else if (info->reshape_progress <= need_backup)
3622 max_progress = backup_point;
3623 else {
3624 if (info->array.major_version >= 0)
3625 /* Can progress until backup is needed */
3626 max_progress = need_backup;
3627 else {
3628 /* Can progress until metadata update is required */
3629 max_progress =
3630 read_offset *
3631 reshape->after.data_disks;
3632 /* but data must be suspended */
3633 if (max_progress < *suspend_point)
3634 max_progress = *suspend_point;
3635 }
3636 }
3637 }
3638
3639 /* We know it is safe to progress to 'max_progress' providing
3640 * it is suspended or we are using native metadata.
3641 * Consider extending suspend_point 128M per device if it
3642 * is less than 64M per device beyond reshape_progress.
3643 * But always do a multiple of 'blocks'
3644 * FIXME this is too big - it takes too long to complete
3645 * this much.
3646 */
3647 target = 64*1024*2 * min(reshape->before.data_disks,
3648 reshape->after.data_disks);
3649 target /= reshape->backup_blocks;
3650 if (target < 2)
3651 target = 2;
3652 target *= reshape->backup_blocks;
3653
3654 /* For externally managed metadata we always need to suspend IO to
3655 * the area being reshaped so we regularly push suspend_point forward.
3656 * For native metadata we only need the suspend if we are going to do
3657 * a backup.
3658 */
3659 if (advancing) {
3660 if ((need_backup > info->reshape_progress
3661 || info->array.major_version < 0) &&
3662 *suspend_point < info->reshape_progress + target) {
3663 if (need_backup < *suspend_point + 2 * target)
3664 *suspend_point = need_backup;
3665 else if (*suspend_point + 2 * target < array_size)
3666 *suspend_point += 2 * target;
3667 else
3668 *suspend_point = array_size;
3669 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
3670 if (max_progress > *suspend_point)
3671 max_progress = *suspend_point;
3672 }
3673 } else {
3674 if (info->array.major_version >= 0) {
3675 /* Only need to suspend when about to backup */
3676 if (info->reshape_progress < need_backup * 2 &&
3677 *suspend_point > 0) {
3678 *suspend_point = 0;
3679 sysfs_set_num(info, NULL, "suspend_lo", 0);
3680 sysfs_set_num(info, NULL, "suspend_hi", need_backup);
3681 }
3682 } else {
3683 /* Need to suspend continually */
3684 if (info->reshape_progress < *suspend_point)
3685 *suspend_point = info->reshape_progress;
3686 if (*suspend_point + target < info->reshape_progress)
3687 /* No need to move suspend region yet */;
3688 else {
3689 if (*suspend_point >= 2 * target)
3690 *suspend_point -= 2 * target;
3691 else
3692 *suspend_point = 0;
3693 sysfs_set_num(info, NULL, "suspend_lo",
3694 *suspend_point);
3695 }
3696 if (max_progress < *suspend_point)
3697 max_progress = *suspend_point;
3698 }
3699 }
3700
3701 /* now set sync_max to allow that progress. sync_max, like
3702 * sync_completed is a count of sectors written per device, so
3703 * we find the difference between max_progress and the start point,
3704 * and divide that by after.data_disks to get a sync_max
3705 * number.
3706 * At the same time we convert wait_point to a similar number
3707 * for comparing against sync_completed.
3708 */
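/* Worked example (assumed values): max_progress = 6144000 array sectors
 * with after.data_disks = 3, a new chunk of 512K (1024 sectors) and an
 * old chunk of 64K (128 sectors): 6144000/3 = 2048000 per device, which
 * is already a multiple of both chunk sizes, so sync_max is set to
 * 2048000 (assuming it does not exceed component_size).
 */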
3709 /* scale down max_progress to per_disk */
3710 max_progress /= reshape->after.data_disks;
3711 /* Round to chunk size as some kernels give an erroneously high number */
3712 max_progress /= info->new_chunk/512;
3713 max_progress *= info->new_chunk/512;
3714 /* And round to old chunk size as the kernel wants that */
3715 max_progress /= info->array.chunk_size/512;
3716 max_progress *= info->array.chunk_size/512;
3717 /* Limit progress to the whole device */
3718 if (max_progress > info->component_size)
3719 max_progress = info->component_size;
3720 wait_point /= reshape->after.data_disks;
3721 if (!advancing) {
3722 /* switch from 'device offset' to 'processed block count' */
3723 max_progress = info->component_size - max_progress;
3724 wait_point = info->component_size - wait_point;
3725 }
3726
3727 sysfs_set_num(info, NULL, "sync_max", max_progress);
3728
3729 /* Now wait. If we have already reached the point that we were
3730 * asked to wait to, don't wait at all, else wait for any change.
3731 * We need to select on 'sync_completed' as that is the place that
3732 * notifications happen, but we are really interested in
3733 * 'reshape_position'
3734 */
3735 fd = sysfs_get_fd(info, NULL, "sync_completed");
3736 if (fd < 0)
3737 goto check_progress;
3738
3739 if (sysfs_fd_get_ll(fd, &completed) < 0)
3740 goto check_progress;
3741
3742 while (completed < max_progress && completed < wait_point) {
3743 /* Check that sync_action is still 'reshape' to avoid
3744 * waiting forever on a dead array
3745 */
3746 char action[20];
3747 fd_set rfds;
3748 if (sysfs_get_str(info, NULL, "sync_action",
3749 action, 20) <= 0 ||
3750 strncmp(action, "reshape", 7) != 0)
3751 break;
3752 /* Some kernels reset 'sync_completed' to zero
3753 * before setting 'sync_action' to 'idle'.
3754 * So we need these extra tests.
3755 */
3756 if (completed == 0 && advancing
3757 && info->reshape_progress > 0)
3758 break;
3759 if (completed == 0 && !advancing
3760 && info->reshape_progress < (info->component_size
3761 * reshape->after.data_disks))
3762 break;
3763 FD_ZERO(&rfds);
3764 FD_SET(fd, &rfds);
3765 select(fd+1, NULL, NULL, &rfds, NULL);
3766 if (sysfs_fd_get_ll(fd, &completed) < 0)
3767 goto check_progress;
3768 }
3769 /* Some kernels reset 'sync_completed' to zero;
3770 * we need the real position that md has reached
3771 */
3772 if (completed == 0)
3773 completed = max_progress;
3774
3775 /* some kernels can give an incorrectly high 'completed' number */
3776 completed /= (info->new_chunk/512);
3777 completed *= (info->new_chunk/512);
3778 /* Convert 'completed' back in to a 'progress' number */
3779 completed *= reshape->after.data_disks;
3780 if (!advancing) {
3781 completed = info->component_size * reshape->after.data_disks
3782 - completed;
3783 }
3784 *reshape_completed = completed;
3785
3786 close(fd);
3787
3788 /* We return the need_backup flag. Caller will decide
3789 * how much - a multiple of ->backup_blocks up to *suspend_point
3790 */
3791 if (advancing)
3792 return need_backup > info->reshape_progress;
3793 else
3794 return need_backup >= info->reshape_progress;
3795
3796 check_progress:
3797 /* if we couldn't read a number from sync_completed, then
3798 * either the reshape did complete, or it aborted.
3799 * We can tell which by checking for 'none' in reshape_position.
3800 * If it did abort, then it might immediately restart if it
3801 * was just a device failure that leaves us degraded but
3802 * functioning.
3803 */
3804 strcpy(buf, "hi");
3805 if (sysfs_get_str(info, NULL, "reshape_position", buf, sizeof(buf)) < 0
3806 || strncmp(buf, "none", 4) != 0) {
3807 /* The abort might only be temporary. Wait up to 10
3808 * seconds for fd to contain a valid number again.
3809 */
3810 struct timeval tv;
3811 int rv = -2;
3812 tv.tv_sec = 10;
3813 tv.tv_usec = 0;
3814 while (fd >= 0 && rv < 0 && tv.tv_sec > 0) {
3815 fd_set rfds;
3816 FD_ZERO(&rfds);
3817 FD_SET(fd, &rfds);
3818 if (select(fd+1, NULL, NULL, &rfds, &tv) != 1)
3819 break;
3820 switch (sysfs_fd_get_ll(fd, &completed)) {
3821 case 0:
3822 /* all good again */
3823 rv = 1;
3824 break;
3825 case -2: /* read error - abort */
3826 tv.tv_sec = 0;
3827 break;
3828 }
3829 }
3830 if (fd >= 0)
3831 close(fd);
3832 return rv; /* abort */
3833 } else {
3834 /* Maybe racing with array shutdown - check state */
3835 if (fd >= 0)
3836 close(fd);
3837 if (sysfs_get_str(info, NULL, "array_state", buf, sizeof(buf)) < 0
3838 || strncmp(buf, "inactive", 8) == 0
3839 || strncmp(buf, "clear",5) == 0)
3840 return -2; /* abort */
3841 return -1; /* complete */
3842 }
3843 }
3844
3845 /* FIXME return status is never checked */
3846 static int grow_backup(struct mdinfo *sra,
3847 unsigned long long offset, /* per device */
3848 unsigned long stripes, /* per device, in old chunks */
3849 int *sources, unsigned long long *offsets,
3850 int disks, int chunk, int level, int layout,
3851 int dests, int *destfd, unsigned long long *destoffsets,
3852 int part, int *degraded,
3853 char *buf)
3854 {
3855 /* Back up 'blocks' sectors at 'offset' on each device of the array,
3856 * to storage 'destfd' (offset 'destoffsets'), after first
3857 * suspending IO. Then allow resync to continue
3858 * over the suspended section.
3859 * Use part 'part' of the backup-super-block.
3860 */
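/* Illustrative sketch of the backup area as written below (added for
 * clarity, not part of the original source); for each destination:
 *   destoffset - 4096            : 512-byte backup superblock 'bsb'
 *   destoffset                   : data for part 0 (bsb.length sectors)
 *   destoffset + devstart2 * 512 : data for part 1 (bsb.length2 sectors)
 * and, when destoffset is large enough, a second copy of 'bsb' is
 * written just after the backed-up stripes.
 */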
3861 int odata = disks;
3862 int rv = 0;
3863 int i;
3864 unsigned long long ll;
3865 int new_degraded;
3866 //printf("offset %llu\n", offset);
3867 if (level >= 4)
3868 odata--;
3869 if (level == 6)
3870 odata--;
3871
3872 /* Check that the array hasn't become degraded, else we might back up the wrong data */
3873 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
3874 return -1; /* FIXME this error is ignored */
3875 new_degraded = (int)ll;
3876 if (new_degraded != *degraded) {
3877 /* check each device to ensure it is still working */
3878 struct mdinfo *sd;
3879 for (sd = sra->devs ; sd ; sd = sd->next) {
3880 if (sd->disk.state & (1<<MD_DISK_FAULTY))
3881 continue;
3882 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
3883 char sbuf[20];
3884 if (sysfs_get_str(sra, sd, "state", sbuf, 20) < 0 ||
3885 strstr(sbuf, "faulty") ||
3886 strstr(sbuf, "in_sync") == NULL) {
3887 /* this device is dead */
3888 sd->disk.state = (1<<MD_DISK_FAULTY);
3889 if (sd->disk.raid_disk >= 0 &&
3890 sources[sd->disk.raid_disk] >= 0) {
3891 close(sources[sd->disk.raid_disk]);
3892 sources[sd->disk.raid_disk] = -1;
3893 }
3894 }
3895 }
3896 }
3897 *degraded = new_degraded;
3898 }
3899 if (part) {
3900 bsb.arraystart2 = __cpu_to_le64(offset * odata);
3901 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
3902 } else {
3903 bsb.arraystart = __cpu_to_le64(offset * odata);
3904 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
3905 }
3906 if (part)
3907 bsb.magic[15] = '2';
3908 for (i = 0; i < dests; i++)
3909 if (part)
3910 lseek64(destfd[i], destoffsets[i] + __le64_to_cpu(bsb.devstart2)*512, 0);
3911 else
3912 lseek64(destfd[i], destoffsets[i], 0);
3913
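/* Hedged note (not in the original source): save_stripes() is defined
 * elsewhere in mdadm; judging by the arguments below it copies
 * 'stripes * chunk * odata' bytes of array data, starting at array
 * byte address 'offset * 512 * odata', from the source devices into
 * each backup destination.
 */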
3914 rv = save_stripes(sources, offsets,
3915 disks, chunk, level, layout,
3916 dests, destfd,
3917 offset*512*odata, stripes * chunk * odata,
3918 buf);
3919
3920 if (rv)
3921 return rv;
3922 bsb.mtime = __cpu_to_le64(time(0));
3923 for (i = 0; i < dests; i++) {
3924 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
3925
3926 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
3927 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
3928 bsb.sb_csum2 = bsb_csum((char*)&bsb,
3929 ((char*)&bsb.sb_csum2)-((char*)&bsb));
3930
3931 rv = -1;
3932 if ((unsigned long long)lseek64(destfd[i], destoffsets[i] - 4096, 0)
3933 != destoffsets[i] - 4096)
3934 break;
3935 if (write(destfd[i], &bsb, 512) != 512)
3936 break;
3937 if (destoffsets[i] > 4096) {
3938 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
3939 destoffsets[i]+stripes*chunk*odata)
3940 break;
3941 if (write(destfd[i], &bsb, 512) != 512)
3942 break;
3943 }
3944 fsync(destfd[i]);
3945 rv = 0;
3946 }
3947
3948 return rv;
3949 }
3950
3951 /* In 2.6.30, the value reported by sync_completed can be
3952 * less than it should be by one stripe.
3953 * This only happens when reshape hits sync_max and pauses.
3954 * So allow wait_backup to either extend sync_max further
3955 * than strictly necessary, or to return before the
3956 * sync has got quite as far as we would really like.
3957 * This is what 'blocks2' is for.
3958 * The various callers give appropriate values so that
3959 * everything works.
3960 */
3961 /* FIXME return value is often ignored */
3962 static int forget_backup(int dests, int *destfd,
3963 unsigned long long *destoffsets,
3964 int part)
3965 {
3966 /*
3967 * Erase backup 'part' (which is 0 or 1)
3968 */
3969 int i;
3970 int rv;
3971
3972 if (part) {
3973 bsb.arraystart2 = __cpu_to_le64(0);
3974 bsb.length2 = __cpu_to_le64(0);
3975 } else {
3976 bsb.arraystart = __cpu_to_le64(0);
3977 bsb.length = __cpu_to_le64(0);
3978 }
3979 bsb.mtime = __cpu_to_le64(time(0));
3980 rv = 0;
3981 for (i = 0; i < dests; i++) {
3982 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
3983 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
3984 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
3985 bsb.sb_csum2 = bsb_csum((char*)&bsb,
3986 ((char*)&bsb.sb_csum2)-((char*)&bsb));
3987 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
3988 destoffsets[i]-4096)
3989 rv = -1;
3990 if (rv == 0 &&
3991 write(destfd[i], &bsb, 512) != 512)
3992 rv = -1;
3993 fsync(destfd[i]);
3994 }
3995 return rv;
3996 }
3997
3998 static void fail(char *msg)
3999 {
4000 int rv;
4001 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4002 rv |= (write(2, "\n", 1) != 1);
4003 exit(rv ? 1 : 2);
4004 }
4005
4006 static char *abuf, *bbuf;
4007 static unsigned long long abuflen;
4008 static void validate(int afd, int bfd, unsigned long long offset)
4009 {
4010 /* Check the data in the backup against the array.
4011 * This is only used for regression testing and should not
4012 * be used while the array is active.
4013 */
4014 if (afd < 0)
4015 return;
4016 lseek64(bfd, offset - 4096, 0);
4017 if (read(bfd, &bsb2, 512) != 512)
4018 fail("cannot read bsb");
4019 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4020 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4021 fail("first csum bad");
4022 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4023 fail("magic is bad");
4024 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4025 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4026 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4027 fail("second csum bad");
4028
4029 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4030 fail("devstart is wrong");
4031
4032 if (bsb2.length) {
4033 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4034
4035 if (abuflen < len) {
4036 free(abuf);
4037 free(bbuf);
4038 abuflen = len;
4039 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4040 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4041 abuflen = 0;
4042 /* just stop validating on mem-alloc failure */
4043 return;
4044 }
4045 }
4046
4047 lseek64(bfd, offset, 0);
4048 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4049 //printf("len %llu\n", len);
4050 fail("read first backup failed");
4051 }
4052 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4053 if ((unsigned long long)read(afd, abuf, len) != len)
4054 fail("read first from array failed");
4055 if (memcmp(bbuf, abuf, len) != 0) {
4056 #if 0
4057 int i;
4058 printf("offset=%llu len=%llu\n",
4059 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4060 for (i=0; i<len; i++)
4061 if (bbuf[i] != abuf[i]) {
4062 printf("first diff byte %d\n", i);
4063 break;
4064 }
4065 #endif
4066 fail("data1 compare failed");
4067 }
4068 }
4069 if (bsb2.length2) {
4070 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4071
4072 if (abuflen < len) {
4073 free(abuf);
4074 free(bbuf);
4075 abuflen = len;
4076 abuf = xmalloc(abuflen);
4077 bbuf = xmalloc(abuflen);
4078 }
4079
4080 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4081 if ((unsigned long long)read(bfd, bbuf, len) != len)
4082 fail("read second backup failed");
4083 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4084 if ((unsigned long long)read(afd, abuf, len) != len)
4085 fail("read second from array failed");
4086 if (memcmp(bbuf, abuf, len) != 0)
4087 fail("data2 compare failed");
4088 }
4089 }
4090
4091 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4092 struct supertype *st, unsigned long blocks,
4093 int *fds, unsigned long long *offsets,
4094 int dests, int *destfd, unsigned long long *destoffsets)
4095 {
4096 /* Monitor a reshape where backup is being performed using
4097 * the 'native' mechanism - either to a backup file, or
4098 * to some space in a spare.
4099 */
4100 char *buf;
4101 int degraded = -1;
4102 unsigned long long speed;
4103 unsigned long long suspend_point, array_size;
4104 unsigned long long backup_point, wait_point;
4105 unsigned long long reshape_completed;
4106 int done = 0;
4107 int increasing = reshape->after.data_disks >= reshape->before.data_disks;
4108 int part = 0; /* The next part of the backup area to fill. It may already
4109 * be full, so we need to check */
4110 int level = reshape->level;
4111 int layout = reshape->before.layout;
4112 int data = reshape->before.data_disks;
4113 int disks = reshape->before.data_disks + reshape->parity;
4114 int chunk = sra->array.chunk_size;
4115 struct mdinfo *sd;
4116 unsigned long stripes;
4117 int uuid[4];
4118
4119 /* set up the backup-super-block. This requires the
4120 * uuid from the array.
4121 */
4122 /* Find a superblock */
4123 for (sd = sra->devs; sd; sd = sd->next) {
4124 char *dn;
4125 int devfd;
4126 int ok;
4127 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4128 continue;
4129 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4130 devfd = dev_open(dn, O_RDONLY);
4131 if (devfd < 0)
4132 continue;
4133 ok = st->ss->load_super(st, devfd, NULL);
4134 close(devfd);
4135 if (ok == 0)
4136 break;
4137 }
4138 if (!sd) {
4139 pr_err("Cannot find a superblock\n");
4140 return 0;
4141 }
4142
4143 memset(&bsb, 0, 512);
4144 memcpy(bsb.magic, "md_backup_data-1", 16);
4145 st->ss->uuid_from_super(st, uuid);
4146 memcpy(bsb.set_uuid, uuid, 16);
4147 bsb.mtime = __cpu_to_le64(time(0));
4148 bsb.devstart2 = blocks;
4149
4150 stripes = blocks / (sra->array.chunk_size/512) /
4151 reshape->before.data_disks;
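/* Worked example (hypothetical numbers, illustrative only): with
 * blocks = 4096 sectors of backup space, a 512K chunk (1024 sectors)
 * and 2 data disks before the reshape, stripes = 4096 / 1024 / 2 = 2,
 * i.e. each backup pass covers 2 old chunks per device.
 */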
4152
4153 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4154 /* Don't start the 'reshape' */
4155 return 0;
4156 if (reshape->before.data_disks == reshape->after.data_disks) {
4157 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4158 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4159 }
4160
4161 if (increasing) {
4162 array_size = sra->component_size * reshape->after.data_disks;
4163 backup_point = sra->reshape_progress;
4164 suspend_point = 0;
4165 } else {
4166 array_size = sra->component_size * reshape->before.data_disks;
4167 backup_point = reshape->backup_blocks;
4168 suspend_point = array_size;
4169 }
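/* Hedged summary (not in the original source): an 'increasing' reshape
 * walks forward from the start of the array, so the suspend limit
 * starts at 0 and is advanced; a shrinking reshape walks backwards
 * from the end, so the limit starts at the full old array size.
 */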
4170
4171 while (!done) {
4172 int rv;
4173
4174 /* We want to return as soon as the oldest backup slot can
4175 * be released, as that allows us to start backing up
4176 * some more, provided suspend_point has been
4177 * advanced, which it should have been.
4178 */
4179 if (increasing) {
4180 wait_point = array_size;
4181 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4182 wait_point = (__le64_to_cpu(bsb.arraystart) +
4183 __le64_to_cpu(bsb.length));
4184 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4185 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4186 __le64_to_cpu(bsb.length2));
4187 } else {
4188 wait_point = 0;
4189 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4190 wait_point = __le64_to_cpu(bsb.arraystart);
4191 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4192 wait_point = __le64_to_cpu(bsb.arraystart2);
4193 }
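/* Hypothetical example (not from the original source): growing, with
 * part == 0 and slot 0 currently holding arraystart = 0 and
 * length = 4096 sectors.  wait_point then becomes 4096, i.e. we only
 * need the reshape to get past the end of that backed-up region
 * before slot 0 can be reused.
 */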
4194
4195 rv = progress_reshape(sra, reshape,
4196 backup_point, wait_point,
4197 &suspend_point, &reshape_completed);
4198 /* external metadata would need to ping_monitor here */
4199 sra->reshape_progress = reshape_completed;
4200
4201 /* Clear any backup region that is before 'here' */
4202 if (increasing) {
4203 if (__le64_to_cpu(bsb.length) > 0 &&
4204 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4205 __le64_to_cpu(bsb.length)))
4206 forget_backup(dests, destfd,
4207 destoffsets, 0);
4208 if (__le64_to_cpu(bsb.length2) > 0 &&
4209 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4210 __le64_to_cpu(bsb.length2)))
4211 forget_backup(dests, destfd,
4212 destoffsets, 1);
4213 } else {
4214 if (__le64_to_cpu(bsb.length) > 0 &&
4215 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4216 forget_backup(dests, destfd,
4217 destoffsets, 0);
4218 if (__le64_to_cpu(bsb.length2) > 0 &&
4219 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4220 forget_backup(dests, destfd,
4221 destoffsets, 1);
4222 }
4223
4224 if (rv < 0) {
4225 if (rv == -1)
4226 done = 1;
4227 break;
4228 }
4229 if (rv == 0 && increasing && !st->ss->external) {
4230 /* No longer need to monitor this reshape */
4231 done = 1;
4232 break;
4233 }
4234
4235 while (rv) {
4236 unsigned long long offset;
4237 unsigned long actual_stripes;
4238 /* Need to back up some data.
4239 * If 'part' is not in use and the desired
4240 * backup region is suspended, do a backup,
4241 * then consider the next part.
4242 */
4243 /* Check that 'part' is unused */
4244 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4245 break;
4246 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4247 break;
4248
4249 offset = backup_point / data;
4250 actual_stripes = stripes;
4251 if (increasing) {
4252 if (offset + actual_stripes * (chunk/512) >
4253 sra->component_size)
4254 actual_stripes = ((sra->component_size - offset)
4255 / (chunk/512));
4256 if (offset + actual_stripes * (chunk/512) >
4257 suspend_point/data)
4258 break;
4259 } else {
4260 if (offset < actual_stripes * (chunk/512))
4261 actual_stripes = offset / (chunk/512);
4262 offset -= actual_stripes * (chunk/512);
4263 if (offset < suspend_point/data)
4264 break;
4265 }
4266 if (actual_stripes == 0)
4267 break;
4268 grow_backup(sra, offset, actual_stripes,
4269 fds, offsets,
4270 disks, chunk, level, layout,
4271 dests, destfd, destoffsets,
4272 part, &degraded, buf);
4273 validate(afd, destfd[0], destoffsets[0]);
4274 /* record where 'part' is up to */
4275 part = !part;
4276 if (increasing)
4277 backup_point += actual_stripes * (chunk/512) * data;
4278 else
4279 backup_point -= actual_stripes * (chunk/512) * data;
4280 }
4281 }
4282
4283 /* FIXME maybe call progress_reshape one more time instead */
4284 abort_reshape(sra); /* remove any remaining suspension */
4285 if (reshape->before.data_disks == reshape->after.data_disks)
4286 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4287 free(buf);
4288 return done;
4289 }
4290
4291 /*
4292 * If any spare contains md_backup_data-1 which is recent wrt mtime,
4293 * write that data into the array and update the superblocks with
4294 * the new reshape_progress.
4295 */
4296 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, int cnt,
4297 char *backup_file, int verbose)
4298 {
4299 int i, j;
4300 int old_disks;
4301 unsigned long long *offsets;
4302 unsigned long long nstripe, ostripe;
4303 int ndata, odata;
4304
4305 odata = info->array.raid_disks - info->delta_disks - 1;
4306 if (info->array.level == 6) odata--; /* number of data disks */
4307 ndata = info->array.raid_disks - 1;
4308 if (info->new_level == 6) ndata--;
4309
4310 old_disks = info->array.raid_disks - info->delta_disks;
4311
4312 if (info->delta_disks <= 0)
4313 /* Didn't grow, so the backup file must have
4314 * been used
4315 */
4316 old_disks = cnt;
4317 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4318 struct mdinfo dinfo;
4319 int fd;
4320 int bsbsize;
4321 char *devname, namebuf[20];
4322 unsigned long long lo, hi;
4323
4324 /* This was a spare and may have some saved data on it.
4325 * Load the superblock, find and load the
4326 * backup_super_block.
4327 * If either fails, go on to the next device.
4328 * If the backup contains no new info, just return;
4329 * else restore the data and update all superblocks.
4330 */
4331 if (i == old_disks-1) {
4332 fd = open(backup_file, O_RDONLY);
4333 if (fd<0) {
4334 pr_err("backup file %s inaccessible: %s\n",
4335 backup_file, strerror(errno));
4336 continue;
4337 }
4338 devname = backup_file;
4339 } else {
4340 fd = fdlist[i];
4341 if (fd < 0)
4342 continue;
4343 if (st->ss->load_super(st, fd, NULL))
4344 continue;
4345
4346 st->ss->getinfo_super(st, &dinfo, NULL);
4347 st->ss->free_super(st);
4348
4349 if (lseek64(fd,
4350 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4351 0) < 0) {
4352 pr_err("Cannot seek on device %d\n", i);
4353 continue; /* Cannot seek */
4354 }
4355 sprintf(namebuf, "device-%d", i);
4356 devname = namebuf;
4357 }
4358 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4359 if (verbose)
4360 pr_err("Cannot read from %s\n", devname);
4361 continue; /* Cannot read */
4362 }
4363 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4364 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4365 if (verbose)
4366 pr_err("No backup metadata on %s\n", devname);
4367 continue;
4368 }
4369 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4370 if (verbose)
4371 pr_err("Bad backup-metadata checksum on %s\n", devname);
4372 continue; /* bad checksum */
4373 }
4374 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4375 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4376 if (verbose)
4377 pr_err("Bad backup-metadata checksum2 on %s\n", devname);
4378 continue; /* Bad second checksum */
4379 }
4380 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4381 if (verbose)
4382 pr_err("Wrong uuid on backup-metadata on %s\n", devname);
4383 continue; /* Wrong uuid */
4384 }
4385
4386 /* array utime and backup-mtime should be updated at much the same time, but it seems that
4387 * sometimes they aren't... So allow considerable flexibility in matching, and allow
4388 * this test to be overridden by an environment variable.
4389 */
4390 if (info->array.utime > (int)__le64_to_cpu(bsb.mtime) + 2*60*60 ||
4391 info->array.utime < (int)__le64_to_cpu(bsb.mtime) - 10*60) {
4392 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4393 pr_err("accepting backup with timestamp %lu "
4394 "for array with timestamp %lu\n",
4395 (unsigned long)__le64_to_cpu(bsb.mtime),
4396 (unsigned long)info->array.utime);
4397 } else {
4398 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4399 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4400 continue; /* time stamp is too bad */
4401 }
4402 }
4403
4404 if (bsb.magic[15] == '1') {
4405 if (bsb.length == 0)
4406 continue;
4407 if (info->delta_disks >= 0) {
4408 /* reshape_progress is increasing */
4409 if (__le64_to_cpu(bsb.arraystart)
4410 + __le64_to_cpu(bsb.length)
4411 < info->reshape_progress) {
4412 nonew:
4413 if (verbose)
4414 pr_err("backup-metadata found on %s but is not needed\n", devname);
4415 continue; /* No new data here */
4416 }
4417 } else {
4418 /* reshape_progress is decreasing */
4419 if (__le64_to_cpu(bsb.arraystart) >=
4420 info->reshape_progress)
4421 goto nonew; /* No new data here */
4422 }
4423 } else {
4424 if (bsb.length == 0 && bsb.length2 == 0)
4425 continue;
4426 if (info->delta_disks >= 0) {
4427 /* reshape_progress is increasing */
4428 if ((__le64_to_cpu(bsb.arraystart)
4429 + __le64_to_cpu(bsb.length)
4430 < info->reshape_progress)
4431 &&
4432 (__le64_to_cpu(bsb.arraystart2)
4433 + __le64_to_cpu(bsb.length2)
4434 < info->reshape_progress))
4435 goto nonew; /* No new data here */
4436 } else {
4437 /* reshape_progress is decreasing */
4438 if (__le64_to_cpu(bsb.arraystart) >=
4439 info->reshape_progress &&
4440 __le64_to_cpu(bsb.arraystart2) >=
4441 info->reshape_progress)
4442 goto nonew; /* No new data here */
4443 }
4444 }
4445 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4446 second_fail:
4447 if (verbose)
4448 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4449 devname);
4450 continue; /* Cannot seek */
4451 }
4452 /* There should be a duplicate backup superblock 4k before here */
4453 if (lseek64(fd, -4096, 1) < 0 ||
4454 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4455 goto second_fail; /* Cannot find leading superblock */
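/* Note (added for clarity, not in the original source): the size
 * compared below covers everything up to the version-dependent pad
 * field, i.e. the meaningful part of the v1 or v2 backup superblock.
 */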
4456 if (bsb.magic[15] == '1')
4457 bsbsize = offsetof(struct mdp_backup_super, pad1);
4458 else
4459 bsbsize = offsetof(struct mdp_backup_super, pad);
4460 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4461 goto second_fail; /* Cannot find leading superblock */
4462
4463 /* Now need the data offsets for all devices. */
4464 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4465 for(j=0; j<info->array.raid_disks; j++) {
4466 if (fdlist[j] < 0)
4467 continue;
4468 if (st->ss->load_super(st, fdlist[j], NULL))
4469 /* FIXME should this be an error? */
4470 continue;
4471 st->ss->getinfo_super(st, &dinfo, NULL);
4472 st->ss->free_super(st);
4473 offsets[j] = dinfo.data_offset * 512;
4474 }
4475 printf(Name ": restoring critical section\n");
4476
4477 if (restore_stripes(fdlist, offsets,
4478 info->array.raid_disks,
4479 info->new_chunk,
4480 info->new_level,
4481 info->new_layout,
4482 fd, __le64_to_cpu(bsb.devstart)*512,
4483 __le64_to_cpu(bsb.arraystart)*512,
4484 __le64_to_cpu(bsb.length)*512, NULL)) {
4485 /* didn't succeed, so give up */
4486 if (verbose)
4487 pr_err("Error restoring backup from %s\n",
4488 devname);
4489 free(offsets);
4490 return 1;
4491 }
4492
4493 if (bsb.magic[15] == '2' &&
4494 restore_stripes(fdlist, offsets,
4495 info->array.raid_disks,
4496 info->new_chunk,
4497 info->new_level,
4498 info->new_layout,
4499 fd, __le64_to_cpu(bsb.devstart)*512 +
4500 __le64_to_cpu(bsb.devstart2)*512,
4501 __le64_to_cpu(bsb.arraystart2)*512,
4502 __le64_to_cpu(bsb.length2)*512, NULL)) {
4503 /* didn't succeed, so give up */
4504 if (verbose)
4505 pr_err("Error restoring second backup from %s\n",
4506 devname);
4507 free(offsets);
4508 return 1;
4509 }
4510
4511 free(offsets);
4512
4513 /* Ok, so the data is restored. Let's update those superblocks. */
4514
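/* Descriptive note (not in the original source): [lo, hi) below is the
 * union of the array ranges just restored from the backup; if the
 * recorded reshape_progress falls inside that range it is pulled to
 * the edge of the restored region further on.
 */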
4515 lo = hi = 0;
4516 if (bsb.length) {
4517 lo = __le64_to_cpu(bsb.arraystart);
4518 hi = lo + __le64_to_cpu(bsb.length);
4519 }
4520 if (bsb.magic[15] == '2' && bsb.length2) {
4521 unsigned long long lo1, hi1;
4522 lo1 = __le64_to_cpu(bsb.arraystart2);
4523 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4524 if (lo == hi) {
4525 lo = lo1;
4526 hi = hi1;
4527 } else if (lo < lo1)
4528 hi = hi1;
4529 else
4530 lo = lo1;
4531 }
4532 if (lo < hi &&
4533 (info->reshape_progress < lo ||
4534 info->reshape_progress > hi))
4535 /* backup does not affect reshape_progress */ ;
4536 else if (info->delta_disks >= 0) {
4537 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4538 __le64_to_cpu(bsb.length);
4539 if (bsb.magic[15] == '2') {
4540 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2) +
4541 __le64_to_cpu(bsb.length2);
4542 if (p2 > info->reshape_progress)
4543 info->reshape_progress = p2;
4544 }
4545 } else {
4546 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4547 if (bsb.magic[15] == '2') {
4548 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2);
4549 if (p2 < info->reshape_progress)
4550 info->reshape_progress = p2;
4551 }
4552 }
4553 for (j=0; j<info->array.raid_disks; j++) {
4554 if (fdlist[j] < 0)
4555 continue;
4556 if (st->ss->load_super(st, fdlist[j], NULL))
4557 continue;
4558 st->ss->getinfo_super(st, &dinfo, NULL);
4559 dinfo.reshape_progress = info->reshape_progress;
4560 st->ss->update_super(st, &dinfo,
4561 "_reshape_progress",
4562 NULL,0, 0, NULL);
4563 st->ss->store_super(st, fdlist[j]);
4564 st->ss->free_super(st);
4565 }
4566 return 0;
4567 }
4568 /* Didn't find any backup data, try to see if any
4569 * was needed.
4570 */
4571 if (info->delta_disks < 0) {
4572 /* When shrinking, the critical section is at the end.
4573 * So see if we are before the critical section.
4574 */
4575 unsigned long long first_block;
4576 nstripe = ostripe = 0;
4577 first_block = 0;
4578 while (ostripe >= nstripe) {
4579 ostripe += info->array.chunk_size / 512;
4580 first_block = ostripe * odata;
4581 nstripe = first_block / ndata / (info->new_chunk/512) *
4582 (info->new_chunk/512);
4583 }
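/* Worked example (hypothetical geometry, illustrative only): shrinking
 * from 4 to 3 data disks with 512K chunks (1024 sectors) in both the
 * old and new layout, the loop above runs three times and exits with
 * first_block = 12288 sectors, so a reshape_progress at or beyond
 * 12288 means the backup was not needed and we can return success.
 */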
4584
4585 if (info->reshape_progress >= first_block)
4586 return 0;
4587 }
4588 if (info->delta_disks > 0) {
4589 /* See if we are beyond the critical section. */
4590 unsigned long long last_block;
4591 nstripe = ostripe = 0;
4592 last_block = 0;
4593 while (nstripe >= ostripe) {
4594 nstripe += info->new_chunk / 512;
4595 last_block = nstripe * ndata;
4596 ostripe = last_block / odata / (info->array.chunk_size/512) *
4597 (info->array.chunk_size/512);
4598 }
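/* Worked example (hypothetical geometry, illustrative only): growing
 * from 3 to 4 data disks with 512K chunks (1024 sectors) throughout,
 * the loop above exits with last_block = 12288 sectors, so a
 * reshape_progress at or beyond 12288 is already past the critical
 * section and no restore is needed.
 */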
4599
4600 if (info->reshape_progress >= last_block)
4601 return 0;
4602 }
4603 /* needed to recover critical section! */
4604 if (verbose)
4605 pr_err("Failed to find backup of critical section\n");
4606 return 1;
4607 }
4608
4609 int Grow_continue_command(char *devname, int fd,
4610 char *backup_file, int verbose)
4611 {
4612 int ret_val = 0;
4613 struct supertype *st = NULL;
4614 struct mdinfo *content = NULL;
4615 struct mdinfo array;
4616 char *subarray = NULL;
4617 struct mdinfo *cc = NULL;
4618 struct mdstat_ent *mdstat = NULL;
4619 int cfd = -1;
4620 int fd2 = -1;
4621
4622 dprintf("Grow continue from command line called for %s\n",
4623 devname);
4624
4625 st = super_by_fd(fd, &subarray);
4626 if (!st || !st->ss) {
4627 pr_err("Unable to determine metadata format for %s\n",
4628 devname);
4629 return 1;
4630 }
4631 dprintf("Grow continue is run for ");
4632 if (st->ss->external == 0) {
4633 int d;
4634 dprintf("native array (%s)\n", devname);
4635 if (ioctl(fd, GET_ARRAY_INFO, &array.array) < 0) {
4636 pr_err("%s is not an active md array -"
4637 " aborting\n", devname);
4638 ret_val = 1;
4639 goto Grow_continue_command_exit;
4640 }
4641 content = &array;
4642 /* Need to load a superblock.
4643 * FIXME we should really get what we need from
4644 * sysfs
4645 */
4646 for (d = 0; d < MAX_DISKS; d++) {
4647 mdu_disk_info_t disk;
4648 char *dv;
4649 int err;
4650 disk.number = d;
4651 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
4652 continue;
4653 if (disk.major == 0 && disk.minor == 0)
4654 continue;
4655 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
4656 continue;
4657 dv = map_dev(disk.major, disk.minor, 1);
4658 if (!dv)
4659 continue;
4660 fd2 = dev_open(dv, O_RDONLY);
4661 if (fd2 < 0)
4662 continue;
4663 err = st->ss->load_super(st, fd2, NULL);
4664 close(fd2);
4665 if (err)
4666 continue;
4667 break;
4668 }
4669 if (d == MAX_DISKS) {
4670 pr_err("Unable to load metadata for %s\n",
4671 devname);
4672 ret_val = 1;
4673 goto Grow_continue_command_exit;
4674 }
4675 st->ss->getinfo_super(st, content, NULL);
4676 } else {
4677 char *container;
4678
4679 if (subarray) {
4680 dprintf("subarray (%s)\n", subarray);
4681 container = st->container_devnm;
4682 cfd = open_dev_excl(st->container_devnm);
4683 } else {
4684 container = st->devnm;
4685 close(fd);
4686 cfd = open_dev_excl(st->devnm);
4687 dprintf("container (%s)\n", container);
4688 fd = cfd;
4689 }
4690 if (cfd < 0) {
4691 pr_err("Unable to open container "
4692 "for %s\n", devname);
4693 ret_val = 1;
4694 goto Grow_continue_command_exit;
4695 }
4696
4697 /* find the array under reshape in the container
4698 */
4699 ret_val = st->ss->load_container(st, cfd, NULL);
4700 if (ret_val) {
4701 pr_err("Cannot read superblock for %s\n",
4702 devname);
4703 ret_val = 1;
4704 goto Grow_continue_command_exit;
4705 }
4706
4707 cc = st->ss->container_content(st, subarray);
4708 for (content = cc; content ; content = content->next) {
4709 char *array;
4710 int allow_reshape = 1;
4711
4712 if (content->reshape_active == 0)
4713 continue;
4714 /* The decision about an array- or container-wide
4715 * reshape is taken in Grow_continue based on the
4716 * content->reshape_active state, therefore we
4717 * need to check_reshape based on
4718 * reshape_active and the subarray name.
4719 */
4720 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
4721 allow_reshape = 0;
4722 if (content->reshape_active == CONTAINER_RESHAPE &&
4723 (content->array.state
4724 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
4725 allow_reshape = 0;
4726
4727 if (!allow_reshape) {
4728 pr_err("cannot continue reshape of an array"
4729 " in container with unsupported"
4730 " metadata: %s(%s)\n",
4731 devname, container);
4732 ret_val = 1;
4733 goto Grow_continue_command_exit;
4734 }
4735
4736 array = strchr(content->text_version+1, '/')+1;
4737 mdstat = mdstat_by_subdev(array, container);
4738 if (!mdstat)
4739 continue;
4740 if (mdstat->active == 0) {
4741 pr_err("Skipping inactive array %s.\n",
4742 mdstat->devnm);
4743 free_mdstat(mdstat);
4744 mdstat = NULL;
4745 continue;
4746 }
4747 break;
4748 }
4749 if (!content) {
4750 pr_err("Unable to determine reshaped "
4751 "array for %s\n", devname);
4752 ret_val = 1;
4753 goto Grow_continue_command_exit;
4754 }
4755 fd2 = open_dev(mdstat->devnm);
4756 if (fd2 < 0) {
4757 pr_err("cannot open (%s)\n", mdstat->devnm);
4758 ret_val = 1;
4759 goto Grow_continue_command_exit;
4760 }
4761
4762 sysfs_init(content, fd2, mdstat->devnm);
4763
4764 /* start mdmon in case it is not running
4765 */
4766 if (!mdmon_running(container))
4767 start_mdmon(container);
4768 ping_monitor(container);
4769
4770 if (mdmon_running(container))
4771 st->update_tail = &st->updates;
4772 else {
4773 pr_err("No mdmon found. "
4774 "Grow cannot continue.\n");
4775 ret_val = 1;
4776 goto Grow_continue_command_exit;
4777 }
4778 }
4779
4780 /* verify that the array under reshape is started from
4781 * the correct position
4782 */
4783 if (verify_reshape_position(content, content->array.level) < 0) {
4784 ret_val = 1;
4785 goto Grow_continue_command_exit;
4786 }
4787
4788 /* continue reshape
4789 */
4790 ret_val = Grow_continue(fd, st, content, backup_file, 0);
4791
4792 Grow_continue_command_exit:
4793 if (fd2 > -1)
4794 close(fd2);
4795 if (cfd > -1)
4796 close(cfd);
4797 st->ss->free_super(st);
4798 free_mdstat(mdstat);
4799 sysfs_free(cc);
4800 free(subarray);
4801
4802 return ret_val;
4803 }
4804
4805 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
4806 char *backup_file, int freeze_reshape)
4807 {
4808 int ret_val = 2;
4809
4810 if (!info->reshape_active)
4811 return ret_val;
4812
4813 if (st->ss->external) {
4814 int cfd = open_dev(st->container_devnm);
4815
4816 if (cfd < 0)
4817 return 1;
4818
4819 st->ss->load_container(st, cfd, st->container_devnm);
4820 close(cfd);
4821 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
4822 st, info, 0, backup_file,
4823 0,
4824 1 | info->reshape_active,
4825 freeze_reshape);
4826 } else
4827 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
4828 NULL, 0ULL, backup_file, 0, 0,
4829 1 | info->reshape_active,
4830 freeze_reshape);
4831
4832 return ret_val;
4833 }