1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24 #include "mdadm.h"
25 #include "dlink.h"
26 #include <sys/mman.h>
27 #include <stdint.h>
28 #include <signal.h>
29
30 #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN)
31 #error no endian defined
32 #endif
33 #include "md_u.h"
34 #include "md_p.h"
35
36 #ifndef offsetof
37 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
38 #endif
39
40 int restore_backup(struct supertype *st,
41 struct mdinfo *content,
42 int working_disks,
43 int next_spare,
44 char *backup_file,
45 int verbose)
46 {
47 int i;
48 int *fdlist;
49 struct mdinfo *dev;
50 int err;
51 int disk_count = next_spare + working_disks;
52
53 dprintf("Called restore_backup()\n");
54 fdlist = xmalloc(sizeof(int) * disk_count);
55
56 enable_fds(next_spare);
57 for (i = 0; i < next_spare; i++)
58 fdlist[i] = -1;
59 for (dev = content->devs; dev; dev = dev->next) {
60 char buf[22];
61 int fd;
62 sprintf(buf, "%d:%d",
63 dev->disk.major,
64 dev->disk.minor);
65 fd = dev_open(buf, O_RDWR);
66
67 if (dev->disk.raid_disk >= 0)
68 fdlist[dev->disk.raid_disk] = fd;
69 else
70 fdlist[next_spare++] = fd;
71 }
72
73 if (st->ss->external && st->ss->recover_backup)
74 err = st->ss->recover_backup(st, content);
75 else
76 err = Grow_restart(st, content, fdlist, next_spare,
77 backup_file, verbose > 0);
78
79 while (next_spare > 0) {
80 next_spare--;
81 if (fdlist[next_spare] >= 0)
82 close(fdlist[next_spare]);
83 }
84 free(fdlist);
85 if (err) {
86 pr_err("Failed to restore critical"
87 " section for reshape - sorry.\n");
88 if (!backup_file)
89 pr_err("Possibly you need"
90 " to specify a --backup-file\n");
91 return 1;
92 }
93
94 dprintf("restore_backup() returns status OK.\n");
95 return 0;
96 }
97
98 int Grow_Add_device(char *devname, int fd, char *newdev)
99 {
100 /* Add a device to an active array.
101 * Currently, just extend a linear array.
102 * This requires writing a new superblock on the
103 * new device, calling the kernel to add the device,
104 * and if that succeeds, update the superblock on
105 * all other devices.
106 * This means that we need to *find* all other devices.
107 */
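	/* Illustrative usage (device names are placeholders, not taken from this
	 * source): a linear array is typically extended with something like
	 *   mdadm --grow /dev/md0 --add /dev/sdc1
	 * which is the kind of request expected to end up in this function.
	 */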
108 struct mdinfo info;
109
110 struct stat stb;
111 int nfd, fd2;
112 int d, nd;
113 struct supertype *st = NULL;
114 char *subarray = NULL;
115
116 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
117 pr_err("cannot get array info for %s\n", devname);
118 return 1;
119 }
120
121 if (info.array.level != -1) {
122 pr_err("can only add devices to linear arrays\n");
123 return 1;
124 }
125
126 st = super_by_fd(fd, &subarray);
127 if (!st) {
128 pr_err("cannot handle arrays with superblock version %d\n",
129 info.array.major_version);
130 return 1;
131 }
132
133 if (subarray) {
134 pr_err("Cannot grow linear sub-arrays yet\n");
135 free(subarray);
136 free(st);
137 return 1;
138 }
139
140 nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT);
141 if (nfd < 0) {
142 pr_err("cannot open %s\n", newdev);
143 free(st);
144 return 1;
145 }
146 fstat(nfd, &stb);
147 if ((stb.st_mode & S_IFMT) != S_IFBLK) {
148 pr_err("%s is not a block device!\n", newdev);
149 close(nfd);
150 free(st);
151 return 1;
152 }
153 /* now check out all the devices and make sure we can read the
154 * superblock */
155 for (d=0 ; d < info.array.raid_disks ; d++) {
156 mdu_disk_info_t disk;
157 char *dv;
158
159 st->ss->free_super(st);
160
161 disk.number = d;
162 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
163 pr_err("cannot get device detail for device %d\n",
164 d);
165 close(nfd);
166 free(st);
167 return 1;
168 }
169 dv = map_dev(disk.major, disk.minor, 1);
170 if (!dv) {
171 pr_err("cannot find device file for device %d\n",
172 d);
173 close(nfd);
174 free(st);
175 return 1;
176 }
177 fd2 = dev_open(dv, O_RDWR);
178 if (fd2 < 0) {
179 pr_err("cannot open device file %s\n", dv);
180 close(nfd);
181 free(st);
182 return 1;
183 }
184
185 if (st->ss->load_super(st, fd2, NULL)) {
186 pr_err("cannot find super block on %s\n", dv);
187 close(nfd);
188 close(fd2);
189 free(st);
190 return 1;
191 }
192 close(fd2);
193 }
194 /* OK, looks good. Let's update the superblock and write it out to
195 * newdev.
196 */
197
198 info.disk.number = d;
199 info.disk.major = major(stb.st_rdev);
200 info.disk.minor = minor(stb.st_rdev);
201 info.disk.raid_disk = d;
202 info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
203 st->ss->update_super(st, &info, "linear-grow-new", newdev,
204 0, 0, NULL);
205
206 if (st->ss->store_super(st, nfd)) {
207 pr_err("Cannot store new superblock on %s\n",
208 newdev);
209 close(nfd);
210 return 1;
211 }
212 close(nfd);
213
214 if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) {
215 pr_err("Cannot add new disk to this array\n");
216 return 1;
217 }
218 /* Well, that seems to have worked.
219 * Now go through and update all superblocks
220 */
221
222 if (ioctl(fd, GET_ARRAY_INFO, &info.array) < 0) {
223 pr_err("cannot get array info for %s\n", devname);
224 return 1;
225 }
226
227 nd = d;
228 for (d=0 ; d < info.array.raid_disks ; d++) {
229 mdu_disk_info_t disk;
230 char *dv;
231
232 disk.number = d;
233 if (ioctl(fd, GET_DISK_INFO, &disk) < 0) {
234 pr_err("cannot get device detail for device %d\n",
235 d);
236 return 1;
237 }
238 dv = map_dev(disk.major, disk.minor, 1);
239 if (!dv) {
240 pr_err("cannot find device file for device %d\n",
241 d);
242 return 1;
243 }
244 fd2 = dev_open(dv, O_RDWR);
245 if (fd2 < 0) {
246 pr_err("cannot open device file %s\n", dv);
247 return 1;
248 }
249 if (st->ss->load_super(st, fd2, NULL)) {
250 pr_err("cannot find super block on %s\n", dv);
251 close(fd2);
252 return 1;
253 }
254 info.array.raid_disks = nd+1;
255 info.array.nr_disks = nd+1;
256 info.array.active_disks = nd+1;
257 info.array.working_disks = nd+1;
258
259 st->ss->update_super(st, &info, "linear-grow-update", dv,
260 0, 0, NULL);
261
262 if (st->ss->store_super(st, fd2)) {
263 pr_err("Cannot store new superblock on %s\n", dv);
264 close(fd2);
265 return 1;
266 }
267 close(fd2);
268 }
269
270 return 0;
271 }
272
273 int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s)
274 {
275 /*
276 * First check that array doesn't have a bitmap
277 * Then create the bitmap
278 * Then add it
279 *
280 * For internal bitmaps, we need to check the version,
281 * find all the active devices, and write the bitmap block
282 * to all devices
283 */
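	/* Illustrative invocations that exercise this path (device and file
	 * names are placeholders):
	 *   mdadm --grow /dev/md0 --bitmap=internal        add an internal bitmap
	 *   mdadm --grow /dev/md0 --bitmap=/path/to/file   add a file-based bitmap
	 *   mdadm --grow /dev/md0 --bitmap=none            remove the bitmap
	 */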
284 mdu_bitmap_file_t bmf;
285 mdu_array_info_t array;
286 struct supertype *st;
287 char *subarray = NULL;
288 int major = BITMAP_MAJOR_HI;
289 int vers = md_get_version(fd);
290 unsigned long long bitmapsize, array_size;
291
292 if (vers < 9003) {
293 major = BITMAP_MAJOR_HOSTENDIAN;
294 pr_err("Warning - bitmaps created on this kernel"
295 " are not portable\n"
296 " between different architectures. Consider upgrading"
297 " the Linux kernel.\n");
298 }
299
300 if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) {
301 if (errno == ENOMEM)
302 pr_err("Memory allocation failure.\n");
303 else
304 pr_err("bitmaps not supported by this kernel.\n");
305 return 1;
306 }
307 if (bmf.pathname[0]) {
308 if (strcmp(s->bitmap_file,"none")==0) {
309 if (ioctl(fd, SET_BITMAP_FILE, -1)!= 0) {
310 pr_err("failed to remove bitmap %s\n",
311 bmf.pathname);
312 return 1;
313 }
314 return 0;
315 }
316 pr_err("%s already has a bitmap (%s)\n",
317 devname, bmf.pathname);
318 return 1;
319 }
320 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
321 pr_err("cannot get array status for %s\n", devname);
322 return 1;
323 }
324 if (array.state & (1<<MD_SB_BITMAP_PRESENT)) {
325 if (strcmp(s->bitmap_file, "none")==0) {
326 array.state &= ~(1<<MD_SB_BITMAP_PRESENT);
327 if (ioctl(fd, SET_ARRAY_INFO, &array)!= 0) {
328 pr_err("failed to remove internal bitmap.\n");
329 return 1;
330 }
331 return 0;
332 }
333 pr_err("Internal bitmap already present on %s\n",
334 devname);
335 return 1;
336 }
337
338 if (strcmp(s->bitmap_file, "none") == 0) {
339 pr_err("no bitmap found on %s\n", devname);
340 return 1;
341 }
342 if (array.level <= 0) {
343 pr_err("Bitmaps not meaningful with level %s\n",
344 map_num(pers, array.level)?:"of this array");
345 return 1;
346 }
347 bitmapsize = array.size;
348 bitmapsize <<= 1;
349 if (get_dev_size(fd, NULL, &array_size) &&
350 array_size > (0x7fffffffULL<<9)) {
351 /* Array is big enough that we cannot trust array.size
352 * try other approaches
353 */
354 bitmapsize = get_component_size(fd);
355 }
356 if (bitmapsize == 0) {
357 pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n");
358 return 1;
359 }
360
361 if (array.level == 10) {
362 int ncopies = (array.layout&255)*((array.layout>>8)&255);
363 bitmapsize = bitmapsize * array.raid_disks / ncopies;
364 }
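	/* Worked example of the sizing above (illustrative numbers): array.size
	 * is in KiB, so doubling it gives sectors.  For a 4-device RAID10 with
	 * a near-2, far-1 layout, ncopies = 2*1 = 2 and bitmapsize is scaled by
	 * raid_disks/ncopies = 4/2 = 2, i.e. from the size of one component up
	 * to the usable size of the whole array.
	 */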
365
366 st = super_by_fd(fd, &subarray);
367 if (!st) {
368 pr_err("Cannot understand version %d.%d\n",
369 array.major_version, array.minor_version);
370 return 1;
371 }
372 if (subarray) {
373 pr_err("Cannot add bitmaps to sub-arrays yet\n");
374 free(subarray);
375 free(st);
376 return 1;
377 }
378 if (strcmp(s->bitmap_file, "internal") == 0) {
379 int rv;
380 int d;
381 int offset_setable = 0;
382 struct mdinfo *mdi;
383 if (st->ss->add_internal_bitmap == NULL) {
384 pr_err("Internal bitmaps not supported "
385 "with %s metadata\n", st->ss->name);
386 return 1;
387 }
388 mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION);
389 if (mdi)
390 offset_setable = 1;
391 for (d=0; d< st->max_devs; d++) {
392 mdu_disk_info_t disk;
393 char *dv;
394 disk.number = d;
395 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
396 continue;
397 if (disk.major == 0 &&
398 disk.minor == 0)
399 continue;
400 if ((disk.state & (1<<MD_DISK_SYNC))==0)
401 continue;
402 dv = map_dev(disk.major, disk.minor, 1);
403 if (dv) {
404 int fd2 = dev_open(dv, O_RDWR);
405 if (fd2 < 0)
406 continue;
407 if (st->ss->load_super(st, fd2, NULL)==0) {
408 if (st->ss->add_internal_bitmap(
409 st,
410 &s->bitmap_chunk, c->delay, s->write_behind,
411 bitmapsize, offset_setable,
412 major)
413 )
414 st->ss->write_bitmap(st, fd2);
415 else {
416 pr_err("failed to create internal bitmap"
417 " - chunksize problem.\n");
418 close(fd2);
419 return 1;
420 }
421 }
422 close(fd2);
423 }
424 }
425 if (offset_setable) {
426 st->ss->getinfo_super(st, mdi, NULL);
427 sysfs_init(mdi, fd, NULL);
428 rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location",
429 mdi->bitmap_offset);
430 } else {
431 array.state |= (1<<MD_SB_BITMAP_PRESENT);
432 rv = ioctl(fd, SET_ARRAY_INFO, &array);
433 }
434 if (rv < 0) {
435 if (errno == EBUSY)
436 pr_err("Cannot add bitmap while array is"
437 " resyncing or reshaping etc.\n");
438 pr_err("failed to set internal bitmap.\n");
439 return 1;
440 }
441 } else {
442 int uuid[4];
443 int bitmap_fd;
444 int d;
445 int max_devs = st->max_devs;
446
447 /* try to load a superblock */
448 for (d = 0; d < max_devs; d++) {
449 mdu_disk_info_t disk;
450 char *dv;
451 int fd2;
452 disk.number = d;
453 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
454 continue;
455 if ((disk.major==0 && disk.minor==0) ||
456 (disk.state & (1<<MD_DISK_REMOVED)))
457 continue;
458 dv = map_dev(disk.major, disk.minor, 1);
459 if (!dv)
460 continue;
461 fd2 = dev_open(dv, O_RDONLY);
462 if (fd2 >= 0) {
463 if (st->ss->load_super(st, fd2, NULL) == 0) {
464 close(fd2);
465 st->ss->uuid_from_super(st, uuid);
466 break;
467 }
468 close(fd2);
469 }
470 }
471 if (d == max_devs) {
472 pr_err("cannot find UUID for array!\n");
473 return 1;
474 }
475 if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid, s->bitmap_chunk,
476 c->delay, s->write_behind, bitmapsize, major)) {
477 return 1;
478 }
479 bitmap_fd = open(s->bitmap_file, O_RDWR);
480 if (bitmap_fd < 0) {
481 pr_err("weird: %s cannot be opened\n",
482 s->bitmap_file);
483 return 1;
484 }
485 if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) {
486 int err = errno;
487 if (errno == EBUSY)
488 pr_err("Cannot add bitmap while array is"
489 " resyncing or reshaping etc.\n");
490 pr_err("Cannot set bitmap file for %s: %s\n",
491 devname, strerror(err));
492 return 1;
493 }
494 }
495
496 return 0;
497 }
498
499 /*
500 * When reshaping an array we might need to back up some data.
501 * This is written to all spares with a 'super_block' describing it.
502 * The superblock goes 4K from the end of the used space on the
503 * device.
504 * It is written after the backup is complete.
505 * It has the following structure.
506 */
507
508 static struct mdp_backup_super {
509 char magic[16]; /* md_backup_data-1 or -2 */
510 __u8 set_uuid[16];
511 __u64 mtime;
512 /* start/sizes in 512byte sectors */
513 __u64 devstart; /* address on backup device/file of data */
514 __u64 arraystart;
515 __u64 length;
516 __u32 sb_csum; /* csum of preceding bytes. */
517 __u32 pad1;
518 __u64 devstart2; /* offset into data of second section */
519 __u64 arraystart2;
520 __u64 length2;
521 __u32 sb_csum2; /* csum of preceding bytes. */
522 __u8 pad[512-68-32];
523 } __attribute__((aligned(512))) bsb, bsb2;
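/* For reference: the fields through sb_csum total 68 bytes and the
 * second-section fields (pad1 through sb_csum2) total a further 32 bytes,
 * so the 512-68-32 = 412 byte pad makes the structure exactly one
 * 512-byte sector.
 */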
524
525 static __u32 bsb_csum(char *buf, int len)
526 {
527 int i;
528 int csum = 0;
529 for (i = 0; i < len; i++)
530 csum = (csum<<3) + buf[0];
531 return __cpu_to_le32(csum);
532 }
533
534 static int check_idle(struct supertype *st)
535 {
536 /* Check that all member arrays for this container, or the
537 * container of this array, are idle
538 */
539 char *container = (st->container_devnm[0]
540 ? st->container_devnm : st->devnm);
541 struct mdstat_ent *ent, *e;
542 int is_idle = 1;
543
544 ent = mdstat_read(0, 0);
545 for (e = ent ; e; e = e->next) {
546 if (!is_container_member(e, container))
547 continue;
548 if (e->percent >= 0) {
549 is_idle = 0;
550 break;
551 }
552 }
553 free_mdstat(ent);
554 return is_idle;
555 }
556
557 static int freeze_container(struct supertype *st)
558 {
559 char *container = (st->container_devnm[0]
560 ? st->container_devnm : st->devnm);
561
562 if (!check_idle(st))
563 return -1;
564
565 if (block_monitor(container, 1)) {
566 pr_err("failed to freeze container\n");
567 return -2;
568 }
569
570 return 1;
571 }
572
573 static void unfreeze_container(struct supertype *st)
574 {
575 char *container = (st->container_devnm[0]
576 ? st->container_devnm : st->devnm);
577
578 unblock_monitor(container, 1);
579 }
580
581 static int freeze(struct supertype *st)
582 {
583 /* Try to freeze resync/rebuild on this array/container.
584 * Return -1 if the array is busy,
585 * return -2 if the container cannot be frozen,
586 * return 0 if this kernel doesn't support 'frozen'
587 * return 1 if it worked.
588 */
589 if (st->ss->external)
590 return freeze_container(st);
591 else {
592 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
593 int err;
594 char buf[20];
595
596 if (!sra)
597 return -1;
598 /* Need to clear any 'read-auto' status */
599 if (sysfs_get_str(sra, NULL, "array_state", buf, 20) > 0 &&
600 strncmp(buf, "read-auto", 9) == 0)
601 sysfs_set_str(sra, NULL, "array_state", "clean");
602
603 err = sysfs_freeze_array(sra);
604 sysfs_free(sra);
605 return err;
606 }
607 }
608
609 static void unfreeze(struct supertype *st)
610 {
611 if (st->ss->external)
612 return unfreeze_container(st);
613 else {
614 struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION);
615
616 if (sra)
617 sysfs_set_str(sra, NULL, "sync_action", "idle");
618 sysfs_free(sra);
619 }
620 }
621
622 static void wait_reshape(struct mdinfo *sra)
623 {
624 int fd = sysfs_get_fd(sra, NULL, "sync_action");
625 char action[20];
626
627 if (fd < 0)
628 return;
629
630 while (sysfs_fd_get_str(fd, action, 20) > 0 &&
631 strncmp(action, "reshape", 7) == 0)
632 sysfs_wait(fd, NULL);
633 close(fd);
634 }
635
636 static int reshape_super(struct supertype *st, unsigned long long size,
637 int level, int layout, int chunksize, int raid_disks,
638 int delta_disks, char *backup_file, char *dev,
639 int direction, int verbose)
640 {
641 /* nothing extra to check in the native case */
642 if (!st->ss->external)
643 return 0;
644 if (!st->ss->reshape_super ||
645 !st->ss->manage_reshape) {
646 pr_err("%s metadata does not support reshape\n",
647 st->ss->name);
648 return 1;
649 }
650
651 return st->ss->reshape_super(st, size, level, layout, chunksize,
652 raid_disks, delta_disks, backup_file, dev,
653 direction, verbose);
654 }
655
656 static void sync_metadata(struct supertype *st)
657 {
658 if (st->ss->external) {
659 if (st->update_tail) {
660 flush_metadata_updates(st);
661 st->update_tail = &st->updates;
662 } else
663 st->ss->sync_metadata(st);
664 }
665 }
666
667 static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n)
668 {
669 /* when dealing with external metadata subarrays we need to be
670 * prepared to handle EAGAIN. The kernel may need to wait for
671 * mdmon to mark the array active so the kernel can handle
672 * allocations/writeback when preparing the reshape action
673 * (md_allow_write()). We temporarily disable safe_mode_delay
674 * to close a race with the array_state going clean before the
675 * next write to raid_disks / stripe_cache_size
676 */
677 char safe[50];
678 int rc;
679
680 /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */
681 if (!container ||
682 (strcmp(name, "raid_disks") != 0 &&
683 strcmp(name, "stripe_cache_size") != 0))
684 return sysfs_set_num(sra, NULL, name, n);
685
686 rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe));
687 if (rc <= 0)
688 return -1;
689 sysfs_set_num(sra, NULL, "safe_mode_delay", 0);
690 rc = sysfs_set_num(sra, NULL, name, n);
691 if (rc < 0 && errno == EAGAIN) {
692 ping_monitor(container);
693 /* if we get EAGAIN here then the monitor is not active
694 * so stop trying
695 */
696 rc = sysfs_set_num(sra, NULL, name, n);
697 }
698 sysfs_set_str(sra, NULL, "safe_mode_delay", safe);
699 return rc;
700 }
701
702 int start_reshape(struct mdinfo *sra, int already_running,
703 int before_data_disks, int data_disks)
704 {
705 int err;
706 unsigned long long sync_max_to_set;
707
708 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
709 err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress);
710 err = err ?: sysfs_set_num(sra, NULL, "suspend_lo",
711 sra->reshape_progress);
712 if (before_data_disks <= data_disks)
713 sync_max_to_set = sra->reshape_progress / data_disks;
714 else
715 sync_max_to_set = (sra->component_size * data_disks
716 - sra->reshape_progress) / data_disks;
717 if (!already_running)
718 sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set);
719 err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set);
720 if (!already_running)
721 err = err ?: sysfs_set_str(sra, NULL, "sync_action", "reshape");
722
723 return err;
724 }
725
726 void abort_reshape(struct mdinfo *sra)
727 {
728 sysfs_set_str(sra, NULL, "sync_action", "idle");
729 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
730 sysfs_set_num(sra, NULL, "suspend_hi", 0);
731 sysfs_set_num(sra, NULL, "suspend_lo", 0);
732 sysfs_set_num(sra, NULL, "sync_min", 0);
733 // It isn't safe to reset sync_max as we aren't monitoring.
734 // Array really should be stopped at this point.
735 }
736
737 int remove_disks_for_takeover(struct supertype *st,
738 struct mdinfo *sra,
739 int layout)
740 {
741 int nr_of_copies;
742 struct mdinfo *remaining;
743 int slot;
744
745 if (sra->array.level == 10)
746 nr_of_copies = layout & 0xff;
747 else if (sra->array.level == 1)
748 nr_of_copies = sra->array.raid_disks;
749 else
750 return 1;
751
752 remaining = sra->devs;
753 sra->devs = NULL;
754 /* for each 'copy', select one device and remove from the list. */
755 for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
756 struct mdinfo **diskp;
757 int found = 0;
758
759 /* Find a working device to keep */
760 for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
761 struct mdinfo *disk = *diskp;
762
763 if (disk->disk.raid_disk < slot)
764 continue;
765 if (disk->disk.raid_disk >= slot + nr_of_copies)
766 continue;
767 if (disk->disk.state & (1<<MD_DISK_REMOVED))
768 continue;
769 if (disk->disk.state & (1<<MD_DISK_FAULTY))
770 continue;
771 if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
772 continue;
773
774 /* We have found a good disk to use! */
775 *diskp = disk->next;
776 disk->next = sra->devs;
777 sra->devs = disk;
778 found = 1;
779 break;
780 }
781 if (!found)
782 break;
783 }
784
785 if (slot < sra->array.raid_disks) {
786 /* didn't find all slots */
787 struct mdinfo **e;
788 e = &remaining;
789 while (*e)
790 e = &(*e)->next;
791 *e = sra->devs;
792 sra->devs = remaining;
793 return 1;
794 }
795
796 /* Remove all 'remaining' devices from the array */
797 while (remaining) {
798 struct mdinfo *sd = remaining;
799 remaining = sd->next;
800
801 sysfs_set_str(sra, sd, "state", "faulty");
802 sysfs_set_str(sra, sd, "slot", "none");
803 /* for external metadata, disks should be removed by mdmon */
804 if (!st->ss->external)
805 sysfs_set_str(sra, sd, "state", "remove");
806 sd->disk.state |= (1<<MD_DISK_REMOVED);
807 sd->disk.state &= ~(1<<MD_DISK_SYNC);
808 sd->next = sra->devs;
809 sra->devs = sd;
810 }
811 return 0;
812 }
813
814 void reshape_free_fdlist(int *fdlist,
815 unsigned long long *offsets,
816 int size)
817 {
818 int i;
819
820 for (i = 0; i < size; i++)
821 if (fdlist[i] >= 0)
822 close(fdlist[i]);
823
824 free(fdlist);
825 free(offsets);
826 }
827
828 int reshape_prepare_fdlist(char *devname,
829 struct mdinfo *sra,
830 int raid_disks,
831 int nrdisks,
832 unsigned long blocks,
833 char *backup_file,
834 int *fdlist,
835 unsigned long long *offsets)
836 {
837 int d = 0;
838 struct mdinfo *sd;
839
840 enable_fds(nrdisks);
841 for (d = 0; d <= nrdisks; d++)
842 fdlist[d] = -1;
843 d = raid_disks;
844 for (sd = sra->devs; sd; sd = sd->next) {
845 if (sd->disk.state & (1<<MD_DISK_FAULTY))
846 continue;
847 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
848 char *dn = map_dev(sd->disk.major,
849 sd->disk.minor, 1);
850 fdlist[sd->disk.raid_disk]
851 = dev_open(dn, O_RDONLY);
852 offsets[sd->disk.raid_disk] = sd->data_offset*512;
853 if (fdlist[sd->disk.raid_disk] < 0) {
854 pr_err("%s: cannot open component %s\n",
855 devname, dn ? dn : "-unknown-");
856 d = -1;
857 goto release;
858 }
859 } else if (backup_file == NULL) {
860 /* spare */
861 char *dn = map_dev(sd->disk.major,
862 sd->disk.minor, 1);
863 fdlist[d] = dev_open(dn, O_RDWR);
864 offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
865 if (fdlist[d] < 0) {
866 pr_err("%s: cannot open component %s\n",
867 devname, dn ? dn : "-unknown-");
868 d = -1;
869 goto release;
870 }
871 d++;
872 }
873 }
874 release:
875 return d;
876 }
877
878 int reshape_open_backup_file(char *backup_file,
879 int fd,
880 char *devname,
881 long blocks,
882 int *fdlist,
883 unsigned long long *offsets,
884 int restart)
885 {
886 /* Return 1 on success, 0 on any form of failure */
887 /* need to check backup file is large enough */
888 char buf[512];
889 struct stat stb;
890 unsigned int dev;
891 int i;
892
893 *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL),
894 S_IRUSR | S_IWUSR);
895 *offsets = 8 * 512;
896 if (*fdlist < 0) {
897 pr_err("%s: cannot create backup file %s: %s\n",
898 devname, backup_file, strerror(errno));
899 return 0;
900 }
901 /* Guard against backup file being on array device.
902 * If array is partitioned or if LVM etc is in the
903 * way this will not notice, but it is better than
904 * nothing.
905 */
906 fstat(*fdlist, &stb);
907 dev = stb.st_dev;
908 fstat(fd, &stb);
909 if (stb.st_rdev == dev) {
910 pr_err("backup file must NOT be"
911 " on the array being reshaped.\n");
912 close(*fdlist);
913 return 0;
914 }
915
916 memset(buf, 0, 512);
917 for (i=0; i < blocks + 8 ; i++) {
918 if (write(*fdlist, buf, 512) != 512) {
919 pr_err("%s: cannot create"
920 " backup file %s: %s\n",
921 devname, backup_file, strerror(errno));
922 return 0;
923 }
924 }
925 if (fsync(*fdlist) != 0) {
926 pr_err("%s: cannot create backup file %s: %s\n",
927 devname, backup_file, strerror(errno));
928 return 0;
929 }
930
931 return 1;
932 }
933
934 unsigned long compute_backup_blocks(int nchunk, int ochunk,
935 unsigned int ndata, unsigned int odata)
936 {
937 unsigned long a, b, blocks;
938 /* So how much do we need to back up?
939 * We need an amount of data which is both a whole number of
940 * old stripes and a whole number of new stripes.
941 * So take the LCM of (chunksize*datadisks) for the old and new shapes.
942 */
943 a = (ochunk/512) * odata;
944 b = (nchunk/512) * ndata;
945 /* Find GCD */
946 a = GCD(a, b);
947 /* LCM == product / GCD */
948 blocks = (ochunk/512) * (nchunk/512) * odata * ndata / a;
949
950 return blocks;
951 }
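/* Worked example (illustrative numbers): growing a 4-drive RAID5 (3 data
 * disks) to 5 drives (4 data disks) with the 64K chunk unchanged:
 *   a = (65536/512) * 3 = 384 sectors   (one old stripe of data)
 *   b = (65536/512) * 4 = 512 sectors   (one new stripe of data)
 *   GCD(384, 512) = 128
 *   blocks = 128 * 128 * 3 * 4 / 128 = 1536 sectors (768 KiB)
 * which is the smallest amount that is a whole number of both old stripes
 * (4 of them) and new stripes (3 of them).
 */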
952
953 char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
954 {
955 /* Based on the current array state in info->array and
956 * the changes in info->new_* etc, determine:
957 * - whether the change is possible
958 * - Intermediate level/raid_disks/layout
959 * - whether a restriping reshape is needed
960 * - number of sectors in minimum change unit. This
961 * will cover a whole number of stripes in 'before' and
962 * 'after'.
963 *
964 * Return message if the change should be rejected
965 * NULL if the change can be achieved
966 *
967 * This can be called as part of starting a reshape, or
968 * when assembling an array that is undergoing reshape.
969 */
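	/* Example of the outcome for a common case (illustrative): growing a
	 * 4-device RAID5 to 5 devices with no level/layout/chunk change ends
	 * up with re->level == 5, re->before.data_disks == 3,
	 * re->after.data_disks == 4, and re->backup_blocks computed via
	 * compute_backup_blocks() further down.
	 */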
970 int near, far, offset, copies;
971 int new_disks;
972 int old_chunk, new_chunk;
973 /* delta_parity records change in number of devices
974 * caused by level change
975 */
976 int delta_parity = 0;
977
978 memset(re, 0, sizeof(*re));
979
980 /* If a new level is not explicitly given, we assume no change */
981 if (info->new_level == UnSet)
982 info->new_level = info->array.level;
983
984 if (info->new_chunk)
985 switch (info->new_level) {
986 case 0:
987 case 4:
988 case 5:
989 case 6:
990 case 10:
991 /* chunk size is meaningful, must divide component_size
992 * evenly
993 */
994 if (info->component_size % (info->new_chunk/512)) {
995 unsigned long long shrink = info->component_size;
996 shrink &= ~(unsigned long long)(info->new_chunk/512-1);
997 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
998 info->new_chunk/1024, info->component_size/2);
999 pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
1000 devname, shrink/2);
1001 pr_err("will shrink the array so the given chunk size would work.\n");
1002 return "";
1003 }
1004 break;
1005 default:
1006 return "chunk size not meaningful for this level";
1007 }
1008 else
1009 info->new_chunk = info->array.chunk_size;
1010
1011 switch (info->array.level) {
1012 default:
1013 return "Cannot understand this RAID level";
1014 case 1:
1015 /* RAID1 can convert to RAID1 with different disks, or
1016 * raid5 with 2 disks, or
1017 * raid0 with 1 disk
1018 */
1019 if (info->new_level > 1 &&
1020 (info->component_size & 7))
1021 return "Cannot convert RAID1 of this size - "
1022 "reduce size to multiple of 4K first.";
1023 if (info->new_level == 0) {
1024 if (info->delta_disks != UnSet &&
1025 info->delta_disks != 0)
1026 return "Cannot change number of disks "
1027 "with RAID1->RAID0 conversion";
1028 re->level = 0;
1029 re->before.data_disks = 1;
1030 re->after.data_disks = 1;
1031 return NULL;
1032 }
1033 if (info->new_level == 1) {
1034 if (info->delta_disks == UnSet)
1035 /* Don't know what to do */
1036 return "no change requested for Growing RAID1";
1037 re->level = 1;
1038 return NULL;
1039 }
1040 if (info->array.raid_disks == 2 &&
1041 info->new_level == 5) {
1042
1043 re->level = 5;
1044 re->before.data_disks = 1;
1045 if (info->delta_disks != UnSet &&
1046 info->delta_disks != 0)
1047 re->after.data_disks = 1 + info->delta_disks;
1048 else
1049 re->after.data_disks = 1;
1050 if (re->after.data_disks < 1)
1051 return "Number of disks too small for RAID5";
1052
1053 re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
1054 info->array.chunk_size = 65536;
1055 break;
1056 }
1057 /* Could do some multi-stage conversions, but leave that to
1058 * later.
1059 */
1060 return "Impossibly level change request for RAID1";
1061
1062 case 10:
1063 /* RAID10 can be converted from near mode to
1064 * RAID0 by removing some devices.
1065 * It can also be reshaped if the kernel supports
1066 * new_data_offset.
1067 */
1068 switch (info->new_level) {
1069 case 0:
1070 if ((info->array.layout & ~0xff) != 0x100)
1071 return "Cannot Grow RAID10 with far/offset layout";
1072 /* number of devices must be multiple of number of copies */
1073 if (info->array.raid_disks % (info->array.layout & 0xff))
1074 return "RAID10 layout too complex for Grow operation";
1075
1076 new_disks = (info->array.raid_disks
1077 / (info->array.layout & 0xff));
1078 if (info->delta_disks == UnSet)
1079 info->delta_disks = (new_disks
1080 - info->array.raid_disks);
1081
1082 if (info->delta_disks != new_disks - info->array.raid_disks)
1083 return "New number of raid-devices impossible for RAID10";
1084 if (info->new_chunk &&
1085 info->new_chunk != info->array.chunk_size)
1086 return "Cannot change chunk-size with RAID10 Grow";
1087
1088 /* looks good */
1089 re->level = 0;
1090 re->before.data_disks = new_disks;
1091 re->after.data_disks = re->before.data_disks;
1092 return NULL;
1093
1094 case 10:
1095 near = info->array.layout & 0xff;
1096 far = (info->array.layout >> 8) & 0xff;
1097 offset = info->array.layout & 0x10000;
1098 if (far > 1 && !offset)
1099 return "Cannot reshape RAID10 in far-mode";
1100 copies = near * far;
1101
1102 old_chunk = info->array.chunk_size * far;
1103
1104 if (info->new_layout == UnSet)
1105 info->new_layout = info->array.layout;
1106 else {
1107 near = info->new_layout & 0xff;
1108 far = (info->new_layout >> 8) & 0xff;
1109 offset = info->new_layout & 0x10000;
1110 if (far > 1 && !offset)
1111 return "Cannot reshape RAID10 to far-mode";
1112 if (near * far != copies)
1113 return "Cannot change number of copies"
1114 " when reshaping RAID10";
1115 }
1116 if (info->delta_disks == UnSet)
1117 info->delta_disks = 0;
1118 new_disks = (info->array.raid_disks +
1119 info->delta_disks);
1120
1121 new_chunk = info->new_chunk * far;
1122
1123 re->level = 10;
1124 re->before.layout = info->array.layout;
1125 re->before.data_disks = info->array.raid_disks;
1126 re->after.layout = info->new_layout;
1127 re->after.data_disks = new_disks;
1128 /* For RAID10 we don't do backup but do allow reshape,
1129 * so set backup_blocks to INVALID_SECTORS rather than
1130 * zero.
1131 * And there is no need to synchronise stripes on both
1132 * 'old' and 'new'. So the important
1133 * number is the minimum data_offset difference
1134 * which is the larger of (offset copies * chunk).
1135 */
1136 re->backup_blocks = INVALID_SECTORS;
1137 re->min_offset_change = max(old_chunk, new_chunk) / 512;
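			/* e.g. (illustrative) with a 512K chunk and far=1 in both
			 * the old and new layouts, old_chunk == new_chunk == 524288
			 * bytes, so min_offset_change is 1024 sectors (512 KiB).
			 */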
1138 if (new_disks < re->before.data_disks &&
1139 info->space_after < re->min_offset_change)
1140 /* Reduce component size by one chunk */
1141 re->new_size = (info->component_size -
1142 re->min_offset_change);
1143 else
1144 re->new_size = info->component_size;
1145 re->new_size = re->new_size * new_disks / copies;
1146 return NULL;
1147
1148 default:
1149 return "RAID10 can only be changed to RAID0";
1150 }
1151 case 0:
1152 /* RAID0 can be converted to RAID10, or to RAID456 */
1153 if (info->new_level == 10) {
1154 if (info->new_layout == UnSet && info->delta_disks == UnSet) {
1155 /* Assume near=2 layout */
1156 info->new_layout = 0x102;
1157 info->delta_disks = info->array.raid_disks;
1158 }
1159 if (info->new_layout == UnSet) {
1160 int copies = 1 + (info->delta_disks
1161 / info->array.raid_disks);
1162 if (info->array.raid_disks * (copies-1)
1163 != info->delta_disks)
1164 return "Impossible number of devices"
1165 " for RAID0->RAID10";
1166 info->new_layout = 0x100 + copies;
1167 }
1168 if (info->delta_disks == UnSet) {
1169 int copies = info->new_layout & 0xff;
1170 if (info->new_layout != 0x100 + copies)
1171 return "New layout impossible"
1172 " for RAID0->RAID10";;
1173 info->delta_disks = (copies - 1) *
1174 info->array.raid_disks;
1175 }
1176 if (info->new_chunk &&
1177 info->new_chunk != info->array.chunk_size)
1178 return "Cannot change chunk-size with RAID0->RAID10";
1179 /* looks good */
1180 re->level = 10;
1181 re->before.data_disks = (info->array.raid_disks +
1182 info->delta_disks);
1183 re->after.data_disks = re->before.data_disks;
1184 re->before.layout = info->new_layout;
1185 return NULL;
1186 }
1187
1188 /* RAID0 can also convert to RAID0/4/5/6 by first converting to
1189 * a raid4 style layout of the final level.
1190 */
1191 switch (info->new_level) {
1192 case 4:
1193 delta_parity = 1;
1194 case 0:
1195 re->level = 4;
1196 re->before.layout = 0;
1197 break;
1198 case 5:
1199 delta_parity = 1;
1200 re->level = 5;
1201 re->before.layout = ALGORITHM_PARITY_N;
1202 if (info->new_layout == UnSet)
1203 info->new_layout = map_name(r5layout, "default");
1204 break;
1205 case 6:
1206 delta_parity = 2;
1207 re->level = 6;
1208 re->before.layout = ALGORITHM_PARITY_N;
1209 if (info->new_layout == UnSet)
1210 info->new_layout = map_name(r6layout, "default");
1211 break;
1212 default:
1213 return "Impossible level change requested";
1214 }
1215 re->before.data_disks = info->array.raid_disks;
1216 /* determining 'after' layout happens outside this 'switch' */
1217 break;
1218
1219 case 4:
1220 info->array.layout = ALGORITHM_PARITY_N;
1221 case 5:
1222 switch (info->new_level) {
1223 case 0:
1224 delta_parity = -1;
1225 case 4:
1226 re->level = info->array.level;
1227 re->before.data_disks = info->array.raid_disks - 1;
1228 re->before.layout = info->array.layout;
1229 break;
1230 case 5:
1231 re->level = 5;
1232 re->before.data_disks = info->array.raid_disks - 1;
1233 re->before.layout = info->array.layout;
1234 break;
1235 case 6:
1236 delta_parity = 1;
1237 re->level = 6;
1238 re->before.data_disks = info->array.raid_disks - 1;
1239 switch (info->array.layout) {
1240 case ALGORITHM_LEFT_ASYMMETRIC:
1241 re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
1242 break;
1243 case ALGORITHM_RIGHT_ASYMMETRIC:
1244 re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
1245 break;
1246 case ALGORITHM_LEFT_SYMMETRIC:
1247 re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
1248 break;
1249 case ALGORITHM_RIGHT_SYMMETRIC:
1250 re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
1251 break;
1252 case ALGORITHM_PARITY_0:
1253 re->before.layout = ALGORITHM_PARITY_0_6;
1254 break;
1255 case ALGORITHM_PARITY_N:
1256 re->before.layout = ALGORITHM_PARITY_N_6;
1257 break;
1258 default:
1259 return "Cannot convert an array with this layout";
1260 }
1261 break;
1262 case 1:
1263 if (info->array.raid_disks != 2)
1264 return "Can only convert a 2-device array to RAID1";
1265 if (info->delta_disks != UnSet &&
1266 info->delta_disks != 0)
1267 return "Cannot set raid_disk when "
1268 "converting RAID5->RAID1";
1269 re->level = 1;
1270 info->new_chunk = 0;
1271 return NULL;
1272 default:
1273 return "Impossible level change requested";
1274 }
1275 break;
1276 case 6:
1277 switch (info->new_level) {
1278 case 4:
1279 case 5:
1280 delta_parity = -1;
1281 case 6:
1282 re->level = 6;
1283 re->before.data_disks = info->array.raid_disks - 2;
1284 re->before.layout = info->array.layout;
1285 break;
1286 default:
1287 return "Impossible level change requested";
1288 }
1289 break;
1290 }
1291
1292 /* If we reached here then it looks like a re-stripe is
1293 * happening. We have determined the intermediate level
1294 * and initial raid_disks/layout and stored these in 're'.
1295 *
1296 * We need to deduce the final layout that can be atomically
1297 * converted to the end state.
1298 */
1299 switch (info->new_level) {
1300 case 0:
1301 /* We can only get to RAID0 from RAID4 or RAID5
1302 * with appropriate layout and one extra device
1303 */
1304 if (re->level != 4 && re->level != 5)
1305 return "Cannot covert to RAID0 from this level";
1306
1307 switch (re->level) {
1308 case 4:
1309 re->before.layout = 0;
1310 re->after.layout = 0;
1311 break;
1312 case 5:
1313 re->after.layout = ALGORITHM_PARITY_N;
1314 break;
1315 }
1316 break;
1317
1318 case 4:
1319 /* We can only get to RAID4 from RAID5 */
1320 if (re->level != 4 && re->level != 5)
1321 return "Cannot convert to RAID4 from this level";
1322
1323 switch (re->level) {
1324 case 4:
1325 re->before.layout = 0;
1326 re->after.layout = 0;
1327 break;
1328 case 5:
1329 re->after.layout = ALGORITHM_PARITY_N;
1330 break;
1331 }
1332 break;
1333
1334 case 5:
1335 /* We get to RAID5 from RAID5 or RAID6 */
1336 if (re->level != 5 && re->level != 6)
1337 return "Cannot convert to RAID5 from this level";
1338
1339 switch (re->level) {
1340 case 5:
1341 if (info->new_layout == UnSet)
1342 re->after.layout = re->before.layout;
1343 else
1344 re->after.layout = info->new_layout;
1345 break;
1346 case 6:
1347 if (info->new_layout == UnSet)
1348 info->new_layout = re->before.layout;
1349
1350 /* after.layout needs to be raid6 version of new_layout */
1351 if (info->new_layout == ALGORITHM_PARITY_N)
1352 re->after.layout = ALGORITHM_PARITY_N;
1353 else {
1354 char layout[40];
1355 char *ls = map_num(r5layout, info->new_layout);
1356 int l;
1357 if (ls) {
1358 /* Current RAID6 layout has a RAID5
1359 * equivalent - good
1360 */
1361 strcat(strcpy(layout, ls), "-6");
1362 l = map_name(r6layout, layout);
1363 if (l == UnSet)
1364 return "Cannot find RAID6 layout"
1365 " to convert to";
1366 } else {
1367 /* Current RAID6 has no equivalent.
1368 * If it is already a '-6' layout we
1369 * can leave it unchanged, else we must
1370 * fail
1371 */
1372 ls = map_num(r6layout, info->new_layout);
1373 if (!ls ||
1374 strcmp(ls+strlen(ls)-2, "-6") != 0)
1375 return "Please specify new layout";
1376 l = info->new_layout;
1377 }
1378 re->after.layout = l;
1379 }
1380 }
1381 break;
1382
1383 case 6:
1384 /* We must already be at level 6 */
1385 if (re->level != 6)
1386 return "Impossible level change";
1387 if (info->new_layout == UnSet)
1388 re->after.layout = info->array.layout;
1389 else
1390 re->after.layout = info->new_layout;
1391 break;
1392 default:
1393 return "Impossible level change requested";
1394 }
1395 if (info->delta_disks == UnSet)
1396 info->delta_disks = delta_parity;
1397
1398 re->after.data_disks = (re->before.data_disks
1399 + info->delta_disks
1400 - delta_parity);
1401 switch (re->level) {
1402 case 6: re->parity = 2;
1403 break;
1404 case 4:
1405 case 5: re->parity = 1;
1406 break;
1407 default: re->parity = 0;
1408 break;
1409 }
1410 /* So we have a restripe operation, we need to calculate the number
1411 * of blocks per reshape operation.
1412 */
1413 re->new_size = info->component_size * re->before.data_disks;
1414 if (info->new_chunk == 0)
1415 info->new_chunk = info->array.chunk_size;
1416 if (re->after.data_disks == re->before.data_disks &&
1417 re->after.layout == re->before.layout &&
1418 info->new_chunk == info->array.chunk_size) {
1419 /* Nothing to change, can change level immediately. */
1420 re->level = info->new_level;
1421 re->backup_blocks = 0;
1422 return NULL;
1423 }
1424 if (re->after.data_disks == 1 && re->before.data_disks == 1) {
1425 /* chunk and layout changes make no difference */
1426 re->level = info->new_level;
1427 re->backup_blocks = 0;
1428 return NULL;
1429 }
1430
1431 if (re->after.data_disks == re->before.data_disks &&
1432 get_linux_version() < 2006032)
1433 return "in-place reshape is not safe before 2.6.32 - sorry.";
1434
1435 if (re->after.data_disks < re->before.data_disks &&
1436 get_linux_version() < 2006030)
1437 return "reshape to fewer devices is not supported before 2.6.30 - sorry.";
1438
1439 re->backup_blocks = compute_backup_blocks(
1440 info->new_chunk, info->array.chunk_size,
1441 re->after.data_disks,
1442 re->before.data_disks);
1443 re->min_offset_change = re->backup_blocks / re->before.data_disks;
1444
1445 re->new_size = info->component_size * re->after.data_disks;
1446 return NULL;
1447 }
1448
1449 static int set_array_size(struct supertype *st, struct mdinfo *sra,
1450 char *text_version)
1451 {
1452 struct mdinfo *info;
1453 char *subarray;
1454 int ret_val = -1;
1455
1456 if ((st == NULL) || (sra == NULL))
1457 return ret_val;
1458
1459 if (text_version == NULL)
1460 text_version = sra->text_version;
1461 subarray = strchr(text_version+1, '/')+1;
1462 info = st->ss->container_content(st, subarray);
1463 if (info) {
1464 unsigned long long current_size = 0;
1465 unsigned long long new_size =
1466 info->custom_array_size/2;
1467
1468 if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 &&
1469 new_size > current_size) {
1470 if (sysfs_set_num(sra, NULL, "array_size", new_size)
1471 < 0)
1472 dprintf("Error: Cannot set array size");
1473 else {
1474 ret_val = 0;
1475 dprintf("Array size changed");
1476 }
1477 dprintf(" from %llu to %llu.\n",
1478 current_size, new_size);
1479 }
1480 sysfs_free(info);
1481 } else
1482 dprintf("Error: set_array_size(): info pointer in NULL\n");
1483
1484 return ret_val;
1485 }
1486
1487 static int reshape_array(char *container, int fd, char *devname,
1488 struct supertype *st, struct mdinfo *info,
1489 int force, struct mddev_dev *devlist,
1490 unsigned long long data_offset,
1491 char *backup_file, int verbose, int forked,
1492 int restart, int freeze_reshape);
1493 static int reshape_container(char *container, char *devname,
1494 int mdfd,
1495 struct supertype *st,
1496 struct mdinfo *info,
1497 int force,
1498 char *backup_file,
1499 int verbose, int restart, int freeze_reshape);
1500
1501 int Grow_reshape(char *devname, int fd,
1502 struct mddev_dev *devlist,
1503 unsigned long long data_offset,
1504 struct context *c, struct shape *s)
1505 {
1506 /* Make some changes in the shape of an array.
1507 * The kernel must support the change.
1508 *
1509 * There are three different changes. Each can trigger
1510 * a resync or recovery so we freeze that until we have
1511 * requested everything (if kernel supports freezing - 2.6.30).
1512 * The steps are:
1513 * - change size (i.e. component_size)
1514 * - change level
1515 * - change layout/chunksize/ndisks
1516 *
1517 * The last can require a reshape. It is different on different
1518 * levels so we need to check the level before actioning it.
1519 * Sometimes the level change needs to be requested after the
1520 * reshape (e.g. raid6->raid5, raid5->raid0)
1521 *
1522 */
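	/* A few illustrative command lines expected to reach this function
	 * (device names and values are placeholders):
	 *   mdadm --grow /dev/md0 --size=max                 change component size
	 *   mdadm --grow /dev/md0 --level=6                  change level
	 *   mdadm --grow /dev/md0 --raid-devices=5 \
	 *         --backup-file=/root/md0.backup             change ndisks (reshape)
	 */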
1523 struct mdu_array_info_s array;
1524 int rv = 0;
1525 struct supertype *st;
1526 char *subarray = NULL;
1527
1528 int frozen;
1529 int changed = 0;
1530 char *container = NULL;
1531 int cfd = -1;
1532
1533 struct mddev_dev *dv;
1534 int added_disks;
1535
1536 struct mdinfo info;
1537 struct mdinfo *sra;
1538
1539 if (ioctl(fd, GET_ARRAY_INFO, &array) < 0) {
1540 pr_err("%s is not an active md array - aborting\n",
1541 devname);
1542 return 1;
1543 }
1544 if (data_offset != INVALID_SECTORS && array.level != 10
1545 && (array.level < 4 || array.level > 6)) {
1546 pr_err("--grow --data-offset not yet supported\n");
1547 return 1;
1548 }
1549
1550 if (s->size > 0 &&
1551 (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) {
1552 pr_err("cannot change component size at the same time "
1553 "as other changes.\n"
1554 " Change size first, then check data is intact before "
1555 "making other changes.\n");
1556 return 1;
1557 }
1558
1559 if (s->raiddisks && s->raiddisks < array.raid_disks && array.level > 1 &&
1560 get_linux_version() < 2006032 &&
1561 !check_env("MDADM_FORCE_FEWER")) {
1562 pr_err("reducing the number of devices is not safe before Linux 2.6.32\n"
1563 " Please use a newer kernel\n");
1564 return 1;
1565 }
1566
1567 st = super_by_fd(fd, &subarray);
1568 if (!st) {
1569 pr_err("Unable to determine metadata format for %s\n", devname);
1570 return 1;
1571 }
1572 if (s->raiddisks > st->max_devs) {
1573 pr_err("Cannot increase raid-disks on this array"
1574 " beyond %d\n", st->max_devs);
1575 return 1;
1576 }
1577
1578 /* in the external case we need to check that the requested reshape is
1579 * supported, and perform an initial check that the container holds the
1580 * pre-requisite spare devices (mdmon owns final validation)
1581 */
1582 if (st->ss->external) {
1583 int rv;
1584
1585 if (subarray) {
1586 container = st->container_devnm;
1587 cfd = open_dev_excl(st->container_devnm);
1588 } else {
1589 container = st->devnm;
1590 close(fd);
1591 cfd = open_dev_excl(st->devnm);
1592 fd = cfd;
1593 }
1594 if (cfd < 0) {
1595 pr_err("Unable to open container for %s\n",
1596 devname);
1597 free(subarray);
1598 return 1;
1599 }
1600
1601 rv = st->ss->load_container(st, cfd, NULL);
1602
1603 if (rv) {
1604 pr_err("Cannot read superblock for %s\n",
1605 devname);
1606 free(subarray);
1607 return 1;
1608 }
1609
1610 /* check if operation is supported for metadata handler */
1611 if (st->ss->container_content) {
1612 struct mdinfo *cc = NULL;
1613 struct mdinfo *content = NULL;
1614
1615 cc = st->ss->container_content(st, subarray);
1616 for (content = cc; content ; content = content->next) {
1617 int allow_reshape = 1;
1618
1619 /* check if reshape is allowed based on metadata
1620 * indications stored in content.array.status
1621 */
1622 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
1623 allow_reshape = 0;
1624 if (content->array.state
1625 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE))
1626 allow_reshape = 0;
1627 if (!allow_reshape) {
1628 pr_err("cannot reshape arrays in"
1629 " container with unsupported"
1630 " metadata: %s(%s)\n",
1631 devname, container);
1632 sysfs_free(cc);
1633 free(subarray);
1634 return 1;
1635 }
1636 }
1637 sysfs_free(cc);
1638 }
1639 if (mdmon_running(container))
1640 st->update_tail = &st->updates;
1641 }
1642
1643 added_disks = 0;
1644 for (dv = devlist; dv; dv = dv->next)
1645 added_disks++;
1646 if (s->raiddisks > array.raid_disks &&
1647 array.spare_disks +added_disks < (s->raiddisks - array.raid_disks) &&
1648 !c->force) {
1649 pr_err("Need %d spare%s to avoid degraded array,"
1650 " and only have %d.\n"
1651 " Use --force to over-ride this check.\n",
1652 s->raiddisks - array.raid_disks,
1653 s->raiddisks - array.raid_disks == 1 ? "" : "s",
1654 array.spare_disks + added_disks);
1655 return 1;
1656 }
1657
1658 sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS
1659 | GET_STATE | GET_VERSION);
1660 if (sra) {
1661 if (st->ss->external && subarray == NULL) {
1662 array.level = LEVEL_CONTAINER;
1663 sra->array.level = LEVEL_CONTAINER;
1664 }
1665 } else {
1666 pr_err("failed to read sysfs parameters for %s\n",
1667 devname);
1668 return 1;
1669 }
1670 frozen = freeze(st);
1671 if (frozen < -1) {
1672 /* freeze() already spewed the reason */
1673 sysfs_free(sra);
1674 return 1;
1675 } else if (frozen < 0) {
1676 pr_err("%s is performing resync/recovery and cannot"
1677 " be reshaped\n", devname);
1678 sysfs_free(sra);
1679 return 1;
1680 }
1681
1682 /* ========= set size =============== */
1683 if (s->size > 0 && (s->size == MAX_SIZE || s->size != (unsigned)array.size)) {
1684 unsigned long long orig_size = get_component_size(fd)/2;
1685 unsigned long long min_csize;
1686 struct mdinfo *mdi;
1687 int raid0_takeover = 0;
1688
1689 if (orig_size == 0)
1690 orig_size = (unsigned) array.size;
1691
1692 if (orig_size == 0) {
1693 pr_err("Cannot set device size in this type of array.\n");
1694 rv = 1;
1695 goto release;
1696 }
1697
1698 if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL,
1699 devname, APPLY_METADATA_CHANGES, c->verbose > 0)) {
1700 rv = 1;
1701 goto release;
1702 }
1703 sync_metadata(st);
1704 if (st->ss->external) {
1705 /* metadata can have a size limitation;
1706 * update the size value according to metadata information
1707 */
1708 struct mdinfo *sizeinfo =
1709 st->ss->container_content(st, subarray);
1710 if (sizeinfo) {
1711 unsigned long long new_size =
1712 sizeinfo->custom_array_size/2;
1713 int data_disks = get_data_disks(
1714 sizeinfo->array.level,
1715 sizeinfo->array.layout,
1716 sizeinfo->array.raid_disks);
1717 new_size /= data_disks;
1718 dprintf("Metadata size correction from %llu to "
1719 "%llu (%llu)\n", orig_size, new_size,
1720 new_size * data_disks);
1721 s->size = new_size;
1722 sysfs_free(sizeinfo);
1723 }
1724 }
1725
1726 /* Update the size of each member device in case
1727 * they have been resized. This will never reduce
1728 * below the current used-size. The "size" attribute
1729 * understands '0' to mean 'max'.
1730 */
1731 min_csize = 0;
1732 rv = 0;
1733 for (mdi = sra->devs; mdi; mdi = mdi->next) {
1734 if (sysfs_set_num(sra, mdi, "size",
1735 s->size == MAX_SIZE ? 0 : s->size) < 0) {
1736 /* Probably kernel refusing to let us
1737 * reduce the size - not an error.
1738 */
1739 break;
1740 }
1741 if (array.not_persistent == 0 &&
1742 array.major_version == 0 &&
1743 get_linux_version() < 3001000) {
1744 /* Dangerous to allow size to exceed 2TB */
1745 unsigned long long csize;
1746 if (sysfs_get_ll(sra, mdi, "size", &csize) == 0) {
1747 if (csize >= 2ULL*1024*1024*1024)
1748 csize = 2ULL*1024*1024*1024;
1749 if ((min_csize == 0 || (min_csize
1750 > csize)))
1751 min_csize = csize;
1752 }
1753 }
1754 }
1755 if (rv) {
1756 pr_err("Cannot set size on "
1757 "array members.\n");
1758 goto size_change_error;
1759 }
1760 if (min_csize && s->size > min_csize) {
1761 pr_err("Cannot safely make this array "
1762 "use more than 2TB per device on this kernel.\n");
1763 rv = 1;
1764 goto size_change_error;
1765 }
1766 if (min_csize && s->size == MAX_SIZE) {
1767 /* Don't let the kernel choose a size - it will get
1768 * it wrong
1769 */
1770 pr_err("Limited v0.90 array to "
1771 "2TB per device\n");
1772 s->size = min_csize;
1773 }
1774 if (st->ss->external) {
1775 if (sra->array.level == 0) {
1776 rv = sysfs_set_str(sra, NULL, "level",
1777 "raid5");
1778 if (!rv) {
1779 raid0_takeover = 1;
1780 /* get array parameters after takeover
1781 * to change one parameter at a time only
1782 */
1783 rv = ioctl(fd, GET_ARRAY_INFO, &array);
1784 }
1785 }
1786 /* make sure mdmon is
1787 * aware of the new level */
1788 if (!mdmon_running(st->container_devnm))
1789 start_mdmon(st->container_devnm);
1790 ping_monitor(container);
1791 if (mdmon_running(st->container_devnm) &&
1792 st->update_tail == NULL)
1793 st->update_tail = &st->updates;
1794 }
1795
1796 if (s->size == MAX_SIZE)
1797 s->size = 0;
1798 array.size = s->size;
1799 if ((unsigned)array.size != s->size) {
1800 /* got truncated to 32bit, write to
1801 * component_size instead
1802 */
1803 if (sra)
1804 rv = sysfs_set_num(sra, NULL,
1805 "component_size", s->size);
1806 else
1807 rv = -1;
1808 } else {
1809 rv = ioctl(fd, SET_ARRAY_INFO, &array);
1810
1811 /* manage array size when it is managed externally
1812 */
1813 if ((rv == 0) && st->ss->external)
1814 rv = set_array_size(st, sra, sra->text_version);
1815 }
1816
1817 if (raid0_takeover) {
1818 /* do not resync non-existent parity,
1819 * we will drop it anyway
1820 */
1821 sysfs_set_str(sra, NULL, "sync_action", "frozen");
1822 /* go back to raid0, drop parity disk
1823 */
1824 sysfs_set_str(sra, NULL, "level", "raid0");
1825 ioctl(fd, GET_ARRAY_INFO, &array);
1826 }
1827
1828 size_change_error:
1829 if (rv != 0) {
1830 int err = errno;
1831
1832 /* restore metadata */
1833 if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0,
1834 UnSet, NULL, devname,
1835 ROLLBACK_METADATA_CHANGES,
1836 c->verbose) == 0)
1837 sync_metadata(st);
1838 pr_err("Cannot set device size for %s: %s\n",
1839 devname, strerror(err));
1840 if (err == EBUSY &&
1841 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
1842 cont_err("Bitmap must be removed before size can be changed\n");
1843 rv = 1;
1844 goto release;
1845 }
1846 if (s->assume_clean) {
1847 /* This will fail on kernels older than 3.0 unless
1848 * a backport has been arranged.
1849 */
1850 if (sra == NULL ||
1851 sysfs_set_str(sra, NULL, "resync_start", "none") < 0)
1852 pr_err("--assume-clean not supported with --grow on this kernel\n");
1853 }
1854 ioctl(fd, GET_ARRAY_INFO, &array);
1855 s->size = get_component_size(fd)/2;
1856 if (s->size == 0)
1857 s->size = array.size;
1858 if (c->verbose >= 0) {
1859 if (s->size == orig_size)
1860 pr_err("component size of %s "
1861 "unchanged at %lluK\n",
1862 devname, s->size);
1863 else
1864 pr_err("component size of %s "
1865 "has been set to %lluK\n",
1866 devname, s->size);
1867 }
1868 changed = 1;
1869 } else if (array.level != LEVEL_CONTAINER) {
1870 s->size = get_component_size(fd)/2;
1871 if (s->size == 0)
1872 s->size = array.size;
1873 }
1874
1875 /* See if there is anything else to do */
1876 if ((s->level == UnSet || s->level == array.level) &&
1877 (s->layout_str == NULL) &&
1878 (s->chunk == 0 || s->chunk == array.chunk_size) &&
1879 data_offset == INVALID_SECTORS &&
1880 (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) {
1881 /* Nothing more to do */
1882 if (!changed && c->verbose >= 0)
1883 pr_err("%s: no change requested\n",
1884 devname);
1885 goto release;
1886 }
1887
1888 /* ========= check for Raid10/Raid1 -> Raid0 conversion ===============
1889 * the current implementation assumes that the following conditions must be met:
1890 * - RAID10:
1891 * - far_copies == 1
1892 * - near_copies == 2
1893 */
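	/* For example (illustrative): a 4-device near-2 RAID10 (layout 0x102)
	 * can be taken over to a 2-device RAID0, and a 2-device RAID1 to a
	 * single-device RAID0, once remove_disks_for_takeover() has dropped
	 * the redundant copies.
	 */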
1894 if ((s->level == 0 && array.level == 10 && sra &&
1895 array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) ||
1896 (s->level == 0 && array.level == 1 && sra)) {
1897 int err;
1898 err = remove_disks_for_takeover(st, sra, array.layout);
1899 if (err) {
1900 dprintf(Name": Array cannot be reshaped\n");
1901 if (cfd > -1)
1902 close(cfd);
1903 rv = 1;
1904 goto release;
1905 }
1906 /* Make sure mdmon has seen the device removal
1907 * and updated metadata before we continue with
1908 * level change
1909 */
1910 if (container)
1911 ping_monitor(container);
1912 }
1913
1914 memset(&info, 0, sizeof(info));
1915 info.array = array;
1916 sysfs_init(&info, fd, NULL);
1917 strcpy(info.text_version, sra->text_version);
1918 info.component_size = s->size*2;
1919 info.new_level = s->level;
1920 info.new_chunk = s->chunk * 1024;
1921 if (info.array.level == LEVEL_CONTAINER) {
1922 info.delta_disks = UnSet;
1923 info.array.raid_disks = s->raiddisks;
1924 } else if (s->raiddisks)
1925 info.delta_disks = s->raiddisks - info.array.raid_disks;
1926 else
1927 info.delta_disks = UnSet;
1928 if (s->layout_str == NULL) {
1929 info.new_layout = UnSet;
1930 if (info.array.level == 6 &&
1931 (info.new_level == 6 || info.new_level == UnSet) &&
1932 info.array.layout >= 16) {
1933 pr_err("%s has a non-standard layout. If you"
1934 " wish to preserve this\n", devname);
1935 cont_err("during the reshape, please specify"
1936 " --layout=preserve\n");
1937 cont_err("If you want to change it, specify a"
1938 " layout or use --layout=normalise\n");
1939 rv = 1;
1940 goto release;
1941 }
1942 } else if (strcmp(s->layout_str, "normalise") == 0 ||
1943 strcmp(s->layout_str, "normalize") == 0) {
1944 /* If we have a -6 RAID6 layout, remove the '-6'. */
1945 info.new_layout = UnSet;
1946 if (info.array.level == 6 && info.new_level == UnSet) {
1947 char l[40], *h;
1948 strcpy(l, map_num(r6layout, info.array.layout));
1949 h = strrchr(l, '-');
1950 if (h && strcmp(h, "-6") == 0) {
1951 *h = 0;
1952 info.new_layout = map_name(r6layout, l);
1953 }
1954 } else {
1955 pr_err("%s is only meaningful when reshaping"
1956 " a RAID6 array.\n", s->layout_str);
1957 rv = 1;
1958 goto release;
1959 }
1960 } else if (strcmp(s->layout_str, "preserve") == 0) {
1961 /* This means that a non-standard RAID6 layout
1962 * is OK.
1963 * In particular:
1964 * - When reshape a RAID6 (e.g. adding a device)
1965 * which is in a non-standard layout, it is OK
1966 * to preserve that layout.
1967 * - When converting a RAID5 to RAID6, leave it in
1968 * the XXX-6 layout, don't re-layout.
1969 */
1970 if (info.array.level == 6 && info.new_level == UnSet)
1971 info.new_layout = info.array.layout;
1972 else if (info.array.level == 5 && info.new_level == 6) {
1973 char l[40];
1974 strcpy(l, map_num(r5layout, info.array.layout));
1975 strcat(l, "-6");
1976 info.new_layout = map_name(r6layout, l);
1977 } else {
1978 pr_err("%s in only meaningful when reshaping"
1979 " to RAID6\n", s->layout_str);
1980 rv = 1;
1981 goto release;
1982 }
1983 } else {
1984 int l = info.new_level;
1985 if (l == UnSet)
1986 l = info.array.level;
1987 switch (l) {
1988 case 5:
1989 info.new_layout = map_name(r5layout, s->layout_str);
1990 break;
1991 case 6:
1992 info.new_layout = map_name(r6layout, s->layout_str);
1993 break;
1994 case 10:
1995 info.new_layout = parse_layout_10(s->layout_str);
1996 break;
1997 case LEVEL_FAULTY:
1998 info.new_layout = parse_layout_faulty(s->layout_str);
1999 break;
2000 default:
2001 pr_err("layout not meaningful"
2002 " with this level\n");
2003 rv = 1;
2004 goto release;
2005 }
2006 if (info.new_layout == UnSet) {
2007 pr_err("layout %s not understood"
2008 " for this level\n",
2009 s->layout_str);
2010 rv = 1;
2011 goto release;
2012 }
2013 }
2014
2015 if (array.level == LEVEL_FAULTY) {
2016 if (s->level != UnSet && s->level != array.level) {
2017 pr_err("cannot change level of Faulty device\n");
2018 rv = 1;
2019 }
2020 if (s->chunk) {
2021 pr_err("cannot set chunksize of Faulty device\n");
2022 rv = 1;
2023 }
2024 if (s->raiddisks && s->raiddisks != 1) {
2025 pr_err("cannot set raid_disks of Faulty device\n");
2026 rv = 1;
2027 }
2028 if (s->layout_str) {
2029 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2030 dprintf("Cannot get array information.\n");
2031 goto release;
2032 }
2033 array.layout = info.new_layout;
2034 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2035 pr_err("failed to set new layout\n");
2036 rv = 1;
2037 } else if (c->verbose >= 0)
2038 printf("layout for %s set to %d\n",
2039 devname, array.layout);
2040 }
2041 } else if (array.level == LEVEL_CONTAINER) {
2042 /* This change is to be applied to every array in the
2043 * container. This is only needed when the metadata imposes
2044 * constraints on the various arrays in the container.
2045 * Currently we only know that IMSM requires all arrays
2046 * to have the same number of devices, so changing the
2047 * number of devices (On-Line Capacity Expansion) must be
2048 * performed at the level of the container.
2049 */
2050 rv = reshape_container(container, devname, -1, st, &info,
2051 c->force, c->backup_file, c->verbose, 0, 0);
2052 frozen = 0;
2053 } else {
2054 /* get spare devices from external metadata
2055 */
2056 if (st->ss->external) {
2057 struct mdinfo *info2;
2058
2059 info2 = st->ss->container_content(st, subarray);
2060 if (info2) {
2061 info.array.spare_disks =
2062 info2->array.spare_disks;
2063 sysfs_free(info2);
2064 }
2065 }
2066
2067 /* Impose these changes on a single array. First
2068 * check that the metadata is OK with the change. */
2069
2070 if (reshape_super(st, 0, info.new_level,
2071 info.new_layout, info.new_chunk,
2072 info.array.raid_disks, info.delta_disks,
2073 c->backup_file, devname, APPLY_METADATA_CHANGES,
2074 c->verbose)) {
2075 rv = 1;
2076 goto release;
2077 }
2078 sync_metadata(st);
2079 rv = reshape_array(container, fd, devname, st, &info, c->force,
2080 devlist, data_offset, c->backup_file, c->verbose,
2081 0, 0, 0);
2082 frozen = 0;
2083 }
2084 release:
2085 sysfs_free(sra);
2086 if (frozen > 0)
2087 unfreeze(st);
2088 return rv;
2089 }
2090
2091 /* verify_reshape_position()
2092 * Checks that the reshape position recorded in the metadata is not
2093 * farther along than the position reported by md.
2094 * Return value:
2095 * 0 : no valid sysfs entry;
2096 * this can happen when the reshape has not yet been started (it will
2097 * be started by reshape_array()) or when a raid0 array awaits takeover
2098 * -1 : error, reshape position is obviously wrong
2099 * 1 : success, reshape progress correct or updated
2100 */
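/* Example (illustrative numbers): for a 5-device RAID5 (4 data disks)
 * with sync_max == 2048, the md position is 2048 * 4 = 8192 array
 * sectors; a smaller metadata reshape_progress is corrected up to 8192
 * and a larger one is reported as a fatal inconsistency.
 */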
2101 static int verify_reshape_position(struct mdinfo *info, int level)
2102 {
2103 int ret_val = 0;
2104 char buf[40];
2105 int rv;
2106
2107 /* read sync_max, failure can mean raid0 array */
2108 rv = sysfs_get_str(info, NULL, "sync_max", buf, 40);
2109
2110 if (rv > 0) {
2111 char *ep;
2112 unsigned long long position = strtoull(buf, &ep, 0);
2113
2114 dprintf(Name": Read sync_max sysfs entry is: %s\n", buf);
2115 if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) {
2116 position *= get_data_disks(level,
2117 info->new_layout,
2118 info->array.raid_disks);
2119 if (info->reshape_progress < position) {
2120 dprintf("Corrected reshape progress (%llu) to "
2121 "md position (%llu)\n",
2122 info->reshape_progress, position);
2123 info->reshape_progress = position;
2124 ret_val = 1;
2125 } else if (info->reshape_progress > position) {
2126 pr_err("Fatal error: array "
2127 "reshape was not properly frozen "
2128 "(expected reshape position is %llu, "
2129 "but reshape progress is %llu.\n",
2130 position, info->reshape_progress);
2131 ret_val = -1;
2132 } else {
2133 dprintf("Reshape position in md and metadata "
2134 "are the same;");
2135 ret_val = 1;
2136 }
2137 }
2138 } else if (rv == 0) {
2139 /* for a valid sysfs entry, zero-length content
2140 * should be treated as an error
2141 */
2142 ret_val = -1;
2143 }
2144
2145 return ret_val;
2146 }
2147
2148 static unsigned long long choose_offset(unsigned long long lo,
2149 unsigned long long hi,
2150 unsigned long long min,
2151 unsigned long long max)
2152 {
2153 /* Choose a new offset between hi and lo.
2154 * It must be between min and max, but
2155 * we would prefer something near the middle of hi/lo, and also
2156 * prefer to be aligned to a big power of 2.
2157 *
2158 * So we start with the middle, then for each bit,
2159 * starting at '1' and increasing, if it is set, we either
2160 * add it or subtract it if possible, preferring the option
2161 * which is furthest from the boundary.
2162 *
2163 * We stop once we get a 1MB alignment. As units are in sectors,
2164 * 1MB = 2*1024 sectors.
2165 */
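	/* Illustrative walk-through: with lo=1000, hi=9000, min=1000 and
	 * max=9000 the loop starts at the midpoint 5000 and, bit by bit,
	 * settles on 4096 - still well inside the range but much better
	 * aligned than the naive midpoint.
	 */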
2166 unsigned long long choice = (lo + hi) / 2;
2167 unsigned long long bit = 1;
2168
2169 for (bit = 1; bit < 2*1024; bit = bit << 1) {
2170 unsigned long long bigger, smaller;
2171 if (! (bit & choice))
2172 continue;
2173 bigger = choice + bit;
2174 smaller = choice - bit;
2175 if (bigger > max && smaller < min)
2176 break;
2177 if (bigger > max)
2178 choice = smaller;
2179 else if (smaller < min)
2180 choice = bigger;
2181 else if (hi - bigger > smaller - lo)
2182 choice = bigger;
2183 else
2184 choice = smaller;
2185 }
2186 return choice;
2187 }
2188
2189 static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
2190 char *devname, int delta_disks,
2191 unsigned long long data_offset,
2192 unsigned long long min,
2193 int can_fallback)
2194 {
2195 struct mdinfo *sd;
2196 int dir = 0;
2197 int err = 0;
2198 unsigned long long before, after;
2199
2200 /* Need to find the minimum space before and after so the
2201 * same value is used on all devices
2202 */
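	/* For illustration: when adding disks (delta_disks > 0) the data must
	 * move towards the start of each device, so if the tightest member
	 * reports space_before == 2048 sectors, the reshape can proceed
	 * without a backup file only if 'min' (the required offset change)
	 * is at most 2048.
	 */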
2203 before = UINT64_MAX;
2204 after = UINT64_MAX;
2205 for (sd = sra->devs; sd; sd = sd->next) {
2206 char *dn;
2207 int dfd;
2208 int rv;
2209 struct supertype *st2;
2210 struct mdinfo info2;
2211
2212 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2213 continue;
2214 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2215 dfd = dev_open(dn, O_RDONLY);
2216 if (dfd < 0) {
2217 pr_err("%s: cannot open component %s\n",
2218 devname, dn ? dn : "-unknown-");
2219 goto release;
2220 }
2221 st2 = dup_super(st);
2222 rv = st2->ss->load_super(st2, dfd, NULL);
2223 close(dfd);
2224 if (rv) {
2225 free(st2);
2226 pr_err("%s: cannot get superblock from %s\n",
2227 devname, dn);
2228 goto release;
2229 }
2230 st2->ss->getinfo_super(st2, &info2, NULL);
2231 st2->ss->free_super(st2);
2232 free(st2);
2233 if (info2.space_before == 0 &&
2234 info2.space_after == 0) {
2235 /* Metadata doesn't support data_offset changes */
2236 return 1;
2237 }
2238 if (before > info2.space_before)
2239 before = info2.space_before;
2240 if (after > info2.space_after)
2241 after = info2.space_after;
2242
2243 if (data_offset != INVALID_SECTORS) {
2244 if (dir == 0) {
2245 if (info2.data_offset == data_offset) {
2246 pr_err("%s: already has that data_offset\n",
2247 dn);
2248 goto release;
2249 }
2250 if (data_offset < info2.data_offset)
2251 dir = -1;
2252 else
2253 dir = 1;
2254 } else if ((data_offset <= info2.data_offset && dir == 1) ||
2255 (data_offset >= info2.data_offset && dir == -1)) {
2256 pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
2257 dn);
2258 goto release;
2259 }
2260 }
2261 }
2262 if (before == UINT64_MAX)
2263 /* impossible really, there must be no devices */
2264 return 1;
2265
2266 for (sd = sra->devs; sd; sd = sd->next) {
2267 char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2268 unsigned long long new_data_offset;
2269
2270 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2271 continue;
2272 if (delta_disks < 0) {
2273 /* Don't need any space as the array is shrinking;
2274 * just move data_offset up by min
2275 */
2276 if (data_offset == INVALID_SECTORS)
2277 new_data_offset = sd->data_offset + min;
2278 else {
2279 if (data_offset < sd->data_offset + min) {
2280 pr_err("--data-offset too small for %s\n",
2281 dn);
2282 goto release;
2283 }
2284 new_data_offset = data_offset;
2285 }
2286 } else if (delta_disks > 0) {
2287 /* need space before */
2288 if (before < min) {
2289 if (can_fallback)
2290 goto fallback;
2291 pr_err("Insufficient head-space for reshape on %s\n",
2292 dn);
2293 goto release;
2294 }
2295 if (data_offset == INVALID_SECTORS)
2296 new_data_offset = sd->data_offset - min;
2297 else {
2298 if (data_offset > sd->data_offset - min) {
2299 pr_err("--data-offset too large for %s\n",
2300 dn);
2301 goto release;
2302 }
2303 new_data_offset = data_offset;
2304 }
2305 } else {
2306 if (dir == 0) {
2307 /* can move up or down. If 'data_offset'
2308 * was set we would have already decided,
2309 * so just choose direction with most space.
2310 */
2311 if (before > after)
2312 dir = -1;
2313 else
2314 dir = 1;
2315 }
2316 sysfs_set_str(sra, NULL, "reshape_direction",
2317 dir == 1 ? "backwards" : "forwards");
2318 if (dir > 0) {
2319 /* Increase data offset */
2320 if (after < min) {
2321 if (can_fallback)
2322 goto fallback;
2323 pr_err("Insufficient tail-space for reshape on %s\n",
2324 dn);
2325 goto release;
2326 }
2327 if (data_offset != INVALID_SECTORS &&
2328 data_offset < sd->data_offset + min) {
2329 pr_err("--data-offset too small on %s\n",
2330 dn);
2331 goto release;
2332 }
2333 if (data_offset != INVALID_SECTORS)
2334 new_data_offset = data_offset;
2335 else
2336 new_data_offset = choose_offset(sd->data_offset,
2337 sd->data_offset + after,
2338 sd->data_offset + min,
2339 sd->data_offset + after);
2340 } else {
2341 /* Decrease data offset */
2342 if (before < min) {
2343 if (can_fallback)
2344 goto fallback;
2345 pr_err("insufficient head-room on %s\n",
2346 dn);
2347 goto release;
2348 }
2349 if (data_offset != INVALID_SECTORS &&
2350 data_offset < sd->data_offset - min) {
2351 pr_err("--data-offset too small on %s\n",
2352 dn);
2353 goto release;
2354 }
2355 if (data_offset != INVALID_SECTORS)
2356 new_data_offset = data_offset;
2357 else
2358 new_data_offset = choose_offset(sd->data_offset - before,
2359 sd->data_offset,
2360 sd->data_offset - before,
2361 sd->data_offset - min);
2362 }
2363 }
2364 err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
2365 if (err < 0 && errno == E2BIG) {
2366 /* try again after increasing data size to max */
2367 err = sysfs_set_num(sra, sd, "size", 0);
2368 if (err < 0 && errno == EINVAL &&
2369 !(sd->disk.state & (1<<MD_DISK_SYNC))) {
2370 /* some kernels have a bug where you cannot
2371 * use '0' on spare devices. */
2372 sysfs_set_num(sra, sd, "size",
2373 (sra->component_size + after)/2);
2374 }
2375 err = sysfs_set_num(sra, sd, "new_offset",
2376 new_data_offset);
2377 }
2378 if (err < 0) {
2379 if (errno == E2BIG && data_offset != INVALID_SECTORS) {
2380 pr_err("data-offset is too big for %s\n",
2381 dn);
2382 goto release;
2383 }
2384 if (sd == sra->devs &&
2385 (errno == ENOENT || errno == E2BIG))
2386 /* Early kernel, no 'new_offset' file,
2387 * or kernel doesn't like us.
2388 * For RAID5/6 this is not fatal
2389 */
2390 return 1;
2391 pr_err("Cannot set new_offset for %s\n",
2392 dn);
2393 break;
2394 }
2395 }
2396 return err;
2397 release:
2398 return -1;
2399 fallback:
2400 /* Just use a backup file */
2401 return 1;
2402 }
2403
2404 static int raid10_reshape(char *container, int fd, char *devname,
2405 struct supertype *st, struct mdinfo *info,
2406 struct reshape *reshape,
2407 unsigned long long data_offset,
2408 int force, int verbose)
2409 {
2410 /* Changing raid_disks, layout, chunksize or possibly
2411 * just data_offset for a RAID10.
2412 * We must always change data_offset. We change by at least
2413 * ->min_offset_change which is the largest of the old and new
2414 * chunk sizes.
2415 * If raid_disks is increasing, then data_offset must decrease
2416 * by at least this copy size.
2417 * If raid_disks is unchanged, data_offset must increase or
2418 * decrease by at least min_offset_change but preferably by much more.
2419 * We choose half of the available space.
2420 * If raid_disks is decreasing, data_offset must increase by
2421 * at least min_offset_change. To allow for this, component_size
2422 * must be decreased by the same amount.
2423 *
2424 * So we calculate the required minimum and direction, possibly
2425 * reduce the component_size, then iterate through the devices
2426 * and set the new_data_offset.
2427 * If that all works, we set chunk_size, layout, raid_disks, and start
2428 * 'reshape'
2429 */
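	/* Illustrative example: for a near-2 RAID10 with 512KiB chunks,
	 * min_offset_change is 1024 sectors, so when growing, every member's
	 * data_offset has to drop by at least 1024 sectors (head-space
	 * permitting) before raid_disks is increased and the reshape starts.
	 */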
2430 struct mdinfo *sra;
2431 unsigned long long min;
2432 int err = 0;
2433
2434 sra = sysfs_read(fd, NULL,
2435 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
2436 );
2437 if (!sra) {
2438 pr_err("%s: Cannot get array details from sysfs\n",
2439 devname);
2440 goto release;
2441 }
2442 min = reshape->min_offset_change;
2443
2444 if (info->delta_disks)
2445 sysfs_set_str(sra, NULL, "reshape_direction",
2446 info->delta_disks < 0 ? "backwards" : "forwards");
2447 if (info->delta_disks < 0 &&
2448 info->space_after < min) {
2449 int rv = sysfs_set_num(sra, NULL, "component_size",
2450 (sra->component_size -
2451 min)/2);
2452 if (rv) {
2453 pr_err("cannot reduce component size\n");
2454 goto release;
2455 }
2456 }
2457 err = set_new_data_offset(sra, st, devname, info->delta_disks, data_offset,
2458 min, 0);
2459 if (err == 1) {
2460 pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
2461 cont_err("supported on this kernel\n");
2462 err = -1;
2463 }
2464 if (err < 0)
2465 goto release;
2466
2467 if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2468 err = errno;
2469 if (!err && sysfs_set_num(sra, NULL, "layout", reshape->after.layout) < 0)
2470 err = errno;
2471 if (!err && sysfs_set_num(sra, NULL, "raid_disks",
2472 info->array.raid_disks + info->delta_disks) < 0)
2473 err = errno;
2474 if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
2475 err = errno;
2476 if (err) {
2477 pr_err("Cannot set array shape for %s\n",
2478 devname);
2479 if (err == EBUSY &&
2480 (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
2481 cont_err(" Bitmap must be removed before"
2482 " shape can be changed\n");
2483 goto release;
2484 }
2485 sysfs_free(sra);
2486 return 0;
2487 release:
2488 sysfs_free(sra);
2489 return 1;
2490 }
2491
2492 static void get_space_after(int fd, struct supertype *st, struct mdinfo *info)
2493 {
2494 struct mdinfo *sra, *sd;
2495 /* Initialisation to silence compiler warning */
2496 unsigned long long min_space_before = 0, min_space_after = 0;
2497 int first = 1;
2498
2499 sra = sysfs_read(fd, NULL, GET_DEVS);
2500 if (!sra)
2501 return;
2502 for (sd = sra->devs; sd; sd = sd->next) {
2503 char *dn;
2504 int dfd;
2505 struct supertype *st2;
2506 struct mdinfo info2;
2507
2508 if (sd->disk.state & (1<<MD_DISK_FAULTY))
2509 continue;
2510 dn = map_dev(sd->disk.major, sd->disk.minor, 0);
2511 dfd = dev_open(dn, O_RDONLY);
2512 if (dfd < 0)
2513 break;
2514 st2 = dup_super(st);
2515 if (st2->ss->load_super(st2, dfd, NULL)) {
2516 close(dfd);
2517 free(st2);
2518 break;
2519 }
2520 close(dfd);
2521 st2->ss->getinfo_super(st2, &info2, NULL);
2522 st2->ss->free_super(st2);
2523 free(st2);
2524 if (first ||
2525 min_space_before > info2.space_before)
2526 min_space_before = info2.space_before;
2527 if (first ||
2528 min_space_after > info2.space_after)
2529 min_space_after = info2.space_after;
2530 first = 0;
2531 }
2532 if (sd == NULL && !first) {
2533 info->space_after = min_space_after;
2534 info->space_before = min_space_before;
2535 }
2536 sysfs_free(sra);
2537 }
2538
2539 static void update_cache_size(char *container, struct mdinfo *sra,
2540 struct mdinfo *info,
2541 int disks, unsigned long long blocks)
2542 {
2543 /* Check that the internal stripe cache is
2544 * large enough, or it won't work.
2545 * It must hold at least 4 stripes of the larger
2546 * chunk size
2547 */
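	/* Worked example: going from 512KiB to 1MiB chunks needs 4 stripes
	 * of 1MiB = 8192 sectors; assuming that dominates the
	 * 16 + blocks/disks term, that converts to 1024 pages, so
	 * stripe_cache_size is raised to 1025 if it is currently lower.
	 */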
2548 unsigned long cache;
2549 cache = max(info->array.chunk_size, info->new_chunk);
2550 cache *= 4; /* 4 stripes minimum */
2551 cache /= 512; /* convert to sectors */
2552 /* make sure there is room for 'blocks' with a bit to spare */
2553 if (cache < 16 + blocks / disks)
2554 cache = 16 + blocks / disks;
2555 cache /= (4096/512); /* Convert from sectors to pages */
2556
2557 if (sra->cache_size < cache)
2558 subarray_set_num(container, sra, "stripe_cache_size",
2559 cache+1);
2560 }
2561
2562 static int impose_reshape(struct mdinfo *sra,
2563 struct mdinfo *info,
2564 struct supertype *st,
2565 int fd,
2566 int restart,
2567 char *devname, char *container,
2568 struct reshape *reshape)
2569 {
2570 struct mdu_array_info_s array;
2571
2572 sra->new_chunk = info->new_chunk;
2573
2574 if (restart) {
2575 /* For external metadata, a checkpoint saved by mdmon can be lost
2576 * or missed (e.g. due to a crash). Check whether md is, during
2577 * restart, farther along than the metadata indicates.
2578 * If so, the metadata information is obsolete.
2579 */
2580 if (st->ss->external)
2581 verify_reshape_position(info, reshape->level);
2582 sra->reshape_progress = info->reshape_progress;
2583 } else {
2584 sra->reshape_progress = 0;
2585 if (reshape->after.data_disks < reshape->before.data_disks)
2586 /* start from the end of the new array */
2587 sra->reshape_progress = (sra->component_size
2588 * reshape->after.data_disks);
2589 }
2590
2591 ioctl(fd, GET_ARRAY_INFO, &array);
2592 if (info->array.chunk_size == info->new_chunk &&
2593 reshape->before.layout == reshape->after.layout &&
2594 st->ss->external == 0) {
2595 /* use SET_ARRAY_INFO but only if reshape hasn't started */
2596 array.raid_disks = reshape->after.data_disks + reshape->parity;
2597 if (!restart &&
2598 ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2599 int err = errno;
2600
2601 pr_err("Cannot set device shape for %s: %s\n",
2602 devname, strerror(errno));
2603
2604 if (err == EBUSY &&
2605 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2606 cont_err("Bitmap must be removed before"
2607 " shape can be changed\n");
2608
2609 goto release;
2610 }
2611 } else if (!restart) {
2612 /* set them all just in case some old 'new_*' value
2613 * persists from some earlier problem.
2614 */
2615 int err = 0;
2616 if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
2617 err = errno;
2618 if (!err && sysfs_set_num(sra, NULL, "layout",
2619 reshape->after.layout) < 0)
2620 err = errno;
2621 if (!err && subarray_set_num(container, sra, "raid_disks",
2622 reshape->after.data_disks +
2623 reshape->parity) < 0)
2624 err = errno;
2625 if (err) {
2626 pr_err("Cannot set device shape for %s\n",
2627 devname);
2628
2629 if (err == EBUSY &&
2630 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2631 cont_err("Bitmap must be removed before"
2632 " shape can be changed\n");
2633 goto release;
2634 }
2635 }
2636 return 0;
2637 release:
2638 return -1;
2639 }
2640
2641 static int impose_level(int fd, int level, char *devname, int verbose)
2642 {
2643 char *c;
2644 struct mdu_array_info_s array;
2645 struct mdinfo info;
2646 sysfs_init(&info, fd, NULL);
2647
2648 ioctl(fd, GET_ARRAY_INFO, &array);
2649 if (level == 0 &&
2650 (array.level >= 4 && array.level <= 6)) {
2651 /* To convert to RAID0 we need to fail and
2652 * remove any non-data devices. */
2653 int found = 0;
2654 int d;
2655 int data_disks = array.raid_disks - 1;
2656 if (array.level == 6)
2657 data_disks -= 1;
2658 if (array.level == 5 &&
2659 array.layout != ALGORITHM_PARITY_N)
2660 return -1;
2661 if (array.level == 6 &&
2662 array.layout != ALGORITHM_PARITY_N_6)
2663 return -1;
2664 sysfs_set_str(&info, NULL,"sync_action", "idle");
2665 /* First remove any spares so no recovery starts */
2666 for (d = 0, found = 0;
2667 d < MAX_DISKS && found < array.nr_disks;
2668 d++) {
2669 mdu_disk_info_t disk;
2670 disk.number = d;
2671 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2672 continue;
2673 if (disk.major == 0 && disk.minor == 0)
2674 continue;
2675 found++;
2676 if ((disk.state & (1 << MD_DISK_ACTIVE))
2677 && disk.raid_disk < data_disks)
2678 /* keep this */
2679 continue;
2680 ioctl(fd, HOT_REMOVE_DISK,
2681 makedev(disk.major, disk.minor));
2682 }
2683 /* Now fail anything left */
2684 ioctl(fd, GET_ARRAY_INFO, &array);
2685 for (d = 0, found = 0;
2686 d < MAX_DISKS && found < array.nr_disks;
2687 d++) {
2688 int cnt;
2689 mdu_disk_info_t disk;
2690 disk.number = d;
2691 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
2692 continue;
2693 if (disk.major == 0 && disk.minor == 0)
2694 continue;
2695 found++;
2696 if ((disk.state & (1 << MD_DISK_ACTIVE))
2697 && disk.raid_disk < data_disks)
2698 /* keep this */
2699 continue;
2700 ioctl(fd, SET_DISK_FAULTY,
2701 makedev(disk.major, disk.minor));
2702 cnt = 5;
2703 while (ioctl(fd, HOT_REMOVE_DISK,
2704 makedev(disk.major, disk.minor)) < 0
2705 && errno == EBUSY
2706 && cnt--) {
2707 usleep(10000);
2708 }
2709 }
2710 }
2711 c = map_num(pers, level);
2712 if (c) {
2713 int err = sysfs_set_str(&info, NULL, "level", c);
2714 if (err) {
2715 err = errno;
2716 pr_err("%s: could not set level to %s\n",
2717 devname, c);
2718 if (err == EBUSY &&
2719 (array.state & (1<<MD_SB_BITMAP_PRESENT)))
2720 cont_err("Bitmap must be removed"
2721 " before level can be changed\n");
2722 return err;
2723 }
2724 if (verbose >= 0)
2725 pr_err("level of %s changed to %s\n",
2726 devname, c);
2727 }
2728 return 0;
2729 }
2730
2731 int sigterm = 0;
2732 static void catch_term(int sig)
2733 {
2734 sigterm = 1;
2735 }
2736
2737 static int reshape_array(char *container, int fd, char *devname,
2738 struct supertype *st, struct mdinfo *info,
2739 int force, struct mddev_dev *devlist,
2740 unsigned long long data_offset,
2741 char *backup_file, int verbose, int forked,
2742 int restart, int freeze_reshape)
2743 {
2744 struct reshape reshape;
2745 int spares_needed;
2746 char *msg;
2747 int orig_level = UnSet;
2748 int odisks;
2749 int delayed;
2750
2751 struct mdu_array_info_s array;
2752 char *c;
2753
2754 struct mddev_dev *dv;
2755 int added_disks;
2756
2757 int *fdlist = NULL;
2758 unsigned long long *offsets = NULL;
2759 int d;
2760 int nrdisks;
2761 int err;
2762 unsigned long blocks;
2763 unsigned long long array_size;
2764 int done;
2765 struct mdinfo *sra = NULL;
2766
2767 /* when reshaping a RAID0, the component_size might be zero.
2768 * So try to fix that up.
2769 */
2770 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2771 dprintf("Cannot get array information.\n");
2772 goto release;
2773 }
2774 if (array.level == 0 && info->component_size == 0) {
2775 get_dev_size(fd, NULL, &array_size);
2776 info->component_size = array_size / array.raid_disks;
2777 }
2778
2779 if (array.level == 10)
2780 /* Need space_after info */
2781 get_space_after(fd, st, info);
2782
2783 if (info->reshape_active) {
2784 int new_level = info->new_level;
2785 info->new_level = UnSet;
2786 if (info->delta_disks > 0)
2787 info->array.raid_disks -= info->delta_disks;
2788 msg = analyse_change(devname, info, &reshape);
2789 info->new_level = new_level;
2790 if (info->delta_disks > 0)
2791 info->array.raid_disks += info->delta_disks;
2792 if (!restart)
2793 /* Make sure the array isn't read-only */
2794 ioctl(fd, RESTART_ARRAY_RW, 0);
2795 } else
2796 msg = analyse_change(devname, info, &reshape);
2797 if (msg) {
2798 /* if msg == "", error has already been printed */
2799 if (msg[0])
2800 pr_err("%s\n", msg);
2801 goto release;
2802 }
2803 if (restart &&
2804 (reshape.level != info->array.level ||
2805 reshape.before.layout != info->array.layout ||
2806 reshape.before.data_disks + reshape.parity
2807 != info->array.raid_disks - max(0, info->delta_disks))) {
2808 pr_err("reshape info is not in native format -"
2809 " cannot continue.\n");
2810 goto release;
2811 }
2812
2813 if (st->ss->external && restart && (info->reshape_progress == 0)) {
2814 /* When the reshape is restarted from '0', the very beginning of
2815 * the array, it is possible that for external metadata the reshape
2816 * and array configuration have not happened yet.
2817 * Check whether md has the same opinion and the reshape is restarting
2818 * from 0. If so, this is just a regular reshape start after the
2819 * metadata switched the reshape to the next array.
2820 */
2821 if ((verify_reshape_position(info, reshape.level) >= 0) &&
2822 (info->reshape_progress == 0))
2823 restart = 0;
2824 }
2825 if (restart) {
2826 /* reshape already started. just skip to monitoring the reshape */
2827 if (reshape.backup_blocks == 0)
2828 return 0;
2829 if (restart & RESHAPE_NO_BACKUP)
2830 return 0;
2831
2832 /* Need 'sra' down at 'started:' */
2833 sra = sysfs_read(fd, NULL,
2834 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
2835 GET_CACHE);
2836 if (!sra) {
2837 pr_err("%s: Cannot get array details from sysfs\n",
2838 devname);
2839 goto release;
2840 }
2841 goto started;
2842 }
2843 /* The container is frozen but the array may not be.
2844 * So freeze the array so spares don't get put to the wrong use
2845 * FIXME there should probably be a cleaner separation between
2846 * freeze_array and freeze_container.
2847 */
2848 sysfs_freeze_array(info);
2849 /* Check we have enough spares to not be degraded */
2850 added_disks = 0;
2851 for (dv = devlist; dv ; dv=dv->next)
2852 added_disks++;
2853 spares_needed = max(reshape.before.data_disks,
2854 reshape.after.data_disks)
2855 + reshape.parity - array.raid_disks;
2856
2857 if (!force &&
2858 info->new_level > 1 && info->array.level > 1 &&
2859 spares_needed > info->array.spare_disks + added_disks) {
2860 pr_err("Need %d spare%s to avoid degraded array,"
2861 " and only have %d.\n"
2862 " Use --force to over-ride this check.\n",
2863 spares_needed,
2864 spares_needed == 1 ? "" : "s",
2865 info->array.spare_disks + added_disks);
2866 goto release;
2867 }
2868 /* Check we have enough spares to not fail */
2869 spares_needed = max(reshape.before.data_disks,
2870 reshape.after.data_disks)
2871 - array.raid_disks;
2872 if ((info->new_level > 1 || info->new_level == 0) &&
2873 spares_needed > info->array.spare_disks +added_disks) {
2874 pr_err("Need %d spare%s to create working array,"
2875 " and only have %d.\n",
2876 spares_needed,
2877 spares_needed == 1 ? "" : "s",
2878 info->array.spare_disks + added_disks);
2879 goto release;
2880 }
2881
2882 if (reshape.level != array.level) {
2883 int err = impose_level(fd, reshape.level, devname, verbose);
2884 if (err)
2885 goto release;
2886 info->new_layout = UnSet; /* after level change,
2887 * layout is meaningless */
2888 orig_level = array.level;
2889 sysfs_freeze_array(info);
2890
2891 if (reshape.level > 0 && st->ss->external) {
2892 /* make sure mdmon is aware of the new level */
2893 if (mdmon_running(container))
2894 flush_mdmon(container);
2895
2896 if (!mdmon_running(container))
2897 start_mdmon(container);
2898 ping_monitor(container);
2899 if (mdmon_running(container) &&
2900 st->update_tail == NULL)
2901 st->update_tail = &st->updates;
2902 }
2903 }
2904 /* ->reshape_super might have chosen some spares from the
2905 * container that it wants to be part of the new array.
2906 * We can collect them with ->container_content and give
2907 * them to the kernel.
2908 */
2909 if (st->ss->reshape_super && st->ss->container_content) {
2910 char *subarray = strchr(info->text_version+1, '/')+1;
2911 struct mdinfo *info2 =
2912 st->ss->container_content(st, subarray);
2913 struct mdinfo *d;
2914
2915 if (info2) {
2916 sysfs_init(info2, fd, st->devnm);
2917 /* When increasing number of devices, we need to set
2918 * new raid_disks before adding these, or they might
2919 * be rejected.
2920 */
2921 if (reshape.backup_blocks &&
2922 reshape.after.data_disks > reshape.before.data_disks)
2923 subarray_set_num(container, info2, "raid_disks",
2924 reshape.after.data_disks +
2925 reshape.parity);
2926 for (d = info2->devs; d; d = d->next) {
2927 if (d->disk.state == 0 &&
2928 d->disk.raid_disk >= 0) {
2929 /* This is a spare that wants to
2930 * be part of the array.
2931 */
2932 add_disk(fd, st, info2, d);
2933 }
2934 }
2935 sysfs_free(info2);
2936 }
2937 }
2938 /* We might have been given some devices to add to the
2939 * array. Now that the array has been changed to the right
2940 * level and frozen, we can safely add them.
2941 */
2942 if (devlist)
2943 Manage_subdevs(devname, fd, devlist, verbose,
2944 0,NULL, 0);
2945
2946 if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS)
2947 reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512;
2948 if (reshape.backup_blocks == 0) {
2949 /* No restriping needed, but we might need to impose
2950 * some more changes: layout, raid_disks, chunk_size
2951 */
2952 /* read current array info */
2953 if (ioctl(fd, GET_ARRAY_INFO, &array) != 0) {
2954 dprintf("Cannot get array information.\n");
2955 goto release;
2956 }
2957 /* compare current array info with new values and if
2958 * it is different update them to new */
2959 if (info->new_layout != UnSet &&
2960 info->new_layout != array.layout) {
2961 array.layout = info->new_layout;
2962 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2963 pr_err("failed to set new layout\n");
2964 goto release;
2965 } else if (verbose >= 0)
2966 printf("layout for %s set to %d\n",
2967 devname, array.layout);
2968 }
2969 if (info->delta_disks != UnSet &&
2970 info->delta_disks != 0 &&
2971 array.raid_disks != (info->array.raid_disks + info->delta_disks)) {
2972 array.raid_disks += info->delta_disks;
2973 if (ioctl(fd, SET_ARRAY_INFO, &array) != 0) {
2974 pr_err("failed to set raid disks\n");
2975 goto release;
2976 } else if (verbose >= 0) {
2977 printf("raid_disks for %s set to %d\n",
2978 devname, array.raid_disks);
2979 }
2980 }
2981 if (info->new_chunk != 0 &&
2982 info->new_chunk != array.chunk_size) {
2983 if (sysfs_set_num(info, NULL,
2984 "chunk_size", info->new_chunk) != 0) {
2985 pr_err("failed to set chunk size\n");
2986 goto release;
2987 } else if (verbose >= 0)
2988 printf("chunk size for %s set to %d\n",
2989 devname, array.chunk_size);
2990 }
2991 unfreeze(st);
2992 return 0;
2993 }
2994
2995 /*
2996 * There are three possibilities.
2997 * 1/ The array will shrink.
2998 * We need to ensure the reshape will pause before reaching
2999 * the 'critical section'. We also need to fork and wait for
3000 * that to happen. When it does we
3001 * suspend/backup/complete/unfreeze
3002 *
3003 * 2/ The array will not change size.
3004 * This requires that we keep a backup of a sliding window
3005 * so that we can restore data after a crash. So we need
3006 * to fork and monitor progress.
3007 * In future we will allow the data_offset to change, so
3008 * a sliding backup becomes unnecessary.
3009 *
3010 * 3/ The array will grow. This is relatively easy.
3011 * However the kernel's restripe routines will cheerfully
3012 * overwrite some early data before it is safe. So we
3013 * need to make a backup of the early parts of the array
3014 * and be ready to restore it if rebuild aborts very early.
3015 * For externally managed metadata, we still need a forked
3016 * child to monitor the reshape and suspend IO over the region
3017 * that is being reshaped.
3018 *
3019 * We backup data by writing it to one spare, or to a
3020 * file which was given on command line.
3021 *
3022 * In each case, we first make sure that storage is available
3023 * for the required backup.
3024 * Then we:
3025 * - request the shape change.
3026 * - fork to handle backup etc.
3027 */
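	/* For example, a 4-disk to 5-disk RAID5 grow is case 3: only the
	 * early stripes, where the new layout would overwrite data the old
	 * layout has not yet read, need to be backed up (to a spare or to
	 * the --backup-file); once past that critical section the read
	 * position stays safely ahead of the write position.
	 */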
3028 /* Check that we can hold all the data */
3029 get_dev_size(fd, NULL, &array_size);
3030 if (reshape.new_size < (array_size/512)) {
3031 pr_err("this change will reduce the size of the array.\n"
3032 " use --grow --array-size first to truncate array.\n"
3033 " e.g. mdadm --grow %s --array-size %llu\n",
3034 devname, reshape.new_size/2);
3035 goto release;
3036 }
3037
3038 if (array.level == 10) {
3039 /* Reshaping RAID10 does not require any data backup by
3040 * user-space. Instead it requires that the data_offset
3041 * is changed to avoid the need for backup.
3042 * So this is handled very separately
3043 */
3044 if (restart)
3045 /* Nothing to do. */
3046 return 0;
3047 return raid10_reshape(container, fd, devname, st, info,
3048 &reshape, data_offset,
3049 force, verbose);
3050 }
3051 sra = sysfs_read(fd, NULL,
3052 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK|
3053 GET_CACHE);
3054 if (!sra) {
3055 pr_err("%s: Cannot get array details from sysfs\n",
3056 devname);
3057 goto release;
3058 }
3059
3060 if (!backup_file)
3061 switch(set_new_data_offset(sra, st, devname,
3062 reshape.after.data_disks - reshape.before.data_disks,
3063 data_offset,
3064 reshape.min_offset_change, 1)) {
3065 case -1:
3066 goto release;
3067 case 0:
3068 /* Updated data_offset, so it's easy now */
3069 update_cache_size(container, sra, info,
3070 min(reshape.before.data_disks,
3071 reshape.after.data_disks),
3072 reshape.backup_blocks);
3073
3074 /* Right, everything seems fine. Let's kick things off.
3075 */
3076 sync_metadata(st);
3077
3078 if (impose_reshape(sra, info, st, fd, restart,
3079 devname, container, &reshape) < 0)
3080 goto release;
3081 if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) {
3082 pr_err("Failed to initiate reshape!\n");
3083 goto release;
3084 }
3085 if (info->new_level == reshape.level)
3086 return 0;
3087 /* need to adjust level when reshape completes */
3088 switch(fork()) {
3089 case -1: /* ignore error, but don't wait */
3090 return 0;
3091 default: /* parent */
3092 return 0;
3093 case 0:
3094 map_fork();
3095 break;
3096 }
3097 close(fd);
3098 wait_reshape(sra);
3099 fd = open_dev(sra->sys_name);
3100 if (fd >= 0)
3101 impose_level(fd, info->new_level, devname, verbose);
3102 return 0;
3103 case 1: /* Couldn't set data_offset, try the old way */
3104 if (data_offset != INVALID_SECTORS) {
3105 pr_err("Cannot update data_offset on this array\n");
3106 goto release;
3107 }
3108 break;
3109 }
3110
3111 started:
3112 /* Decide how many blocks (sectors) for a reshape
3113 * unit. The number we have so far is just a minimum
3114 */
3115 blocks = reshape.backup_blocks;
3116 if (reshape.before.data_disks ==
3117 reshape.after.data_disks) {
3118 /* Make 'blocks' bigger for better throughput, but
3119 * not so big that we reject it below.
3120 * Try for 16 megabytes
3121 */
3122 while (blocks * 32 < sra->component_size &&
3123 blocks < 16*1024*2)
3124 blocks *= 2;
3125 } else
3126 pr_err("Need to backup %luK of critical "
3127 "section..\n", blocks/2);
3128
3129 if (blocks >= sra->component_size/2) {
3130 pr_err("%s: Something wrong"
3131 " - reshape aborted\n",
3132 devname);
3133 goto release;
3134 }
3135
3136 /* Now we need to open all these devices so we can read/write.
3137 */
3138 nrdisks = max(reshape.before.data_disks,
3139 reshape.after.data_disks) + reshape.parity
3140 + sra->array.spare_disks;
3141 fdlist = xcalloc((1+nrdisks), sizeof(int));
3142 offsets = xcalloc((1+nrdisks), sizeof(offsets[0]));
3143
3144 odisks = reshape.before.data_disks + reshape.parity;
3145 d = reshape_prepare_fdlist(devname, sra, odisks,
3146 nrdisks, blocks, backup_file,
3147 fdlist, offsets);
3148 if (d < 0) {
3149 goto release;
3150 }
3151 if ((st->ss->manage_reshape == NULL) ||
3152 (st->ss->recover_backup == NULL)) {
3153 if (backup_file == NULL) {
3154 if (reshape.after.data_disks <=
3155 reshape.before.data_disks) {
3156 pr_err("%s: Cannot grow - need backup-file\n",
3157 devname);
3158 pr_err(" Please provide one with \"--backup=...\"\n");
3159 goto release;
3160 } else if (sra->array.spare_disks == 0) {
3161 pr_err("%s: Cannot grow - "
3162 "need a spare or backup-file to backup "
3163 "critical section\n", devname);
3164 goto release;
3165 }
3166 } else {
3167 if (!reshape_open_backup_file(backup_file, fd, devname,
3168 (signed)blocks,
3169 fdlist+d, offsets+d,
3170 restart)) {
3171 goto release;
3172 }
3173 d++;
3174 }
3175 }
3176
3177 update_cache_size(container, sra, info,
3178 min(reshape.before.data_disks, reshape.after.data_disks),
3179 blocks);
3180
3181 /* Right, everything seems fine. Let's kick things off.
3182 * If only changing raid_disks, use ioctl, else use
3183 * sysfs.
3184 */
3185 sync_metadata(st);
3186
3187 if (impose_reshape(sra, info, st, fd, restart,
3188 devname, container, &reshape) < 0)
3189 goto release;
3190
3191 err = start_reshape(sra, restart, reshape.before.data_disks,
3192 reshape.after.data_disks);
3193 if (err) {
3194 pr_err("Cannot %s reshape for %s\n",
3195 restart ? "continue" : "start",
3196 devname);
3197 goto release;
3198 }
3199 if (restart)
3200 sysfs_set_str(sra, NULL, "array_state", "active");
3201 if (freeze_reshape) {
3202 free(fdlist);
3203 free(offsets);
3204 sysfs_free(sra);
3205 pr_err("Reshape has to be continued from"
3206 " location %llu when root filesystem has been mounted.\n",
3207 sra->reshape_progress);
3208 return 1;
3209 }
3210
3211 /* Now we just need to kick off the reshape and watch, while
3212 * handling backups of the data...
3213 * This is all done by a forked background process.
3214 */
3215 switch(forked ? 0 : fork()) {
3216 case -1:
3217 pr_err("Cannot run child to monitor reshape: %s\n",
3218 strerror(errno));
3219 abort_reshape(sra);
3220 goto release;
3221 default:
3222 free(fdlist);
3223 free(offsets);
3224 sysfs_free(sra);
3225 return 0;
3226 case 0:
3227 map_fork();
3228 break;
3229 }
3230
3231 /* If another array on the same devices is busy, the
3232 * reshape will wait for them. This would mean that
3233 * the first section that we suspend will stay suspended
3234 * for a long time. So check on that possibility
3235 * by looking for "DELAYED" in /proc/mdstat, and if found,
3236 * wait a while
3237 */
3238 do {
3239 struct mdstat_ent *mds, *m;
3240 delayed = 0;
3241 mds = mdstat_read(1, 0);
3242 for (m = mds; m; m = m->next)
3243 if (strcmp(m->devnm, sra->sys_name) == 0) {
3244 if (m->resync &&
3245 m->percent == RESYNC_DELAYED)
3246 delayed = 1;
3247 if (m->resync == 0)
3248 /* Haven't started the reshape thread
3249 * yet, wait a bit
3250 */
3251 delayed = 2;
3252 break;
3253 }
3254 free_mdstat(mds);
3255 if (delayed == 1 && get_linux_version() < 3007000) {
3256 pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n"
3257 " You might experience problems until other reshapes complete.\n");
3258 delayed = 0;
3259 }
3260 if (delayed)
3261 mdstat_wait(30 - (delayed-1) * 25);
3262 } while (delayed);
3263 mdstat_close();
3264 close(fd);
3265 if (check_env("MDADM_GROW_VERIFY"))
3266 fd = open(devname, O_RDONLY | O_DIRECT);
3267 else
3268 fd = -1;
3269 mlockall(MCL_FUTURE);
3270
3271 signal(SIGTERM, catch_term);
3272
3273 if (st->ss->external) {
3274 /* metadata handler takes it from here */
3275 done = st->ss->manage_reshape(
3276 fd, sra, &reshape, st, blocks,
3277 fdlist, offsets,
3278 d - odisks, fdlist+odisks,
3279 offsets+odisks);
3280 } else
3281 done = child_monitor(
3282 fd, sra, &reshape, st, blocks,
3283 fdlist, offsets,
3284 d - odisks, fdlist+odisks,
3285 offsets+odisks);
3286
3287 free(fdlist);
3288 free(offsets);
3289
3290 if (backup_file && done)
3291 unlink(backup_file);
3292 if (!done) {
3293 abort_reshape(sra);
3294 goto out;
3295 }
3296
3297 if (!st->ss->external &&
3298 !(reshape.before.data_disks != reshape.after.data_disks
3299 && info->custom_array_size) &&
3300 info->new_level == reshape.level &&
3301 !forked) {
3302 /* no need to wait for the reshape to finish as
3303 * there is nothing more to do.
3304 */
3305 sysfs_free(sra);
3306 exit(0);
3307 }
3308 wait_reshape(sra);
3309
3310 if (st->ss->external) {
3311 /* Re-load the metadata as much could have changed */
3312 int cfd = open_dev(st->container_devnm);
3313 if (cfd >= 0) {
3314 flush_mdmon(container);
3315 st->ss->free_super(st);
3316 st->ss->load_container(st, cfd, container);
3317 close(cfd);
3318 }
3319 }
3320
3321 /* Set the new array size if required; custom_array_size is used
3322 * by this metadata.
3323 */
3324 if (reshape.before.data_disks !=
3325 reshape.after.data_disks &&
3326 info->custom_array_size)
3327 set_array_size(st, info, info->text_version);
3328
3329 if (info->new_level != reshape.level) {
3330 if (fd < 0)
3331 fd = open(devname, O_RDONLY);
3332 impose_level(fd, info->new_level, devname, verbose);
3333 close(fd);
3334 if (info->new_level == 0)
3335 st->update_tail = NULL;
3336 }
3337 out:
3338 sysfs_free(sra);
3339 if (forked)
3340 return 0;
3341 unfreeze(st);
3342 exit(0);
3343
3344 release:
3345 free(fdlist);
3346 free(offsets);
3347 if (orig_level != UnSet && sra) {
3348 c = map_num(pers, orig_level);
3349 if (c && sysfs_set_str(sra, NULL, "level", c) == 0)
3350 pr_err("aborting level change\n");
3351 }
3352 sysfs_free(sra);
3353 if (!forked)
3354 unfreeze(st);
3355 return 1;
3356 }
3357
3358 /* mdfd handle is passed to be closed in child process (after fork).
3359 */
3360 int reshape_container(char *container, char *devname,
3361 int mdfd,
3362 struct supertype *st,
3363 struct mdinfo *info,
3364 int force,
3365 char *backup_file,
3366 int verbose, int restart, int freeze_reshape)
3367 {
3368 struct mdinfo *cc = NULL;
3369 int rv = restart;
3370 char last_devnm[32] = "";
3371
3372 /* component_size is not meaningful for a container,
3373 * so pass '0' meaning 'no change'
3374 */
3375 if (!restart &&
3376 reshape_super(st, 0, info->new_level,
3377 info->new_layout, info->new_chunk,
3378 info->array.raid_disks, info->delta_disks,
3379 backup_file, devname, APPLY_METADATA_CHANGES,
3380 verbose)) {
3381 unfreeze(st);
3382 return 1;
3383 }
3384
3385 sync_metadata(st);
3386
3387 /* ping monitor to be sure that update is on disk
3388 */
3389 ping_monitor(container);
3390
3391 switch (fork()) {
3392 case -1: /* error */
3393 perror("Cannot fork to complete reshape\n");
3394 unfreeze(st);
3395 return 1;
3396 default: /* parent */
3397 if (!freeze_reshape)
3398 printf(Name ": multi-array reshape continues"
3399 " in background\n");
3400 return 0;
3401 case 0: /* child */
3402 map_fork();
3403 break;
3404 }
3405
3406 /* close unused handle in child process
3407 */
3408 if (mdfd > -1)
3409 close(mdfd);
3410
3411 while(1) {
3412 /* For each member array with reshape_active,
3413 * we need to perform the reshape.
3414 * We pick the first array that needs reshaping and
3415 * reshape it. reshape_array() will re-read the metadata
3416 * so the next time through a different array should be
3417 * ready for reshape.
3418 * It is possible that the 'different' array will not
3419 * be assembled yet. In that case we simply exit.
3420 * When it is assembled, the mdadm which assembles it
3421 * will take over the reshape.
3422 */
3423 struct mdinfo *content;
3424 int fd;
3425 struct mdstat_ent *mdstat;
3426 char *adev;
3427 int devid;
3428
3429 sysfs_free(cc);
3430
3431 cc = st->ss->container_content(st, NULL);
3432
3433 for (content = cc; content ; content = content->next) {
3434 char *subarray;
3435 if (!content->reshape_active)
3436 continue;
3437
3438 subarray = strchr(content->text_version+1, '/')+1;
3439 mdstat = mdstat_by_subdev(subarray, container);
3440 if (!mdstat)
3441 continue;
3442 if (mdstat->active == 0) {
3443 pr_err("Skipping inactive array %s.\n",
3444 mdstat->devnm);
3445 free_mdstat(mdstat);
3446 mdstat = NULL;
3447 continue;
3448 }
3449 break;
3450 }
3451 if (!content)
3452 break;
3453
3454 devid = devnm2devid(mdstat->devnm);
3455 adev = map_dev(major(devid), minor(devid), 0);
3456 if (!adev)
3457 adev = content->text_version;
3458
3459 fd = open_dev(mdstat->devnm);
3460 if (fd < 0) {
3461 printf(Name ": Device %s cannot be opened for reshape.\n",
3462 adev);
3463 break;
3464 }
3465
3466 if (strcmp(last_devnm, mdstat->devnm) == 0) {
3467 /* Do not allow for multiple reshape_array() calls for
3468 * the same array.
3469 * It can happen when reshape_array() returns without
3470 * error, when reshape is not finished (wrong reshape
3471 * starting/continuation conditions). Mdmon doesn't
3472 * switch to next array in container and reentry
3473 * conditions for the same array occur.
3474 * This is possibly an interim measure until the behaviour of
3475 * reshape_array() is resolved.
3476 */
3477 printf(Name ": Multiple reshape execution detected for "
3478 "device %s.", adev);
3479 close(fd);
3480 break;
3481 }
3482 strcpy(last_devnm, mdstat->devnm);
3483
3484 sysfs_init(content, fd, mdstat->devnm);
3485
3486 if (mdmon_running(container))
3487 flush_mdmon(container);
3488
3489 rv = reshape_array(container, fd, adev, st,
3490 content, force, NULL, INVALID_SECTORS,
3491 backup_file, verbose, 1, restart,
3492 freeze_reshape);
3493 close(fd);
3494
3495 if (freeze_reshape) {
3496 sysfs_free(cc);
3497 exit(0);
3498 }
3499
3500 restart = 0;
3501 if (rv)
3502 break;
3503
3504 if (mdmon_running(container))
3505 flush_mdmon(container);
3506 }
3507 if (!rv)
3508 unfreeze(st);
3509 sysfs_free(cc);
3510 exit(0);
3511 }
3512
3513 /*
3514 * We run a child process in the background which performs the following
3515 * steps:
3516 * - wait for resync to reach a certain point
3517 * - suspend io to the following section
3518 * - backup that section
3519 * - allow resync to proceed further
3520 * - resume io
3521 * - discard the backup.
3522 *
3523 * These are combined in slightly different ways in the three cases.
3524 * Grow:
3525 * - suspend/backup/allow/wait/resume/discard
3526 * Shrink:
3527 * - allow/wait/suspend/backup/allow/wait/resume/discard
3528 * same-size:
3529 * - wait/resume/discard/suspend/backup/allow
3530 *
3531 * suspend/backup/allow always come together
3532 * wait/resume/discard do too.
3533 * For the same-size case we have two backups to improve flow.
3534 *
3535 */
3536
3537 int progress_reshape(struct mdinfo *info, struct reshape *reshape,
3538 unsigned long long backup_point,
3539 unsigned long long wait_point,
3540 unsigned long long *suspend_point,
3541 unsigned long long *reshape_completed, int *frozen)
3542 {
3543 /* This function is called repeatedly by the reshape manager.
3544 * It determines how much progress can safely be made and allows
3545 * that progress.
3546 * - 'info' identifies the array and particularly records in
3547 * ->reshape_progress the metadata's knowledge of progress
3548 * This is a sector offset from the start of the array
3549 * of the next array block to be relocated. This number
3550 * may increase from 0 or decrease from array_size, depending
3551 * on the type of reshape that is happening.
3552 * Note that in contrast, 'sync_completed' is a block count of the
3553 * reshape so far. It gives the distance between the start point
3554 * (head or tail of device) and the next place that data will be
3555 * written. It always increases.
3556 * - 'reshape' is the structure created by analyse_change
3557 * - 'backup_point' shows how much the metadata manager has backed-up
3558 * data. For reshapes with increasing progress, it is the next address
3559 * to be backed up; previous addresses have already been backed up. For
3560 * decreasing progress, it is the earliest address that has been
3561 * backed up - later addresses are also backed up.
3562 * So addresses between reshape_progress and backup_point are
3563 * backed up providing those are in the 'correct' order.
3564 * - 'wait_point' is an array address. When reshape_completed
3565 * passes this point, progress_reshape should return. It might
3566 * return earlier if it determines that ->reshape_progress needs
3567 * to be updated or further backup is needed.
3568 * - suspend_point is maintained by progress_reshape and the caller
3569 * should not touch it except to initialise to zero.
3570 * It is an array address; on kernels 2.6.37 and earlier it can only increase.
3571 * This makes it difficult to handle reducing reshapes with
3572 * external metadata.
3573 * However: it is similar to backup_point in that it records the
3574 * other end of a suspended region from reshape_progress.
3575 * it is moved to extend the region that is safe to backup and/or
3576 * reshape
3577 * - reshape_completed is read from sysfs and returned. The caller
3578 * should copy this into ->reshape_progress when it has reason to
3579 * believe that the metadata knows this, and any backup outside this
3580 * has been erased.
3581 *
3582 * Return value is:
3583 * 1 if more data, from backup_point but only as far as suspend_point,
3584 * should be backed up
3585 * 0 if things are progressing smoothly
3586 * -1 if the reshape is finished because it is all done,
3587 * -2 if the reshape is finished due to an error.
3588 */
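	/* Rough example: when growing a 3-disk RAID5 (2 data disks) to
	 * 4 disks (3 data disks), a sync_completed of 1000 sectors per
	 * device corresponds to a reshape_progress of about 1000 * 3 = 3000
	 * array sectors, counted upwards from the start of the array because
	 * the reshape is advancing.
	 */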
3589
3590 int advancing = (reshape->after.data_disks
3591 >= reshape->before.data_disks);
3592 unsigned long long need_backup; /* All data between start of array and
3593 * here will at some point need to
3594 * be backed up.
3595 */
3596 unsigned long long read_offset, write_offset;
3597 unsigned long long write_range;
3598 unsigned long long max_progress, target, completed;
3599 unsigned long long array_size = (info->component_size
3600 * reshape->before.data_disks);
3601 int fd;
3602 char buf[20];
3603
3604 /* First, we unsuspend any region that is now known to be safe.
3605 * If suspend_point is on the 'wrong' side of reshape_progress, then
3606 * we don't have or need suspension at the moment. This is true for
3607 * native metadata when we don't need to back-up.
3608 */
3609 if (advancing) {
3610 if (info->reshape_progress <= *suspend_point)
3611 sysfs_set_num(info, NULL, "suspend_lo",
3612 info->reshape_progress);
3613 } else {
3614 /* Note: this won't work in 2.6.37 and before.
3615 * Something somewhere should make sure we don't need it!
3616 */
3617 if (info->reshape_progress >= *suspend_point)
3618 sysfs_set_num(info, NULL, "suspend_hi",
3619 info->reshape_progress);
3620 }
3621
3622 /* Now work out how far it is safe to progress.
3623 * If the read_offset for ->reshape_progress is less than
3624 * 'blocks' beyond the write_offset, we can only progress as far
3625 * as a backup.
3626 * Otherwise we can progress until the write_offset for the new location
3627 * reaches (within 'blocks' of) the read_offset at the current location.
3628 * However that region must be suspended unless we are using native
3629 * metadata.
3630 * If we need to suspend more, we limit it to 128M per device, which is
3631 * rather arbitrary and should be some time-based calculation.
3632 */
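	/* e.g. growing from 4 to 5 data disks with reshape_progress == 40960:
	 * read_offset = 40960/4 = 10240 and write_offset = 40960/5 = 8192;
	 * while read_offset stays more than one new chunk ahead of
	 * write_offset, progress is allowed without any further backup.
	 */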
3633 read_offset = info->reshape_progress / reshape->before.data_disks;
3634 write_offset = info->reshape_progress / reshape->after.data_disks;
3635 write_range = info->new_chunk/512;
3636 if (reshape->before.data_disks == reshape->after.data_disks)
3637 need_backup = array_size;
3638 else
3639 need_backup = reshape->backup_blocks;
3640 if (advancing) {
3641 if (read_offset < write_offset + write_range)
3642 max_progress = backup_point;
3643 else
3644 max_progress =
3645 read_offset *
3646 reshape->after.data_disks;
3647 } else {
3648 if (read_offset > write_offset - write_range)
3649 /* Can only progress as far as has been backed up,
3650 * which must be suspended */
3651 max_progress = backup_point;
3652 else if (info->reshape_progress <= need_backup)
3653 max_progress = backup_point;
3654 else {
3655 if (info->array.major_version >= 0)
3656 /* Can progress until backup is needed */
3657 max_progress = need_backup;
3658 else {
3659 /* Can progress until metadata update is required */
3660 max_progress =
3661 read_offset *
3662 reshape->after.data_disks;
3663 /* but data must be suspended */
3664 if (max_progress < *suspend_point)
3665 max_progress = *suspend_point;
3666 }
3667 }
3668 }
3669
3670 /* We know it is safe to progress to 'max_progress' providing
3671 * it is suspended or we are using native metadata.
3672 * Consider extending suspend_point 128M per device if it
3673 * is less than 64M per device beyond reshape_progress.
3674 * But always do a multiple of 'blocks'
3675 * FIXME this is too big - it takes too long to complete
3676 * this much.
3677 */
3678 target = 64*1024*2 * min(reshape->before.data_disks,
3679 reshape->after.data_disks);
3680 target /= reshape->backup_blocks;
3681 if (target < 2)
3682 target = 2;
3683 target *= reshape->backup_blocks;
3684
3685 /* For externally managed metadata we always need to suspend IO to
3686 * the area being reshaped so we regularly push suspend_point forward.
3687 * For native metadata we only need the suspend if we are going to do
3688 * a backup.
3689 */
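	/* e.g. with native metadata on a growing array, no more suspend_hi
	 * pushes happen once reshape_progress is past need_backup; with
	 * external metadata (major_version < 0) suspend_hi keeps being moved
	 * up to 2 * target ahead of the current position.
	 */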
3690 if (advancing) {
3691 if ((need_backup > info->reshape_progress
3692 || info->array.major_version < 0) &&
3693 *suspend_point < info->reshape_progress + target) {
3694 if (need_backup < *suspend_point + 2 * target)
3695 *suspend_point = need_backup;
3696 else if (*suspend_point + 2 * target < array_size)
3697 *suspend_point += 2 * target;
3698 else
3699 *suspend_point = array_size;
3700 sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
3701 if (max_progress > *suspend_point)
3702 max_progress = *suspend_point;
3703 }
3704 } else {
3705 if (info->array.major_version >= 0) {
3706 /* Only need to suspend when about to backup */
3707 if (info->reshape_progress < need_backup * 2 &&
3708 *suspend_point > 0) {
3709 *suspend_point = 0;
3710 sysfs_set_num(info, NULL, "suspend_lo", 0);
3711 sysfs_set_num(info, NULL, "suspend_hi", need_backup);
3712 }
3713 } else {
3714 /* Need to suspend continually */
3715 if (info->reshape_progress < *suspend_point)
3716 *suspend_point = info->reshape_progress;
3717 if (*suspend_point + target < info->reshape_progress)
3718 /* No need to move suspend region yet */;
3719 else {
3720 if (*suspend_point >= 2 * target)
3721 *suspend_point -= 2 * target;
3722 else
3723 *suspend_point = 0;
3724 sysfs_set_num(info, NULL, "suspend_lo",
3725 *suspend_point);
3726 }
3727 if (max_progress < *suspend_point)
3728 max_progress = *suspend_point;
3729 }
3730 }
3731
3732 /* now set sync_max to allow that progress. sync_max, like
3733 * sync_completed is a count of sectors written per device, so
3734 * we find the difference between max_progress and the start point,
3735 * and divide that by after.data_disks to get a sync_max
3736 * number.
3737 * At the same time we convert wait_point to a similar number
3738 * for comparing against sync_completed.
3739 */
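	/* e.g. max_progress == 30000 array sectors with 3 data disks after
	 * the change gives a per-device sync_max of 10000 sectors, which is
	 * then rounded down to whole new and old chunks below.
	 */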
3740 /* scale down max_progress to per_disk */
3741 max_progress /= reshape->after.data_disks;
3742 /* Round to chunk size as some kernels give an erroneously high number */
3743 max_progress /= info->new_chunk/512;
3744 max_progress *= info->new_chunk/512;
3745 /* And round to old chunk size as the kernel wants that */
3746 max_progress /= info->array.chunk_size/512;
3747 max_progress *= info->array.chunk_size/512;
3748 /* Limit progress to the whole device */
3749 if (max_progress > info->component_size)
3750 max_progress = info->component_size;
3751 wait_point /= reshape->after.data_disks;
3752 if (!advancing) {
3753 /* switch from 'device offset' to 'processed block count' */
3754 max_progress = info->component_size - max_progress;
3755 wait_point = info->component_size - wait_point;
3756 }
3757
3758 if (!*frozen)
3759 sysfs_set_num(info, NULL, "sync_max", max_progress);
3760
3761 /* Now wait. If we have already reached the point that we were
3762 * asked to wait to, don't wait at all, else wait for any change.
3763 * We need to select on 'sync_completed' as that is the place that
3764 * notifications happen, but we are really interested in
3765 * 'reshape_position'
3766 */
3767 fd = sysfs_get_fd(info, NULL, "sync_completed");
3768 if (fd < 0)
3769 goto check_progress;
3770
3771 if (sysfs_fd_get_ll(fd, &completed) < 0)
3772 goto check_progress;
3773
3774 while (completed < max_progress && completed < wait_point) {
3775 /* Check that sync_action is still 'reshape' to avoid
3776 * waiting forever on a dead array
3777 */
3778 char action[20];
3779 if (sysfs_get_str(info, NULL, "sync_action",
3780 action, 20) <= 0 ||
3781 strncmp(action, "reshape", 7) != 0)
3782 break;
3783 /* Some kernels reset 'sync_completed' to zero
3784 * before setting 'sync_action' to 'idle'.
3785 * So we need these extra tests.
3786 */
3787 if (completed == 0 && advancing
3788 && info->reshape_progress > 0)
3789 break;
3790 if (completed == 0 && !advancing
3791 && info->reshape_progress < (info->component_size
3792 * reshape->after.data_disks))
3793 break;
3794 sysfs_wait(fd, NULL);
3795 if (sysfs_fd_get_ll(fd, &completed) < 0)
3796 goto check_progress;
3797 }
3798 /* Some kernels reset 'sync_completed' to zero;
3799 * in that case fall back to the point we asked md to reach.
3800 */
3801 if (completed == 0)
3802 completed = max_progress;
3803
3804 /* some kernels can give an incorrectly high 'completed' number */
3805 completed /= (info->new_chunk/512);
3806 completed *= (info->new_chunk/512);
3807 /* Convert 'completed' back into a 'progress' number */
3808 completed *= reshape->after.data_disks;
3809 if (!advancing) {
3810 completed = info->component_size * reshape->after.data_disks
3811 - completed;
3812 }
3813 *reshape_completed = completed;
3814
3815 close(fd);
3816
3817 /* We return the need_backup flag. Caller will decide
3818 * how much - a multiple of ->backup_blocks up to *suspend_point
3819 */
3820 if (advancing)
3821 return need_backup > info->reshape_progress;
3822 else
3823 return need_backup >= info->reshape_progress;
3824
3825 check_progress:
3826 /* if we couldn't read a number from sync_completed, then
3827 * either the reshape did complete, or it aborted.
3828 * We can tell which by checking for 'none' in reshape_position.
3829 * If it did abort, then it might immediately restart if it
3830 * was just a device failure that leaves us degraded but
3831 * functioning.
3832 */
3833 strcpy(buf, "hi");
3834 if (sysfs_get_str(info, NULL, "reshape_position", buf, sizeof(buf)) < 0
3835 || strncmp(buf, "none", 4) != 0) {
3836 /* The abort might only be temporary. Wait up to 10
3837 * seconds for fd to contain a valid number again.
3838 */
3839 int wait = 10000;
3840 int rv = -2;
3841 unsigned long long new_sync_max;
3842 while (fd >= 0 && rv < 0 && wait > 0) {
3843 if (sysfs_wait(fd, &wait) != 1)
3844 break;
3845 switch (sysfs_fd_get_ll(fd, &completed)) {
3846 case 0:
3847 /* all good again */
3848 rv = 1;
3849 /* If "sync_max" is no longer max_progress
3850 * we need to freeze things
3851 */
3852 sysfs_get_ll(info, NULL, "sync_max", &new_sync_max);
3853 *frozen = (new_sync_max != max_progress);
3854 break;
3855 case -2: /* read error - abort */
3856 wait = 0;
3857 break;
3858 }
3859 }
3860 if (fd >= 0)
3861 close(fd);
3862 return rv; /* abort */
3863 } else {
3864 /* Maybe racing with array shutdown - check state */
3865 if (fd >= 0)
3866 close(fd);
3867 if (sysfs_get_str(info, NULL, "array_state", buf, sizeof(buf)) < 0
3868 || strncmp(buf, "inactive", 8) == 0
3869 || strncmp(buf, "clear",5) == 0)
3870 return -2; /* abort */
3871 return -1; /* complete */
3872 }
3873 }
3874
3875 /* FIXME return status is never checked */
3876 static int grow_backup(struct mdinfo *sra,
3877 unsigned long long offset, /* per device */
3878 unsigned long stripes, /* per device, in old chunks */
3879 int *sources, unsigned long long *offsets,
3880 int disks, int chunk, int level, int layout,
3881 int dests, int *destfd, unsigned long long *destoffsets,
3882 int part, int *degraded,
3883 char *buf)
3884 {
3885 /* Back up 'stripes' stripes (of the old chunk size) at 'offset' on each device of the array,
3886 * to storage 'destfd' (offset 'destoffsets'), after first
3887 * suspending IO. Then allow resync to continue
3888 * over the suspended section.
3889 * Use part 'part' of the backup-super-block.
3890 */
3891 int odata = disks;
3892 int rv = 0;
3893 int i;
3894 unsigned long long ll;
3895 int new_degraded;
3896 //printf("offset %llu\n", offset);
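/* odata counts the data (non-parity) devices in the old geometry:
 * one parity device is dropped for RAID4/5 and a second one for RAID6.
 */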
3897 if (level >= 4)
3898 odata--;
3899 if (level == 6)
3900 odata--;
3901
3902 /* Check that the array hasn't become degraded, else we might back up the wrong data */
3903 if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
3904 return -1; /* FIXME this error is ignored */
3905 new_degraded = (int)ll;
3906 if (new_degraded != *degraded) {
3907 /* check each device to ensure it is still working */
3908 struct mdinfo *sd;
3909 for (sd = sra->devs ; sd ; sd = sd->next) {
3910 if (sd->disk.state & (1<<MD_DISK_FAULTY))
3911 continue;
3912 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
3913 char sbuf[20];
3914 if (sysfs_get_str(sra, sd, "state", sbuf, 20) < 0 ||
3915 strstr(sbuf, "faulty") ||
3916 strstr(sbuf, "in_sync") == NULL) {
3917 /* this device is dead */
3918 sd->disk.state = (1<<MD_DISK_FAULTY);
3919 if (sd->disk.raid_disk >= 0 &&
3920 sources[sd->disk.raid_disk] >= 0) {
3921 close(sources[sd->disk.raid_disk]);
3922 sources[sd->disk.raid_disk] = -1;
3923 }
3924 }
3925 }
3926 }
3927 *degraded = new_degraded;
3928 }
3929 if (part) {
3930 bsb.arraystart2 = __cpu_to_le64(offset * odata);
3931 bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
3932 } else {
3933 bsb.arraystart = __cpu_to_le64(offset * odata);
3934 bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
3935 }
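/* Once the second slot is in use, flip the magic to "md_backup_data-2"
 * so that restore knows to look at arraystart2/length2 and sb_csum2.
 */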
3936 if (part)
3937 bsb.magic[15] = '2';
3938 for (i = 0; i < dests; i++)
3939 if (part)
3940 lseek64(destfd[i], destoffsets[i] + __le64_to_cpu(bsb.devstart2)*512, 0);
3941 else
3942 lseek64(destfd[i], destoffsets[i], 0);
3943
3944 rv = save_stripes(sources, offsets,
3945 disks, chunk, level, layout,
3946 dests, destfd,
3947 offset*512*odata, stripes * chunk * odata,
3948 buf);
3949
3950 if (rv)
3951 return rv;
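/* Stripes saved; now timestamp and checksum the backup superblock and
 * write it 4KiB before each destination's data area, plus a trailing
 * copy after the data when the destination offset allows.
 */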
3952 bsb.mtime = __cpu_to_le64(time(0));
3953 for (i = 0; i < dests; i++) {
3954 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
3955
3956 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
3957 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
3958 bsb.sb_csum2 = bsb_csum((char*)&bsb,
3959 ((char*)&bsb.sb_csum2)-((char*)&bsb));
3960
3961 rv = -1;
3962 if ((unsigned long long)lseek64(destfd[i], destoffsets[i] - 4096, 0)
3963 != destoffsets[i] - 4096)
3964 break;
3965 if (write(destfd[i], &bsb, 512) != 512)
3966 break;
3967 if (destoffsets[i] > 4096) {
3968 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
3969 destoffsets[i]+stripes*chunk*odata)
3970 break;
3971 if (write(destfd[i], &bsb, 512) != 512)
3972 break;
3973 }
3974 fsync(destfd[i]);
3975 rv = 0;
3976 }
3977
3978 return rv;
3979 }
3980
3981 /* in 2.6.30, the value reported by sync_completed can be
3982 * less than it should be by one stripe.
3983 * This only happens when a reshape hits sync_max and pauses.
3984 * So allow wait_backup either to extend sync_max further
3985 * than strictly necessary, or to return before the
3986 * sync has got quite as far as we would really like.
3987 * This is what 'blocks2' is for.
3988 * The various callers give appropriate values so that
3989 * everything works.
3990 */
3991 /* FIXME return value is often ignored */
3992 static int forget_backup(int dests, int *destfd,
3993 unsigned long long *destoffsets,
3994 int part)
3995 {
3996 /*
3997 * Erase backup 'part' (which is 0 or 1)
3998 */
3999 int i;
4000 int rv;
4001
4002 if (part) {
4003 bsb.arraystart2 = __cpu_to_le64(0);
4004 bsb.length2 = __cpu_to_le64(0);
4005 } else {
4006 bsb.arraystart = __cpu_to_le64(0);
4007 bsb.length = __cpu_to_le64(0);
4008 }
4009 bsb.mtime = __cpu_to_le64(time(0));
4010 rv = 0;
4011 for (i = 0; i < dests; i++) {
4012 bsb.devstart = __cpu_to_le64(destoffsets[i]/512);
4013 bsb.sb_csum = bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb));
4014 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
4015 bsb.sb_csum2 = bsb_csum((char*)&bsb,
4016 ((char*)&bsb.sb_csum2)-((char*)&bsb));
4017 if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) !=
4018 destoffsets[i]-4096)
4019 rv = -1;
4020 if (rv == 0 &&
4021 write(destfd[i], &bsb, 512) != 512)
4022 rv = -1;
4023 fsync(destfd[i]);
4024 }
4025 return rv;
4026 }
4027
4028 static void fail(char *msg)
4029 {
4030 int rv;
4031 rv = (write(2, msg, strlen(msg)) != (int)strlen(msg));
4032 rv |= (write(2, "\n", 1) != 1);
4033 exit(rv ? 1 : 2);
4034 }
4035
4036 static char *abuf, *bbuf;
4037 static unsigned long long abuflen;
4038 static void validate(int afd, int bfd, unsigned long long offset)
4039 {
4040 /* Check the data in the backup against the array.
4041 * This is only used for regression testing and should not
4042 * be used while the array is active
4043 */
4044 if (afd < 0)
4045 return;
4046 lseek64(bfd, offset - 4096, 0);
4047 if (read(bfd, &bsb2, 512) != 512)
4048 fail("cannot read bsb");
4049 if (bsb2.sb_csum != bsb_csum((char*)&bsb2,
4050 ((char*)&bsb2.sb_csum)-((char*)&bsb2)))
4051 fail("first csum bad");
4052 if (memcmp(bsb2.magic, "md_backup_data", 14) != 0)
4053 fail("magic is bad");
4054 if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 &&
4055 bsb2.sb_csum2 != bsb_csum((char*)&bsb2,
4056 ((char*)&bsb2.sb_csum2)-((char*)&bsb2)))
4057 fail("second csum bad");
4058
4059 if (__le64_to_cpu(bsb2.devstart)*512 != offset)
4060 fail("devstart is wrong");
4061
4062 if (bsb2.length) {
4063 unsigned long long len = __le64_to_cpu(bsb2.length)*512;
4064
4065 if (abuflen < len) {
4066 free(abuf);
4067 free(bbuf);
4068 abuflen = len;
4069 if (posix_memalign((void**)&abuf, 4096, abuflen) ||
4070 posix_memalign((void**)&bbuf, 4096, abuflen)) {
4071 abuflen = 0;
4072 /* just stop validating on mem-alloc failure */
4073 return;
4074 }
4075 }
4076
4077 lseek64(bfd, offset, 0);
4078 if ((unsigned long long)read(bfd, bbuf, len) != len) {
4079 //printf("len %llu\n", len);
4080 fail("read first backup failed");
4081 }
4082 lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0);
4083 if ((unsigned long long)read(afd, abuf, len) != len)
4084 fail("read first from array failed");
4085 if (memcmp(bbuf, abuf, len) != 0) {
4086 #if 0
4087 int i;
4088 printf("offset=%llu len=%llu\n",
4089 (unsigned long long)__le64_to_cpu(bsb2.arraystart)*512, len);
4090 for (i=0; i<len; i++)
4091 if (bbuf[i] != abuf[i]) {
4092 printf("first diff byte %d\n", i);
4093 break;
4094 }
4095 #endif
4096 fail("data1 compare failed");
4097 }
4098 }
4099 if (bsb2.length2) {
4100 unsigned long long len = __le64_to_cpu(bsb2.length2)*512;
4101
4102 if (abuflen < len) {
4103 free(abuf);
4104 free(bbuf);
4105 abuflen = len;
4106 abuf = xmalloc(abuflen);
4107 bbuf = xmalloc(abuflen);
4108 }
4109
4110 lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0);
4111 if ((unsigned long long)read(bfd, bbuf, len) != len)
4112 fail("read second backup failed");
4113 lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0);
4114 if ((unsigned long long)read(afd, abuf, len) != len)
4115 fail("read second from array failed");
4116 if (memcmp(bbuf, abuf, len) != 0)
4117 fail("data2 compare failed");
4118 }
4119 }
4120
4121 int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape,
4122 struct supertype *st, unsigned long blocks,
4123 int *fds, unsigned long long *offsets,
4124 int dests, int *destfd, unsigned long long *destoffsets)
4125 {
4126 /* Monitor a reshape where backup is being performed using
4127 * 'native' mechanism - either to a backup file, or
4128 * to some space in a spare.
4129 */
4130 char *buf;
4131 int degraded = -1;
4132 unsigned long long speed;
4133 unsigned long long suspend_point, array_size;
4134 unsigned long long backup_point, wait_point;
4135 unsigned long long reshape_completed;
4136 int done = 0;
4137 int increasing = reshape->after.data_disks >= reshape->before.data_disks;
4138 int part = 0; /* The next part of the backup area to fill. It may already
4139 * be full, so we need to check */
4140 int level = reshape->level;
4141 int layout = reshape->before.layout;
4142 int data = reshape->before.data_disks;
4143 int disks = reshape->before.data_disks + reshape->parity;
4144 int chunk = sra->array.chunk_size;
4145 struct mdinfo *sd;
4146 unsigned long stripes;
4147 int uuid[4];
4148 int frozen = 0;
4149
4150 /* set up the backup-super-block. This requires the
4151 * uuid from the array.
4152 */
4153 /* Find a superblock */
4154 for (sd = sra->devs; sd; sd = sd->next) {
4155 char *dn;
4156 int devfd;
4157 int ok;
4158 if (sd->disk.state & (1<<MD_DISK_FAULTY))
4159 continue;
4160 dn = map_dev(sd->disk.major, sd->disk.minor, 1);
4161 devfd = dev_open(dn, O_RDONLY);
4162 if (devfd < 0)
4163 continue;
4164 ok = st->ss->load_super(st, devfd, NULL);
4165 close(devfd);
4166 if (ok == 0)
4167 break;
4168 }
4169 if (!sd) {
4170 pr_err("Cannot find a superblock\n");
4171 return 0;
4172 }
4173
4174 memset(&bsb, 0, 512);
4175 memcpy(bsb.magic, "md_backup_data-1", 16);
4176 st->ss->uuid_from_super(st, uuid);
4177 memcpy(bsb.set_uuid, uuid, 16);
4178 bsb.mtime = __cpu_to_le64(time(0));
4179 bsb.devstart2 = blocks;
4180
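/* Each backup slot holds 'stripes' old-geometry stripes; the second
 * slot's data starts 'blocks' sectors into the backup area, hence
 * devstart2 above.
 */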
4181 stripes = blocks / (sra->array.chunk_size/512) /
4182 reshape->before.data_disks;
4183
4184 if (posix_memalign((void**)&buf, 4096, disks * chunk))
4185 /* Don't start the 'reshape' */
4186 return 0;
4187 if (reshape->before.data_disks == reshape->after.data_disks) {
4188 sysfs_get_ll(sra, NULL, "sync_speed_min", &speed);
4189 sysfs_set_num(sra, NULL, "sync_speed_min", 200000);
4190 }
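/* When the data-disk count is unchanged (chunk-size or layout change),
 * every region must pass through the backup before being rewritten in
 * place, so temporarily raise sync_speed_min to push through quickly;
 * the saved value is restored before returning.
 */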
4191
4192 if (increasing) {
4193 array_size = sra->component_size * reshape->after.data_disks;
4194 backup_point = sra->reshape_progress;
4195 suspend_point = 0;
4196 } else {
4197 array_size = sra->component_size * reshape->before.data_disks;
4198 backup_point = reshape->backup_blocks;
4199 suspend_point = array_size;
4200 }
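/* A growing reshape proceeds upwards from the start of the array, so
 * suspend_point starts at 0 and rises; a shrinking one proceeds
 * downwards from the end, so suspend_point starts at the array size
 * and moves down as the reshape progresses.
 */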
4201
4202 while (!done) {
4203 int rv;
4204
4205 /* Want to return as soon as the oldest backup slot can
4206 * be released as that allows us to start backing up
4207 * some more, providing suspend_point has been
4208 * advanced, which it should have.
4209 */
4210 if (increasing) {
4211 wait_point = array_size;
4212 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4213 wait_point = (__le64_to_cpu(bsb.arraystart) +
4214 __le64_to_cpu(bsb.length));
4215 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4216 wait_point = (__le64_to_cpu(bsb.arraystart2) +
4217 __le64_to_cpu(bsb.length2));
4218 } else {
4219 wait_point = 0;
4220 if (part == 0 && __le64_to_cpu(bsb.length) > 0)
4221 wait_point = __le64_to_cpu(bsb.arraystart);
4222 if (part == 1 && __le64_to_cpu(bsb.length2) > 0)
4223 wait_point = __le64_to_cpu(bsb.arraystart2);
4224 }
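/* wait_point now marks how far the reshape must get before the data
 * covered by backup slot 'part' is safely in its new home and the
 * slot can be forgotten and reused.
 */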
4225
4226 reshape_completed = sra->reshape_progress;
4227 rv = progress_reshape(sra, reshape,
4228 backup_point, wait_point,
4229 &suspend_point, &reshape_completed,
4230 &frozen);
4231 /* external metadata would need to ping_monitor here */
4232 sra->reshape_progress = reshape_completed;
4233
4234 /* Clear any backup region that is before 'here' */
4235 if (increasing) {
4236 if (__le64_to_cpu(bsb.length) > 0 &&
4237 reshape_completed >= (__le64_to_cpu(bsb.arraystart) +
4238 __le64_to_cpu(bsb.length)))
4239 forget_backup(dests, destfd,
4240 destoffsets, 0);
4241 if (__le64_to_cpu(bsb.length2) > 0 &&
4242 reshape_completed >= (__le64_to_cpu(bsb.arraystart2) +
4243 __le64_to_cpu(bsb.length2)))
4244 forget_backup(dests, destfd,
4245 destoffsets, 1);
4246 } else {
4247 if (__le64_to_cpu(bsb.length) > 0 &&
4248 reshape_completed <= (__le64_to_cpu(bsb.arraystart)))
4249 forget_backup(dests, destfd,
4250 destoffsets, 0);
4251 if (__le64_to_cpu(bsb.length2) > 0 &&
4252 reshape_completed <= (__le64_to_cpu(bsb.arraystart2)))
4253 forget_backup(dests, destfd,
4254 destoffsets, 1);
4255 }
4256 if (sigterm)
4257 rv = -2;
4258 if (rv < 0) {
4259 if (rv == -1)
4260 done = 1;
4261 break;
4262 }
4263 if (rv == 0 && increasing && !st->ss->external) {
4264 /* No longer need to monitor this reshape */
4265 sysfs_set_str(sra, NULL, "sync_max", "max");
4266 done = 1;
4267 break;
4268 }
4269
4270 while (rv) {
4271 unsigned long long offset;
4272 unsigned long actual_stripes;
4273 /* Need to back up some data.
4274 * If 'part' is not in use and the region we want
4275 * to back up has been suspended, do a backup,
4276 * then move on to the next part.
4277 */
4278 /* Check that 'part' is unused */
4279 if (part == 0 && __le64_to_cpu(bsb.length) != 0)
4280 break;
4281 if (part == 1 && __le64_to_cpu(bsb.length2) != 0)
4282 break;
4283
4284 offset = backup_point / data;
4285 actual_stripes = stripes;
4286 if (increasing) {
4287 if (offset + actual_stripes * (chunk/512) >
4288 sra->component_size)
4289 actual_stripes = ((sra->component_size - offset)
4290 / (chunk/512));
4291 if (offset + actual_stripes * (chunk/512) >
4292 suspend_point/data)
4293 break;
4294 } else {
4295 if (offset < actual_stripes * (chunk/512))
4296 actual_stripes = offset / (chunk/512);
4297 offset -= actual_stripes * (chunk/512);
4298 if (offset < suspend_point/data)
4299 break;
4300 }
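/* Only data whose writes are currently suspended may be backed up,
 * so stop once the candidate region would cross suspend_point.
 */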
4301 if (actual_stripes == 0)
4302 break;
4303 grow_backup(sra, offset, actual_stripes,
4304 fds, offsets,
4305 disks, chunk, level, layout,
4306 dests, destfd, destoffsets,
4307 part, &degraded, buf);
4308 validate(afd, destfd[0], destoffsets[0]);
4309 /* record where 'part' is up to */
4310 part = !part;
4311 if (increasing)
4312 backup_point += actual_stripes * (chunk/512) * data;
4313 else
4314 backup_point -= actual_stripes * (chunk/512) * data;
4315 }
4316 }
4317
4318 /* FIXME maybe call progress_reshape one more time instead */
4319 abort_reshape(sra); /* remove any remaining suspension */
4320 if (reshape->before.data_disks == reshape->after.data_disks)
4321 sysfs_set_num(sra, NULL, "sync_speed_min", speed);
4322 free(buf);
4323 return done;
4324 }
4325
4326 /*
4327 * If any spare contains md_backup_data-1 which is recent with respect to the array's mtime,
4328 * write that data into the array and update the super blocks with
4329 * the new reshape_progress
4330 */
4331 int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, int cnt,
4332 char *backup_file, int verbose)
4333 {
4334 int i, j;
4335 int old_disks;
4336 unsigned long long *offsets;
4337 unsigned long long nstripe, ostripe;
4338 int ndata, odata;
4339
4340 odata = info->array.raid_disks - info->delta_disks - 1;
4341 if (info->array.level == 6) odata--; /* number of data disks */
4342 ndata = info->array.raid_disks - 1;
4343 if (info->new_level == 6) ndata--;
4344
4345 old_disks = info->array.raid_disks - info->delta_disks;
4346
4347 if (info->delta_disks <= 0)
4348 /* Didn't grow, so the backup file must have
4349 * been used
4350 */
4351 old_disks = cnt;
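/* Scan the candidate backup locations: every slot beyond the old array
 * members (the spares), and, when a --backup-file was given, the file
 * itself, which takes the place of slot old_disks-1.
 */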
4352 for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
4353 struct mdinfo dinfo;
4354 int fd;
4355 int bsbsize;
4356 char *devname, namebuf[20];
4357 unsigned long long lo, hi;
4358
4359 /* This was a spare and may have some saved data on it.
4360 * Load the superblock, find and load the
4361 * backup_super_block.
4362 * If either fail, go on to next device.
4363 * If the backup contains no new info, just return
4364 * else restore data and update all superblocks
4365 */
4366 if (i == old_disks-1) {
4367 fd = open(backup_file, O_RDONLY);
4368 if (fd<0) {
4369 pr_err("backup file %s inaccessible: %s\n",
4370 backup_file, strerror(errno));
4371 continue;
4372 }
4373 devname = backup_file;
4374 } else {
4375 fd = fdlist[i];
4376 if (fd < 0)
4377 continue;
4378 if (st->ss->load_super(st, fd, NULL))
4379 continue;
4380
4381 st->ss->getinfo_super(st, &dinfo, NULL);
4382 st->ss->free_super(st);
4383
4384 if (lseek64(fd,
4385 (dinfo.data_offset + dinfo.component_size - 8) <<9,
4386 0) < 0) {
4387 pr_err("Cannot seek on device %d\n", i);
4388 continue; /* Cannot seek */
4389 }
4390 sprintf(namebuf, "device-%d", i);
4391 devname = namebuf;
4392 }
4393 if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
4394 if (verbose)
4395 pr_err("Cannot read from %s\n", devname);
4396 continue; /* Cannot read */
4397 }
4398 if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
4399 memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
4400 if (verbose)
4401 pr_err("No backup metadata on %s\n", devname);
4402 continue;
4403 }
4404 if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
4405 if (verbose)
4406 pr_err("Bad backup-metadata checksum on %s\n", devname);
4407 continue; /* bad checksum */
4408 }
4409 if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
4410 bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
4411 if (verbose)
4412 pr_err("Bad backup-metadata checksum2 on %s\n", devname);
4413 continue; /* Bad second checksum */
4414 }
4415 if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
4416 if (verbose)
4417 pr_err("Wrong uuid on backup-metadata on %s\n", devname);
4418 continue; /* Wrong uuid */
4419 }
4420
4421 /* array utime and backup-mtime should be updated at much the same time, but it seems that
4422 * sometimes they aren't... So allow considerable flexibility in matching, and allow
4423 * this test to be overridden by an environment variable.
4424 */
4425 if (info->array.utime > (int)__le64_to_cpu(bsb.mtime) + 2*60*60 ||
4426 info->array.utime < (int)__le64_to_cpu(bsb.mtime) - 10*60) {
4427 if (check_env("MDADM_GROW_ALLOW_OLD")) {
4428 pr_err("accepting backup with timestamp %lu "
4429 "for array with timestamp %lu\n",
4430 (unsigned long)__le64_to_cpu(bsb.mtime),
4431 (unsigned long)info->array.utime);
4432 } else {
4433 pr_err("too-old timestamp on backup-metadata on %s\n", devname);
4434 pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
4435 continue; /* time stamp is too bad */
4436 }
4437 }
4438
4439 if (bsb.magic[15] == '1') {
4440 if (bsb.length == 0)
4441 continue;
4442 if (info->delta_disks >= 0) {
4443 /* reshape_progress is increasing */
4444 if (__le64_to_cpu(bsb.arraystart)
4445 + __le64_to_cpu(bsb.length)
4446 < info->reshape_progress) {
4447 nonew:
4448 if (verbose)
4449 pr_err("backup-metadata found on %s but is not needed\n", devname);
4450 continue; /* No new data here */
4451 }
4452 } else {
4453 /* reshape_progress is decreasing */
4454 if (__le64_to_cpu(bsb.arraystart) >=
4455 info->reshape_progress)
4456 goto nonew; /* No new data here */
4457 }
4458 } else {
4459 if (bsb.length == 0 && bsb.length2 == 0)
4460 continue;
4461 if (info->delta_disks >= 0) {
4462 /* reshape_progress is increasing */
4463 if ((__le64_to_cpu(bsb.arraystart)
4464 + __le64_to_cpu(bsb.length)
4465 < info->reshape_progress)
4466 &&
4467 (__le64_to_cpu(bsb.arraystart2)
4468 + __le64_to_cpu(bsb.length2)
4469 < info->reshape_progress))
4470 goto nonew; /* No new data here */
4471 } else {
4472 /* reshape_progress is decreasing */
4473 if (__le64_to_cpu(bsb.arraystart) >=
4474 info->reshape_progress &&
4475 __le64_to_cpu(bsb.arraystart2) >=
4476 info->reshape_progress)
4477 goto nonew; /* No new data here */
4478 }
4479 }
4480 if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
4481 second_fail:
4482 if (verbose)
4483 pr_err("Failed to verify secondary backup-metadata block on %s\n",
4484 devname);
4485 continue; /* Cannot seek */
4486 }
4487 /* There should be a duplicate backup superblock 4k before here */
4488 if (lseek64(fd, -4096, 1) < 0 ||
4489 read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
4490 goto second_fail; /* Cannot find leading superblock */
4491 if (bsb.magic[15] == '1')
4492 bsbsize = offsetof(struct mdp_backup_super, pad1);
4493 else
4494 bsbsize = offsetof(struct mdp_backup_super, pad);
4495 if (memcmp(&bsb2, &bsb, bsbsize) != 0)
4496 goto second_fail; /* Cannot find leading superblock */
4497
4498 /* Now need the data offsets for all devices. */
4499 offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
4500 for(j=0; j<info->array.raid_disks; j++) {
4501 if (fdlist[j] < 0)
4502 continue;
4503 if (st->ss->load_super(st, fdlist[j], NULL))
4504 /* FIXME should this be an error? */
4505 continue;
4506 st->ss->getinfo_super(st, &dinfo, NULL);
4507 st->ss->free_super(st);
4508 offsets[j] = dinfo.data_offset * 512;
4509 }
4510 printf(Name ": restoring critical section\n");
4511
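/* The saved data is written back using the new geometry
 * (new_chunk/new_level/new_layout); the restored region is then
 * accounted as already reshaped when reshape_progress is updated below.
 */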
4512 if (restore_stripes(fdlist, offsets,
4513 info->array.raid_disks,
4514 info->new_chunk,
4515 info->new_level,
4516 info->new_layout,
4517 fd, __le64_to_cpu(bsb.devstart)*512,
4518 __le64_to_cpu(bsb.arraystart)*512,
4519 __le64_to_cpu(bsb.length)*512, NULL)) {
4520 /* didn't succeed, so give up */
4521 if (verbose)
4522 pr_err("Error restoring backup from %s\n",
4523 devname);
4524 free(offsets);
4525 return 1;
4526 }
4527
4528 if (bsb.magic[15] == '2' &&
4529 restore_stripes(fdlist, offsets,
4530 info->array.raid_disks,
4531 info->new_chunk,
4532 info->new_level,
4533 info->new_layout,
4534 fd, __le64_to_cpu(bsb.devstart)*512 +
4535 __le64_to_cpu(bsb.devstart2)*512,
4536 __le64_to_cpu(bsb.arraystart2)*512,
4537 __le64_to_cpu(bsb.length2)*512, NULL)) {
4538 /* didn't succeed, so give up */
4539 if (verbose)
4540 pr_err("Error restoring second backup from %s\n",
4541 devname);
4542 free(offsets);
4543 return 1;
4544 }
4545
4546 free(offsets);
4547
4548 /* Ok, so the data is restored. Let's update those superblocks. */
4549
4550 lo = hi = 0;
4551 if (bsb.length) {
4552 lo = __le64_to_cpu(bsb.arraystart);
4553 hi = lo + __le64_to_cpu(bsb.length);
4554 }
4555 if (bsb.magic[15] == '2' && bsb.length2) {
4556 unsigned long long lo1, hi1;
4557 lo1 = __le64_to_cpu(bsb.arraystart2);
4558 hi1 = lo1 + __le64_to_cpu(bsb.length2);
4559 if (lo == hi) {
4560 lo = lo1;
4561 hi = hi1;
4562 } else if (lo < lo1)
4563 hi = hi1;
4564 else
4565 lo = lo1;
4566 }
4567 if (lo < hi &&
4568 (info->reshape_progress < lo ||
4569 info->reshape_progress > hi))
4570 /* backup does not affect reshape_progress */ ;
4571 else if (info->delta_disks >= 0) {
4572 info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
4573 __le64_to_cpu(bsb.length);
4574 if (bsb.magic[15] == '2') {
4575 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2) +
4576 __le64_to_cpu(bsb.length2);
4577 if (p2 > info->reshape_progress)
4578 info->reshape_progress = p2;
4579 }
4580 } else {
4581 info->reshape_progress = __le64_to_cpu(bsb.arraystart);
4582 if (bsb.magic[15] == '2') {
4583 unsigned long long p2 = __le64_to_cpu(bsb.arraystart2);
4584 if (p2 < info->reshape_progress)
4585 info->reshape_progress = p2;
4586 }
4587 }
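/* Push the corrected reshape_progress into every member's superblock
 * so that a later assembly resumes the reshape from the right place.
 */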
4588 for (j=0; j<info->array.raid_disks; j++) {
4589 if (fdlist[j] < 0)
4590 continue;
4591 if (st->ss->load_super(st, fdlist[j], NULL))
4592 continue;
4593 st->ss->getinfo_super(st, &dinfo, NULL);
4594 dinfo.reshape_progress = info->reshape_progress;
4595 st->ss->update_super(st, &dinfo,
4596 "_reshape_progress",
4597 NULL,0, 0, NULL);
4598 st->ss->store_super(st, fdlist[j]);
4599 st->ss->free_super(st);
4600 }
4601 return 0;
4602 }
4603 /* Didn't find any backup data, try to see if any
4604 * was needed.
4605 */
4606 if (info->delta_disks < 0) {
4607 /* When shrinking, the critical section is at the end.
4608 * So see if we are before the critical section.
4609 */
4610 unsigned long long first_block;
4611 nstripe = ostripe = 0;
4612 first_block = 0;
4613 while (ostripe >= nstripe) {
4614 ostripe += info->array.chunk_size / 512;
4615 first_block = ostripe * odata;
4616 nstripe = first_block / ndata / (info->new_chunk/512) *
4617 (info->new_chunk/512);
4618 }
4619
4620 if (info->reshape_progress >= first_block)
4621 return 0;
4622 }
4623 if (info->delta_disks > 0) {
4624 /* See if we are beyond the critical section. */
4625 unsigned long long last_block;
4626 nstripe = ostripe = 0;
4627 last_block = 0;
4628 while (nstripe >= ostripe) {
4629 nstripe += info->new_chunk / 512;
4630 last_block = nstripe * ndata;
4631 ostripe = last_block / odata / (info->array.chunk_size/512) *
4632 (info->array.chunk_size/512);
4633 }
4634
4635 if (info->reshape_progress >= last_block)
4636 return 0;
4637 }
4638 /* needed to recover critical section! */
4639 if (verbose)
4640 pr_err("Failed to find backup of critical section\n");
4641 return 1;
4642 }
4643
4644 int Grow_continue_command(char *devname, int fd,
4645 char *backup_file, int verbose)
4646 {
4647 int ret_val = 0;
4648 struct supertype *st = NULL;
4649 struct mdinfo *content = NULL;
4650 struct mdinfo array;
4651 char *subarray = NULL;
4652 struct mdinfo *cc = NULL;
4653 struct mdstat_ent *mdstat = NULL;
4654 int cfd = -1;
4655 int fd2 = -1;
4656
4657 dprintf("Grow continue from command line called for %s\n",
4658 devname);
4659
4660 st = super_by_fd(fd, &subarray);
4661 if (!st || !st->ss) {
4662 pr_err("Unable to determine metadata format for %s\n",
4663 devname);
4664 return 1;
4665 }
4666 dprintf("Grow continue is run for ");
4667 if (st->ss->external == 0) {
4668 int d;
4669 dprintf("native array (%s)\n", devname);
4670 if (ioctl(fd, GET_ARRAY_INFO, &array.array) < 0) {
4671 pr_err("%s is not an active md array -"
4672 " aborting\n", devname);
4673 ret_val = 1;
4674 goto Grow_continue_command_exit;
4675 }
4676 content = &array;
4677 /* Need to load a superblock.
4678 * FIXME we should really get what we need from
4679 * sysfs
4680 */
4681 for (d = 0; d < MAX_DISKS; d++) {
4682 mdu_disk_info_t disk;
4683 char *dv;
4684 int err;
4685 disk.number = d;
4686 if (ioctl(fd, GET_DISK_INFO, &disk) < 0)
4687 continue;
4688 if (disk.major == 0 && disk.minor == 0)
4689 continue;
4690 if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
4691 continue;
4692 dv = map_dev(disk.major, disk.minor, 1);
4693 if (!dv)
4694 continue;
4695 fd2 = dev_open(dv, O_RDONLY);
4696 if (fd2 < 0)
4697 continue;
4698 err = st->ss->load_super(st, fd2, NULL);
4699 close(fd2);
4700 /* invalidate fd2 to avoid possible double close() */
4701 fd2 = -1;
4702 if (err)
4703 continue;
4704 break;
4705 }
4706 if (d == MAX_DISKS) {
4707 pr_err("Unable to load metadata for %s\n",
4708 devname);
4709 ret_val = 1;
4710 goto Grow_continue_command_exit;
4711 }
4712 st->ss->getinfo_super(st, content, NULL);
4713 } else {
4714 char *container;
4715
4716 if (subarray) {
4717 dprintf("subarray (%s)\n", subarray);
4718 container = st->container_devnm;
4719 cfd = open_dev_excl(st->container_devnm);
4720 } else {
4721 container = st->devnm;
4722 close(fd);
4723 cfd = open_dev_excl(st->devnm);
4724 dprintf("container (%s)\n", container);
4725 fd = cfd;
4726 }
4727 if (cfd < 0) {
4728 pr_err("Unable to open container "
4729 "for %s\n", devname);
4730 ret_val = 1;
4731 goto Grow_continue_command_exit;
4732 }
4733
4734 /* find the array under reshape within the container
4735 */
4736 ret_val = st->ss->load_container(st, cfd, NULL);
4737 if (ret_val) {
4738 pr_err("Cannot read superblock for %s\n",
4739 devname);
4740 ret_val = 1;
4741 goto Grow_continue_command_exit;
4742 }
4743
4744 cc = st->ss->container_content(st, subarray);
4745 for (content = cc; content ; content = content->next) {
4746 char *array;
4747 int allow_reshape = 1;
4748
4749 if (content->reshape_active == 0)
4750 continue;
4751 /* The decision about array or container wide
4752 * reshape is taken in Grow_continue based on
4753 * content->reshape_active state, therefore we
4754 * need to check_reshape based on
4755 * reshape_active and subarray name
4756 */
4757 if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
4758 allow_reshape = 0;
4759 if (content->reshape_active == CONTAINER_RESHAPE &&
4760 (content->array.state
4761 & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
4762 allow_reshape = 0;
4763
4764 if (!allow_reshape) {
4765 pr_err("cannot continue reshape of an array"
4766 " in container with unsupported"
4767 " metadata: %s(%s)\n",
4768 devname, container);
4769 ret_val = 1;
4770 goto Grow_continue_command_exit;
4771 }
4772
4773 array = strchr(content->text_version+1, '/')+1;
4774 mdstat = mdstat_by_subdev(array, container);
4775 if (!mdstat)
4776 continue;
4777 if (mdstat->active == 0) {
4778 pr_err("Skipping inactive array %s.\n",
4779 mdstat->devnm);
4780 free_mdstat(mdstat);
4781 mdstat = NULL;
4782 continue;
4783 }
4784 break;
4785 }
4786 if (!content) {
4787 pr_err("Unable to determine reshaped "
4788 "array for %s\n", devname);
4789 ret_val = 1;
4790 goto Grow_continue_command_exit;
4791 }
4792 fd2 = open_dev(mdstat->devnm);
4793 if (fd2 < 0) {
4794 pr_err("cannot open (%s)\n", mdstat->devnm);
4795 ret_val = 1;
4796 goto Grow_continue_command_exit;
4797 }
4798
4799 sysfs_init(content, fd2, mdstat->devnm);
4800
4801 /* start mdmon in case it is not running
4802 */
4803 if (!mdmon_running(container))
4804 start_mdmon(container);
4805 ping_monitor(container);
4806
4807 if (mdmon_running(container))
4808 st->update_tail = &st->updates;
4809 else {
4810 pr_err("No mdmon found. "
4811 "Grow cannot continue.\n");
4812 ret_val = 1;
4813 goto Grow_continue_command_exit;
4814 }
4815 }
4816
4817 /* verify that the array under reshape is started from
4818 * the correct position
4819 */
4820 if (verify_reshape_position(content, content->array.level) < 0) {
4821 ret_val = 1;
4822 goto Grow_continue_command_exit;
4823 }
4824
4825 /* continue reshape
4826 */
4827 ret_val = Grow_continue(fd, st, content, backup_file, 0);
4828
4829 Grow_continue_command_exit:
4830 if (fd2 > -1)
4831 close(fd2);
4832 if (cfd > -1)
4833 close(cfd);
4834 st->ss->free_super(st);
4835 free_mdstat(mdstat);
4836 sysfs_free(cc);
4837 free(subarray);
4838
4839 return ret_val;
4840 }
4841
4842 int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info,
4843 char *backup_file, int freeze_reshape)
4844 {
4845 int ret_val = 2;
4846
4847 if (!info->reshape_active)
4848 return ret_val;
4849
4850 if (st->ss->external) {
4851 int cfd = open_dev(st->container_devnm);
4852
4853 if (cfd < 0)
4854 return 1;
4855
4856 st->ss->load_container(st, cfd, st->container_devnm);
4857 close(cfd);
4858 ret_val = reshape_container(st->container_devnm, NULL, mdfd,
4859 st, info, 0, backup_file,
4860 0,
4861 1 | info->reshape_active,
4862 freeze_reshape);
4863 } else
4864 ret_val = reshape_array(NULL, mdfd, "array", st, info, 1,
4865 NULL, INVALID_SECTORS,
4866 backup_file, 0, 0,
4867 1 | info->reshape_active,
4868 freeze_reshape);
4869
4870 return ret_val;
4871 }