]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * mdadm - manage Linux "md" devices aka RAID arrays. | |
3 | * | |
4 | * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de> | |
5 | * | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License as published by | |
9 | * the Free Software Foundation; either version 2 of the License, or | |
10 | * (at your option) any later version. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License | |
18 | * along with this program; if not, write to the Free Software | |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
20 | * | |
21 | * Author: Neil Brown | |
22 | * Email: <neilb@suse.de> | |
23 | */ | |
24 | #include "mdadm.h" | |
25 | #include "dlink.h" | |
26 | #include <sys/mman.h> | |
27 | #include <stddef.h> | |
28 | #include <stdint.h> | |
29 | #include <sys/wait.h> | |
30 | ||
31 | #if ! defined(__BIG_ENDIAN) && ! defined(__LITTLE_ENDIAN) | |
32 | #error no endian defined | |
33 | #endif | |
34 | #include "md_u.h" | |
35 | #include "md_p.h" | |
36 | ||
37 | int restore_backup(struct supertype *st, | |
38 | struct mdinfo *content, | |
39 | int working_disks, | |
40 | int next_spare, | |
41 | char **backup_filep, | |
42 | int verbose) | |
43 | { | |
44 | int i; | |
45 | int *fdlist; | |
46 | struct mdinfo *dev; | |
47 | int err; | |
48 | int disk_count = next_spare + working_disks; | |
49 | char *backup_file = *backup_filep; | |
50 | ||
51 | dprintf("Called restore_backup()\n"); | |
52 | fdlist = xmalloc(sizeof(int) * disk_count); | |
53 | ||
54 | enable_fds(next_spare); | |
55 | for (i = 0; i < next_spare; i++) | |
56 | fdlist[i] = -1; | |
57 | for (dev = content->devs; dev; dev = dev->next) { | |
58 | char buf[22]; | |
59 | int fd; | |
60 | ||
61 | sprintf(buf, "%d:%d", dev->disk.major, dev->disk.minor); | |
62 | fd = dev_open(buf, O_RDWR); | |
63 | ||
64 | if (dev->disk.raid_disk >= 0) | |
65 | fdlist[dev->disk.raid_disk] = fd; | |
66 | else | |
67 | fdlist[next_spare++] = fd; | |
68 | } | |
69 | ||
70 | if (!backup_file) { | |
71 | backup_file = locate_backup(content->sys_name); | |
72 | *backup_filep = backup_file; | |
73 | } | |
74 | ||
75 | if (st->ss->external && st->ss->recover_backup) | |
76 | err = st->ss->recover_backup(st, content); | |
77 | else | |
78 | err = Grow_restart(st, content, fdlist, next_spare, | |
79 | backup_file, verbose > 0); | |
80 | ||
81 | while (next_spare > 0) { | |
82 | next_spare--; | |
83 | if (fdlist[next_spare] >= 0) | |
84 | close(fdlist[next_spare]); | |
85 | } | |
86 | free(fdlist); | |
87 | if (err) { | |
88 | pr_err("Failed to restore critical section for reshape - sorry.\n"); | |
89 | if (!backup_file) | |
90 | pr_err("Possibly you need to specify a --backup-file\n"); | |
91 | return 1; | |
92 | } | |
93 | ||
94 | dprintf("restore_backup() returns status OK.\n"); | |
95 | return 0; | |
96 | } | |
97 | ||
98 | int Grow_Add_device(char *devname, int fd, char *newdev) | |
99 | { | |
100 | /* Add a device to an active array. | |
101 | * Currently, just extend a linear array. | |
102 | * This requires writing a new superblock on the | |
103 | * new device, calling the kernel to add the device, | |
104 | * and if that succeeds, update the superblock on | |
105 | * all other devices. | |
106 | * This means that we need to *find* all other devices. | |
107 | */ | |
108 | struct mdinfo info; | |
109 | ||
110 | dev_t rdev; | |
111 | int nfd, fd2; | |
112 | int d, nd; | |
113 | struct supertype *st = NULL; | |
114 | char *subarray = NULL; | |
115 | ||
116 | if (md_get_array_info(fd, &info.array) < 0) { | |
117 | pr_err("cannot get array info for %s\n", devname); | |
118 | return 1; | |
119 | } | |
120 | ||
121 | if (info.array.level != -1) { | |
122 | pr_err("can only add devices to linear arrays\n"); | |
123 | return 1; | |
124 | } | |
125 | ||
126 | st = super_by_fd(fd, &subarray); | |
127 | if (!st) { | |
128 | pr_err("cannot handle arrays with superblock version %d\n", | |
129 | info.array.major_version); | |
130 | return 1; | |
131 | } | |
132 | ||
133 | if (subarray) { | |
134 | pr_err("Cannot grow linear sub-arrays yet\n"); | |
135 | free(subarray); | |
136 | free(st); | |
137 | return 1; | |
138 | } | |
139 | ||
140 | nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT); | |
141 | if (nfd < 0) { | |
142 | pr_err("cannot open %s\n", newdev); | |
143 | free(st); | |
144 | return 1; | |
145 | } | |
146 | if (!fstat_is_blkdev(nfd, newdev, &rdev)) { | |
147 | close(nfd); | |
148 | free(st); | |
149 | return 1; | |
150 | } | |
151 | /* now check out all the devices and make sure we can read the | |
152 | * superblock */ | |
153 | for (d=0 ; d < info.array.raid_disks ; d++) { | |
154 | mdu_disk_info_t disk; | |
155 | char *dv; | |
156 | ||
157 | st->ss->free_super(st); | |
158 | ||
159 | disk.number = d; | |
160 | if (md_get_disk_info(fd, &disk) < 0) { | |
161 | pr_err("cannot get device detail for device %d\n", d); | |
162 | close(nfd); | |
163 | free(st); | |
164 | return 1; | |
165 | } | |
166 | dv = map_dev(disk.major, disk.minor, 1); | |
167 | if (!dv) { | |
168 | pr_err("cannot find device file for device %d\n", d); | |
169 | close(nfd); | |
170 | free(st); | |
171 | return 1; | |
172 | } | |
173 | fd2 = dev_open(dv, O_RDWR); | |
174 | if (fd2 < 0) { | |
175 | pr_err("cannot open device file %s\n", dv); | |
176 | close(nfd); | |
177 | free(st); | |
178 | return 1; | |
179 | } | |
180 | ||
181 | if (st->ss->load_super(st, fd2, NULL)) { | |
182 | pr_err("cannot find super block on %s\n", dv); | |
183 | close(nfd); | |
184 | close(fd2); | |
185 | free(st); | |
186 | return 1; | |
187 | } | |
188 | close(fd2); | |
189 | } | |
190 | /* Ok, looks good. Lets update the superblock and write it out to | |
191 | * newdev. | |
192 | */ | |
193 | ||
194 | info.disk.number = d; | |
195 | info.disk.major = major(rdev); | |
196 | info.disk.minor = minor(rdev); | |
197 | info.disk.raid_disk = d; | |
198 | info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE); | |
199 | if (st->ss->update_super(st, &info, UOPT_SPEC_LINEAR_GROW_NEW, newdev, | |
200 | 0, 0, NULL) != 0) { | |
201 | pr_err("Preparing new metadata failed on %s\n", newdev); | |
202 | close(nfd); | |
203 | return 1; | |
204 | } | |
205 | ||
206 | if (st->ss->store_super(st, nfd)) { | |
207 | pr_err("Cannot store new superblock on %s\n", newdev); | |
208 | close(nfd); | |
209 | return 1; | |
210 | } | |
211 | close(nfd); | |
212 | ||
213 | if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) { | |
214 | pr_err("Cannot add new disk to this array\n"); | |
215 | return 1; | |
216 | } | |
217 | /* Well, that seems to have worked. | |
218 | * Now go through and update all superblocks | |
219 | */ | |
220 | ||
221 | if (md_get_array_info(fd, &info.array) < 0) { | |
222 | pr_err("cannot get array info for %s\n", devname); | |
223 | return 1; | |
224 | } | |
225 | ||
226 | nd = d; | |
227 | for (d=0 ; d < info.array.raid_disks ; d++) { | |
228 | mdu_disk_info_t disk; | |
229 | char *dv; | |
230 | ||
231 | disk.number = d; | |
232 | if (md_get_disk_info(fd, &disk) < 0) { | |
233 | pr_err("cannot get device detail for device %d\n", d); | |
234 | return 1; | |
235 | } | |
236 | dv = map_dev(disk.major, disk.minor, 1); | |
237 | if (!dv) { | |
238 | pr_err("cannot find device file for device %d\n", d); | |
239 | return 1; | |
240 | } | |
241 | fd2 = dev_open(dv, O_RDWR); | |
242 | if (fd2 < 0) { | |
243 | pr_err("cannot open device file %s\n", dv); | |
244 | return 1; | |
245 | } | |
246 | if (st->ss->load_super(st, fd2, NULL)) { | |
247 | pr_err("cannot find super block on %s\n", dv); | |
248 | close(fd); | |
249 | close(fd2); | |
250 | return 1; | |
251 | } | |
252 | info.array.raid_disks = nd+1; | |
253 | info.array.nr_disks = nd+1; | |
254 | info.array.active_disks = nd+1; | |
255 | info.array.working_disks = nd+1; | |
256 | ||
257 | if (st->ss->update_super(st, &info, UOPT_SPEC_LINEAR_GROW_UPDATE, dv, | |
258 | 0, 0, NULL) != 0) { | |
259 | pr_err("Updating metadata failed on %s\n", dv); | |
260 | close(fd2); | |
261 | return 1; | |
262 | } | |
263 | ||
264 | if (st->ss->store_super(st, fd2)) { | |
265 | pr_err("Cannot store new superblock on %s\n", dv); | |
266 | close(fd2); | |
267 | return 1; | |
268 | } | |
269 | close(fd2); | |
270 | } | |
271 | ||
272 | return 0; | |
273 | } | |
274 | ||
275 | int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s) | |
276 | { | |
277 | /* | |
278 | * First check that array doesn't have a bitmap | |
279 | * Then create the bitmap | |
280 | * Then add it | |
281 | * | |
282 | * For internal bitmaps, we need to check the version, | |
283 | * find all the active devices, and write the bitmap block | |
284 | * to all devices | |
285 | */ | |
286 | mdu_bitmap_file_t bmf; | |
287 | mdu_array_info_t array; | |
288 | struct supertype *st; | |
289 | char *subarray = NULL; | |
290 | int major = BITMAP_MAJOR_HI; | |
291 | unsigned long long bitmapsize, array_size; | |
292 | struct mdinfo *mdi; | |
293 | ||
294 | /* | |
295 | * We only ever get called if s->bitmap_file is != NULL, so this check | |
296 | * is just here to quiet down static code checkers. | |
297 | */ | |
298 | if (!s->bitmap_file) | |
299 | return 1; | |
300 | ||
301 | if (strcmp(s->bitmap_file, "clustered") == 0) | |
302 | major = BITMAP_MAJOR_CLUSTERED; | |
303 | ||
304 | if (ioctl(fd, GET_BITMAP_FILE, &bmf) != 0) { | |
305 | if (errno == ENOMEM) | |
306 | pr_err("Memory allocation failure.\n"); | |
307 | else | |
308 | pr_err("bitmaps not supported by this kernel.\n"); | |
309 | return 1; | |
310 | } | |
311 | if (bmf.pathname[0]) { | |
312 | if (str_is_none(s->bitmap_file) == true) { | |
313 | if (ioctl(fd, SET_BITMAP_FILE, -1) != 0) { | |
314 | pr_err("failed to remove bitmap %s\n", | |
315 | bmf.pathname); | |
316 | return 1; | |
317 | } | |
318 | return 0; | |
319 | } | |
320 | pr_err("%s already has a bitmap (%s)\n", devname, bmf.pathname); | |
321 | return 1; | |
322 | } | |
323 | if (md_get_array_info(fd, &array) != 0) { | |
324 | pr_err("cannot get array status for %s\n", devname); | |
325 | return 1; | |
326 | } | |
327 | if (array.state & (1 << MD_SB_BITMAP_PRESENT)) { | |
328 | if (str_is_none(s->bitmap_file) == true) { | |
329 | array.state &= ~(1 << MD_SB_BITMAP_PRESENT); | |
330 | if (md_set_array_info(fd, &array) != 0) { | |
331 | if (array.state & (1 << MD_SB_CLUSTERED)) | |
332 | pr_err("failed to remove clustered bitmap.\n"); | |
333 | else | |
334 | pr_err("failed to remove internal bitmap.\n"); | |
335 | return 1; | |
336 | } | |
337 | return 0; | |
338 | } | |
339 | pr_err("bitmap already present on %s\n", devname); | |
340 | return 1; | |
341 | } | |
342 | ||
343 | if (str_is_none(s->bitmap_file) == true) { | |
344 | pr_err("no bitmap found on %s\n", devname); | |
345 | return 1; | |
346 | } | |
347 | if (array.level <= 0) { | |
348 | pr_err("Bitmaps not meaningful with level %s\n", | |
349 | map_num(pers, array.level)?:"of this array"); | |
350 | return 1; | |
351 | } | |
352 | bitmapsize = array.size; | |
353 | bitmapsize <<= 1; | |
354 | if (get_dev_size(fd, NULL, &array_size) && | |
355 | array_size > (0x7fffffffULL << 9)) { | |
356 | /* Array is big enough that we cannot trust array.size | |
357 | * try other approaches | |
358 | */ | |
359 | bitmapsize = get_component_size(fd); | |
360 | } | |
361 | if (bitmapsize == 0) { | |
362 | pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n"); | |
363 | return 1; | |
364 | } | |
365 | ||
366 | if (array.level == 10) { | |
367 | int ncopies; | |
368 | ||
369 | ncopies = (array.layout & 255) * ((array.layout >> 8) & 255); | |
370 | bitmapsize = bitmapsize * array.raid_disks / ncopies; | |
371 | ||
372 | if (strcmp(s->bitmap_file, "clustered") == 0 && | |
373 | !is_near_layout_10(array.layout)) { | |
374 | pr_err("only near layout is supported with clustered raid10\n"); | |
375 | return 1; | |
376 | } | |
377 | } | |
378 | ||
379 | st = super_by_fd(fd, &subarray); | |
380 | if (!st) { | |
381 | pr_err("Cannot understand version %d.%d\n", | |
382 | array.major_version, array.minor_version); | |
383 | return 1; | |
384 | } | |
385 | if (subarray) { | |
386 | pr_err("Cannot add bitmaps to sub-arrays yet\n"); | |
387 | free(subarray); | |
388 | free(st); | |
389 | return 1; | |
390 | } | |
391 | ||
392 | mdi = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY); | |
393 | if (mdi) { | |
394 | if (mdi->consistency_policy == CONSISTENCY_POLICY_PPL) { | |
395 | pr_err("Cannot add bitmap to array with PPL\n"); | |
396 | free(mdi); | |
397 | free(st); | |
398 | return 1; | |
399 | } | |
400 | free(mdi); | |
401 | } | |
402 | ||
403 | if (strcmp(s->bitmap_file, "internal") == 0 || | |
404 | strcmp(s->bitmap_file, "clustered") == 0) { | |
405 | int rv; | |
406 | int d; | |
407 | int offset_setable = 0; | |
408 | if (st->ss->add_internal_bitmap == NULL) { | |
409 | pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name); | |
410 | return 1; | |
411 | } | |
412 | st->nodes = c->nodes; | |
413 | st->cluster_name = c->homecluster; | |
414 | mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION); | |
415 | if (mdi) | |
416 | offset_setable = 1; | |
417 | for (d = 0; d < st->max_devs; d++) { | |
418 | mdu_disk_info_t disk; | |
419 | char *dv; | |
420 | int fd2; | |
421 | ||
422 | disk.number = d; | |
423 | if (md_get_disk_info(fd, &disk) < 0) | |
424 | continue; | |
425 | if (disk.major == 0 && disk.minor == 0) | |
426 | continue; | |
427 | if ((disk.state & (1 << MD_DISK_SYNC)) == 0) | |
428 | continue; | |
429 | dv = map_dev(disk.major, disk.minor, 1); | |
430 | if (!dv) | |
431 | continue; | |
432 | if ((disk.state & (1 << MD_DISK_WRITEMOSTLY)) && | |
433 | (strcmp(s->bitmap_file, "clustered") == 0)) { | |
434 | pr_err("%s disks marked write-mostly are not supported with clustered bitmap\n",devname); | |
435 | free(mdi); | |
436 | return 1; | |
437 | } | |
438 | fd2 = dev_open(dv, O_RDWR); | |
439 | if (fd2 < 0) | |
440 | continue; | |
441 | rv = st->ss->load_super(st, fd2, NULL); | |
442 | if (!rv) { | |
443 | rv = st->ss->add_internal_bitmap( | |
444 | st, &s->bitmap_chunk, c->delay, | |
445 | s->write_behind, bitmapsize, | |
446 | offset_setable, major); | |
447 | if (!rv) { | |
448 | st->ss->write_bitmap(st, fd2, | |
449 | NodeNumUpdate); | |
450 | } else { | |
451 | pr_err("failed to create internal bitmap - chunksize problem.\n"); | |
452 | } | |
453 | } else { | |
454 | pr_err("failed to load super-block.\n"); | |
455 | } | |
456 | close(fd2); | |
457 | if (rv) { | |
458 | free(mdi); | |
459 | return 1; | |
460 | } | |
461 | } | |
462 | if (offset_setable) { | |
463 | st->ss->getinfo_super(st, mdi, NULL); | |
464 | if (sysfs_init(mdi, fd, NULL)) { | |
465 | pr_err("failed to initialize sysfs.\n"); | |
466 | free(mdi); | |
467 | } | |
468 | rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location", | |
469 | mdi->bitmap_offset); | |
470 | free(mdi); | |
471 | } else { | |
472 | if (strcmp(s->bitmap_file, "clustered") == 0) | |
473 | array.state |= (1 << MD_SB_CLUSTERED); | |
474 | array.state |= (1 << MD_SB_BITMAP_PRESENT); | |
475 | rv = md_set_array_info(fd, &array); | |
476 | } | |
477 | if (rv < 0) { | |
478 | if (errno == EBUSY) | |
479 | pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n"); | |
480 | pr_err("failed to set internal bitmap.\n"); | |
481 | return 1; | |
482 | } | |
483 | } else { | |
484 | int uuid[4]; | |
485 | int bitmap_fd; | |
486 | int d; | |
487 | int max_devs = st->max_devs; | |
488 | ||
489 | /* try to load a superblock */ | |
490 | for (d = 0; d < max_devs; d++) { | |
491 | mdu_disk_info_t disk; | |
492 | char *dv; | |
493 | int fd2; | |
494 | disk.number = d; | |
495 | if (md_get_disk_info(fd, &disk) < 0) | |
496 | continue; | |
497 | if ((disk.major==0 && disk.minor == 0) || | |
498 | (disk.state & (1 << MD_DISK_REMOVED))) | |
499 | continue; | |
500 | dv = map_dev(disk.major, disk.minor, 1); | |
501 | if (!dv) | |
502 | continue; | |
503 | fd2 = dev_open(dv, O_RDONLY); | |
504 | if (fd2 >= 0) { | |
505 | if (st->ss->load_super(st, fd2, NULL) == 0) { | |
506 | close(fd2); | |
507 | st->ss->uuid_from_super(st, uuid); | |
508 | break; | |
509 | } | |
510 | close(fd2); | |
511 | } | |
512 | } | |
513 | if (d == max_devs) { | |
514 | pr_err("cannot find UUID for array!\n"); | |
515 | return 1; | |
516 | } | |
517 | if (CreateBitmap(s->bitmap_file, c->force, (char*)uuid, | |
518 | s->bitmap_chunk, c->delay, s->write_behind, | |
519 | bitmapsize, major)) { | |
520 | return 1; | |
521 | } | |
522 | bitmap_fd = open(s->bitmap_file, O_RDWR); | |
523 | if (bitmap_fd < 0) { | |
524 | pr_err("weird: %s cannot be opened\n", s->bitmap_file); | |
525 | return 1; | |
526 | } | |
527 | if (ioctl(fd, SET_BITMAP_FILE, bitmap_fd) < 0) { | |
528 | int err = errno; | |
529 | if (errno == EBUSY) | |
530 | pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n"); | |
531 | pr_err("Cannot set bitmap file for %s: %s\n", | |
532 | devname, strerror(err)); | |
533 | return 1; | |
534 | } | |
535 | } | |
536 | ||
537 | return 0; | |
538 | } | |
539 | ||
540 | int Grow_consistency_policy(char *devname, int fd, struct context *c, struct shape *s) | |
541 | { | |
542 | struct supertype *st; | |
543 | struct mdinfo *sra; | |
544 | struct mdinfo *sd; | |
545 | char *subarray = NULL; | |
546 | int ret = 0; | |
547 | char container_dev[PATH_MAX]; | |
548 | char buf[SYSFS_MAX_BUF_SIZE]; | |
549 | ||
550 | if (s->consistency_policy != CONSISTENCY_POLICY_RESYNC && | |
551 | s->consistency_policy != CONSISTENCY_POLICY_PPL) { | |
552 | pr_err("Operation not supported for consistency policy %s\n", | |
553 | map_num_s(consistency_policies, s->consistency_policy)); | |
554 | return 1; | |
555 | } | |
556 | ||
557 | st = super_by_fd(fd, &subarray); | |
558 | if (!st) | |
559 | return 1; | |
560 | ||
561 | sra = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY|GET_LEVEL| | |
562 | GET_DEVS|GET_STATE); | |
563 | if (!sra) { | |
564 | ret = 1; | |
565 | goto free_st; | |
566 | } | |
567 | ||
568 | if (s->consistency_policy == CONSISTENCY_POLICY_PPL && | |
569 | !st->ss->write_init_ppl) { | |
570 | pr_err("%s metadata does not support PPL\n", st->ss->name); | |
571 | ret = 1; | |
572 | goto free_info; | |
573 | } | |
574 | ||
575 | if (sra->array.level != 5) { | |
576 | pr_err("Operation not supported for array level %d\n", | |
577 | sra->array.level); | |
578 | ret = 1; | |
579 | goto free_info; | |
580 | } | |
581 | ||
582 | if (sra->consistency_policy == (unsigned)s->consistency_policy) { | |
583 | pr_err("Consistency policy is already %s\n", | |
584 | map_num_s(consistency_policies, s->consistency_policy)); | |
585 | ret = 1; | |
586 | goto free_info; | |
587 | } else if (sra->consistency_policy != CONSISTENCY_POLICY_RESYNC && | |
588 | sra->consistency_policy != CONSISTENCY_POLICY_PPL) { | |
589 | pr_err("Current consistency policy is %s, cannot change to %s\n", | |
590 | map_num_s(consistency_policies, sra->consistency_policy), | |
591 | map_num_s(consistency_policies, s->consistency_policy)); | |
592 | ret = 1; | |
593 | goto free_info; | |
594 | } | |
595 | ||
596 | if (s->consistency_policy == CONSISTENCY_POLICY_PPL) { | |
597 | if (sysfs_get_str(sra, NULL, "sync_action", buf, sizeof(buf)) <= 0) { | |
598 | ret = 1; | |
599 | goto free_info; | |
600 | } else if (strcmp(buf, "reshape\n") == 0) { | |
601 | pr_err("PPL cannot be enabled when reshape is in progress\n"); | |
602 | ret = 1; | |
603 | goto free_info; | |
604 | } | |
605 | } | |
606 | ||
607 | if (subarray) { | |
608 | enum update_opt update; | |
609 | ||
610 | if (s->consistency_policy == CONSISTENCY_POLICY_PPL) | |
611 | update = UOPT_PPL; | |
612 | else | |
613 | update = UOPT_NO_PPL; | |
614 | ||
615 | sprintf(container_dev, "/dev/%s", st->container_devnm); | |
616 | ||
617 | ret = Update_subarray(container_dev, subarray, update, NULL, | |
618 | c->verbose); | |
619 | if (ret) | |
620 | goto free_info; | |
621 | } | |
622 | ||
623 | if (s->consistency_policy == CONSISTENCY_POLICY_PPL) { | |
624 | struct mdinfo info; | |
625 | ||
626 | if (subarray) { | |
627 | struct mdinfo *mdi; | |
628 | int cfd; | |
629 | ||
630 | cfd = open(container_dev, O_RDWR|O_EXCL); | |
631 | if (cfd < 0) { | |
632 | pr_err("Failed to open %s\n", container_dev); | |
633 | ret = 1; | |
634 | goto free_info; | |
635 | } | |
636 | ||
637 | ret = st->ss->load_container(st, cfd, st->container_devnm); | |
638 | close(cfd); | |
639 | ||
640 | if (ret) { | |
641 | pr_err("Cannot read superblock for %s\n", | |
642 | container_dev); | |
643 | goto free_info; | |
644 | } | |
645 | ||
646 | mdi = st->ss->container_content(st, subarray); | |
647 | info = *mdi; | |
648 | free(mdi); | |
649 | } | |
650 | ||
651 | for (sd = sra->devs; sd; sd = sd->next) { | |
652 | int dfd; | |
653 | char *devpath; | |
654 | ||
655 | devpath = map_dev(sd->disk.major, sd->disk.minor, 0); | |
656 | dfd = dev_open(devpath, O_RDWR); | |
657 | if (dfd < 0) { | |
658 | pr_err("Failed to open %s\n", devpath); | |
659 | ret = 1; | |
660 | goto free_info; | |
661 | } | |
662 | ||
663 | if (!subarray) { | |
664 | ret = st->ss->load_super(st, dfd, NULL); | |
665 | if (ret) { | |
666 | pr_err("Failed to load super-block.\n"); | |
667 | close(dfd); | |
668 | goto free_info; | |
669 | } | |
670 | ||
671 | ret = st->ss->update_super(st, sra, UOPT_PPL, | |
672 | devname, | |
673 | c->verbose, 0, NULL); | |
674 | if (ret) { | |
675 | close(dfd); | |
676 | st->ss->free_super(st); | |
677 | goto free_info; | |
678 | } | |
679 | st->ss->getinfo_super(st, &info, NULL); | |
680 | } | |
681 | ||
682 | ret |= sysfs_set_num(sra, sd, "ppl_sector", | |
683 | info.ppl_sector); | |
684 | ret |= sysfs_set_num(sra, sd, "ppl_size", | |
685 | info.ppl_size); | |
686 | ||
687 | if (ret) { | |
688 | pr_err("Failed to set PPL attributes for %s\n", | |
689 | sd->sys_name); | |
690 | close(dfd); | |
691 | st->ss->free_super(st); | |
692 | goto free_info; | |
693 | } | |
694 | ||
695 | ret = st->ss->write_init_ppl(st, &info, dfd); | |
696 | if (ret) | |
697 | pr_err("Failed to write PPL\n"); | |
698 | ||
699 | close(dfd); | |
700 | ||
701 | if (!subarray) | |
702 | st->ss->free_super(st); | |
703 | ||
704 | if (ret) | |
705 | goto free_info; | |
706 | } | |
707 | } | |
708 | ||
709 | ret = sysfs_set_str(sra, NULL, "consistency_policy", | |
710 | map_num_s(consistency_policies, | |
711 | s->consistency_policy)); | |
712 | if (ret) | |
713 | pr_err("Failed to change array consistency policy\n"); | |
714 | ||
715 | free_info: | |
716 | sysfs_free(sra); | |
717 | free_st: | |
718 | free(st); | |
719 | free(subarray); | |
720 | ||
721 | return ret; | |
722 | } | |
723 | ||
724 | /* | |
725 | * When reshaping an array we might need to backup some data. | |
726 | * This is written to all spares with a 'super_block' describing it. | |
727 | * The superblock goes 4K from the end of the used space on the | |
728 | * device. | |
729 | * It is written after the backup is complete. | |
730 | * It has the following structure. | |
731 | */ | |
732 | ||
/*
 * On-disk layout of the backup superblock written to spare devices
 * during a reshape.  This is an external format: do not reorder,
 * resize or re-type any field.  The explicit pad keeps the structure
 * exactly 512 bytes; two static instances (bsb, bsb2) are kept.
 */
static struct mdp_backup_super {
	char magic[16];		/* "md_backup_data-1" or "-2" */
	__u8 set_uuid[16];	/* UUID of the array being backed up */
	__u64 mtime;
	/* start/sizes in 512byte sectors */
	__u64 devstart;		/* address on backup device/file of data */
	__u64 arraystart;
	__u64 length;
	__u32 sb_csum;		/* csum of preceding bytes */
	__u32 pad1;
	__u64 devstart2;	/* offset in to data of second section */
	__u64 arraystart2;
	__u64 length2;
	__u32 sb_csum2;		/* csum of preceding bytes */
	__u8 pad[512-68-32];	/* pad out to exactly 512 bytes */
} __attribute__((aligned(512))) bsb, bsb2;
749 | ||
static __u32 bsb_csum(char *buf, int len)
{
	/* Checksum over the backup superblock header, stored little-endian.
	 *
	 * NOTE(review): each round folds in buf[0], not buf[i].  That looks
	 * like a typo, but this value is part of the on-disk backup format,
	 * so "fixing" it would invalidate every existing backup superblock.
	 * Deliberately left behaviourally identical.
	 */
	int csum = 0;
	int remaining = len;

	while (remaining-- > 0)
		csum = (csum << 3) + buf[0];

	return __cpu_to_le32(csum);
}
758 | ||
759 | static int check_idle(struct supertype *st) | |
760 | { | |
761 | /* Check that all member arrays for this container, or the | |
762 | * container of this array, are idle | |
763 | */ | |
764 | char *container = (st->container_devnm[0] | |
765 | ? st->container_devnm : st->devnm); | |
766 | struct mdstat_ent *ent, *e; | |
767 | int is_idle = 1; | |
768 | ||
769 | ent = mdstat_read(0, 0); | |
770 | for (e = ent ; e; e = e->next) { | |
771 | if (!is_container_member(e, container)) | |
772 | continue; | |
773 | /* frozen array is not idle*/ | |
774 | if (e->percent >= 0 || e->metadata_version[9] == '-') { | |
775 | is_idle = 0; | |
776 | break; | |
777 | } | |
778 | } | |
779 | free_mdstat(ent); | |
780 | return is_idle; | |
781 | } | |
782 | ||
783 | static int freeze_container(struct supertype *st) | |
784 | { | |
785 | char *container = (st->container_devnm[0] | |
786 | ? st->container_devnm : st->devnm); | |
787 | ||
788 | if (!check_idle(st)) | |
789 | return -1; | |
790 | ||
791 | if (block_monitor(container, 1)) { | |
792 | pr_err("failed to freeze container\n"); | |
793 | return -2; | |
794 | } | |
795 | ||
796 | return 1; | |
797 | } | |
798 | ||
799 | static void unfreeze_container(struct supertype *st) | |
800 | { | |
801 | char *container = (st->container_devnm[0] | |
802 | ? st->container_devnm : st->devnm); | |
803 | ||
804 | unblock_monitor(container, 1); | |
805 | } | |
806 | ||
807 | static int freeze(struct supertype *st) | |
808 | { | |
809 | /* Try to freeze resync/rebuild on this array/container. | |
810 | * Return -1 if the array is busy, | |
811 | * return -2 container cannot be frozen, | |
812 | * return 0 if this kernel doesn't support 'frozen' | |
813 | * return 1 if it worked. | |
814 | */ | |
815 | if (st->ss->external) | |
816 | return freeze_container(st); | |
817 | else { | |
818 | struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION); | |
819 | int err; | |
820 | char buf[SYSFS_MAX_BUF_SIZE]; | |
821 | ||
822 | if (!sra) | |
823 | return -1; | |
824 | /* Need to clear any 'read-auto' status */ | |
825 | if (sysfs_get_str(sra, NULL, "array_state", buf, sizeof(buf)) > 0 && | |
826 | strncmp(buf, "read-auto", 9) == 0) | |
827 | sysfs_set_str(sra, NULL, "array_state", "clean"); | |
828 | ||
829 | err = sysfs_freeze_array(sra); | |
830 | sysfs_free(sra); | |
831 | return err; | |
832 | } | |
833 | } | |
834 | ||
835 | static void unfreeze(struct supertype *st) | |
836 | { | |
837 | if (st->ss->external) | |
838 | return unfreeze_container(st); | |
839 | else { | |
840 | struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION); | |
841 | char buf[SYSFS_MAX_BUF_SIZE]; | |
842 | ||
843 | if (sra && | |
844 | sysfs_get_str(sra, NULL, "sync_action", buf, sizeof(buf)) > 0 && | |
845 | strcmp(buf, "frozen\n") == 0) | |
846 | sysfs_set_str(sra, NULL, "sync_action", "idle"); | |
847 | sysfs_free(sra); | |
848 | } | |
849 | } | |
850 | ||
851 | static void wait_reshape(struct mdinfo *sra) | |
852 | { | |
853 | int fd = sysfs_get_fd(sra, NULL, "sync_action"); | |
854 | char action[SYSFS_MAX_BUF_SIZE]; | |
855 | ||
856 | if (fd < 0) | |
857 | return; | |
858 | ||
859 | while (sysfs_fd_get_str(fd, action, sizeof(action)) > 0 && | |
860 | strncmp(action, "reshape", 7) == 0) | |
861 | sysfs_wait(fd, NULL); | |
862 | close(fd); | |
863 | } | |
864 | ||
865 | static int reshape_super(struct supertype *st, unsigned long long size, | |
866 | int level, int layout, int chunksize, int raid_disks, | |
867 | int delta_disks, char *backup_file, char *dev, | |
868 | int direction, int verbose) | |
869 | { | |
870 | /* nothing extra to check in the native case */ | |
871 | if (!st->ss->external) | |
872 | return 0; | |
873 | if (!st->ss->reshape_super || !st->ss->manage_reshape) { | |
874 | pr_err("%s metadata does not support reshape\n", | |
875 | st->ss->name); | |
876 | return 1; | |
877 | } | |
878 | ||
879 | return st->ss->reshape_super(st, size, level, layout, chunksize, | |
880 | raid_disks, delta_disks, backup_file, dev, | |
881 | direction, verbose); | |
882 | } | |
883 | ||
884 | static void sync_metadata(struct supertype *st) | |
885 | { | |
886 | if (st->ss->external) { | |
887 | if (st->update_tail) { | |
888 | flush_metadata_updates(st); | |
889 | st->update_tail = &st->updates; | |
890 | } else | |
891 | st->ss->sync_metadata(st); | |
892 | } | |
893 | } | |
894 | ||
895 | static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n) | |
896 | { | |
897 | /* when dealing with external metadata subarrays we need to be | |
898 | * prepared to handle EAGAIN. The kernel may need to wait for | |
899 | * mdmon to mark the array active so the kernel can handle | |
900 | * allocations/writeback when preparing the reshape action | |
901 | * (md_allow_write()). We temporarily disable safe_mode_delay | |
902 | * to close a race with the array_state going clean before the | |
903 | * next write to raid_disks / stripe_cache_size | |
904 | */ | |
905 | char safe[SYSFS_MAX_BUF_SIZE]; | |
906 | int rc; | |
907 | ||
908 | /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */ | |
909 | if (!container || | |
910 | (strcmp(name, "raid_disks") != 0 && | |
911 | strcmp(name, "stripe_cache_size") != 0)) | |
912 | return sysfs_set_num(sra, NULL, name, n); | |
913 | ||
914 | rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe)); | |
915 | if (rc <= 0) | |
916 | return -1; | |
917 | sysfs_set_num(sra, NULL, "safe_mode_delay", 0); | |
918 | rc = sysfs_set_num(sra, NULL, name, n); | |
919 | if (rc < 0 && errno == EAGAIN) { | |
920 | ping_monitor(container); | |
921 | /* if we get EAGAIN here then the monitor is not active | |
922 | * so stop trying | |
923 | */ | |
924 | rc = sysfs_set_num(sra, NULL, name, n); | |
925 | } | |
926 | sysfs_set_str(sra, NULL, "safe_mode_delay", safe); | |
927 | return rc; | |
928 | } | |
929 | ||
930 | int start_reshape(struct mdinfo *sra, int already_running, | |
931 | int before_data_disks, int data_disks, struct supertype *st) | |
932 | { | |
933 | int err; | |
934 | unsigned long long sync_max_to_set; | |
935 | ||
936 | sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL); | |
937 | err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress); | |
938 | err = err ?: sysfs_set_num(sra, NULL, "suspend_lo", | |
939 | sra->reshape_progress); | |
940 | if (before_data_disks <= data_disks) | |
941 | sync_max_to_set = sra->reshape_progress / data_disks; | |
942 | else | |
943 | sync_max_to_set = (sra->component_size * data_disks | |
944 | - sra->reshape_progress) / data_disks; | |
945 | ||
946 | if (!already_running) | |
947 | sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set); | |
948 | ||
949 | if (st->ss->external) | |
950 | err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set); | |
951 | else | |
952 | err = err ?: sysfs_set_str(sra, NULL, "sync_max", "max"); | |
953 | ||
954 | if (!already_running && err == 0) { | |
955 | int cnt = 5; | |
956 | do { | |
957 | err = sysfs_set_str(sra, NULL, "sync_action", | |
958 | "reshape"); | |
959 | if (err) | |
960 | sleep_for(1, 0, true); | |
961 | } while (err && errno == EBUSY && cnt-- > 0); | |
962 | } | |
963 | return err; | |
964 | } | |
965 | ||
void abort_reshape(struct mdinfo *sra)
{
	/* Abort a reshape: stop the sync thread and lift the I/O
	 * suspension.  The exact write order below is deliberate. */
	sysfs_set_str(sra, NULL, "sync_action", "idle");
	/*
	 * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and
	 * suspend_hi to decrease as well as increase.")
	 * you could only increase suspend_{lo,hi} unless the region they
	 * covered was empty. So to reset to 0, you need to push suspend_lo
	 * up past suspend_hi first. So to maximize the chance of mdadm
	 * working on all kernels, we want to keep doing that.
	 */
	sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
	sysfs_set_num(sra, NULL, "suspend_hi", 0);
	sysfs_set_num(sra, NULL, "suspend_lo", 0);
	sysfs_set_num(sra, NULL, "sync_min", 0);
	// It isn't safe to reset sync_max as we aren't monitoring.
	// Array really should be stopped at this point.
}
984 | ||
/* For a RAID1 or RAID10 -> RAID0 takeover, keep exactly one working
 * in-sync device per copy-set and mark every other device faulty/removed.
 * Returns 0 on success, 1 if the operation cannot proceed (unsupported
 * level, multiple arrays in an external container, or a copy-set with no
 * usable device).
 */
int remove_disks_for_takeover(struct supertype *st,
			      struct mdinfo *sra,
			      int layout)
{
	int nr_of_copies;
	struct mdinfo *remaining;
	int slot;

	if (st->ss->external) {
		int rv = 0;
		struct mdinfo *arrays = st->ss->container_content(st, NULL);
		/*
		 * container_content returns list of arrays in container
		 * If arrays->next is not NULL it means that there are
		 * 2 arrays in container and operation should be blocked
		 */
		if (arrays) {
			if (arrays->next)
				rv = 1;
			sysfs_free(arrays);
			if (rv) {
				pr_err("Error. Cannot perform operation on %s- for this operation "
				       "it MUST be single array in container\n", st->devnm);
				return rv;
			}
		}
	}

	if (sra->array.level == 10)
		nr_of_copies = layout & 0xff;	/* 'near' copy count */
	else if (sra->array.level == 1)
		nr_of_copies = sra->array.raid_disks;
	else
		return 1;

	/* Steal the device list; chosen keepers are pushed back onto
	 * sra->devs one by one. */
	remaining = sra->devs;
	sra->devs = NULL;
	/* for each 'copy', select one device and remove from the list. */
	for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) {
		struct mdinfo **diskp;
		int found = 0;

		/* Find a working device to keep */
		for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) {
			struct mdinfo *disk = *diskp;

			if (disk->disk.raid_disk < slot)
				continue;
			if (disk->disk.raid_disk >= slot + nr_of_copies)
				continue;
			if (disk->disk.state & (1<<MD_DISK_REMOVED))
				continue;
			if (disk->disk.state & (1<<MD_DISK_FAULTY))
				continue;
			if (!(disk->disk.state & (1<<MD_DISK_SYNC)))
				continue;

			/* We have found a good disk to use! */
			*diskp = disk->next;
			disk->next = sra->devs;
			sra->devs = disk;
			found = 1;
			break;
		}
		if (!found)
			break;
	}

	if (slot < sra->array.raid_disks) {
		/* didn't find all slots: splice the kept devices back onto
		 * the remainder so sra->devs again holds every device
		 * (order may differ), then fail */
		struct mdinfo **e;
		e = &remaining;
		while (*e)
			e = &(*e)->next;
		*e = sra->devs;
		sra->devs = remaining;
		return 1;
	}

	/* Remove all 'remaining' devices from the array */
	while (remaining) {
		struct mdinfo *sd = remaining;
		remaining = sd->next;

		sysfs_set_str(sra, sd, "state", "faulty");
		sysfs_set_str(sra, sd, "slot", STR_COMMON_NONE);
		/* for external metadata disks should be removed in mdmon */
		if (!st->ss->external)
			sysfs_set_str(sra, sd, "state", "remove");
		sd->disk.state |= (1<<MD_DISK_REMOVED);
		sd->disk.state &= ~(1<<MD_DISK_SYNC);
		sd->next = sra->devs;
		sra->devs = sd;
	}
	return 0;
}
1081 | ||
/* Release the resources built by reshape_prepare_fdlist(): close every
 * descriptor that was actually opened (-1 marks an unused slot) and free
 * both arrays.
 */
void reshape_free_fdlist(int *fdlist,
			 unsigned long long *offsets,
			 int size)
{
	int idx;

	for (idx = 0; idx < size; idx++) {
		if (fdlist[idx] < 0)
			continue;
		close(fdlist[idx]);
	}
	free(fdlist);
	free(offsets);
}
1095 | ||
/* Populate fdlist[]/offsets[] with an open fd and byte offset for each
 * usable component of the array.  In-sync members land at their raid_disk
 * index (read-only); when no backup_file is given, spares are appended
 * from index 'raid_disks' upward (read-write) and the backup area is
 * placed at the end of their data space.
 *
 * Returns the number of slots used (>= raid_disks), or -1 if any
 * component could not be opened (already-opened fds are left for the
 * caller to release via reshape_free_fdlist()).
 */
int reshape_prepare_fdlist(char *devname,
			   struct mdinfo *sra,
			   int raid_disks,
			   int nrdisks,
			   unsigned long blocks,
			   char *backup_file,
			   int *fdlist,
			   unsigned long long *offsets)
{
	int d = 0;
	struct mdinfo *sd;

	enable_fds(nrdisks);
	/* NOTE(review): '<=' initialises nrdisks+1 entries, so callers must
	 * allocate fdlist/offsets with at least nrdisks+1 slots -- confirm
	 * at call sites. */
	for (d = 0; d <= nrdisks; d++)
		fdlist[d] = -1;
	d = raid_disks;
	for (sd = sra->devs; sd; sd = sd->next) {
		if (sd->disk.state & (1<<MD_DISK_FAULTY))
			continue;
		if (sd->disk.state & (1<<MD_DISK_SYNC) &&
		    sd->disk.raid_disk < raid_disks) {
			/* active in-sync member: open read-only at its slot */
			char *dn = map_dev(sd->disk.major, sd->disk.minor, 1);
			fdlist[sd->disk.raid_disk] = dev_open(dn, O_RDONLY);
			offsets[sd->disk.raid_disk] = sd->data_offset*512;
			if (fdlist[sd->disk.raid_disk] < 0) {
				pr_err("%s: cannot open component %s\n",
				       devname, dn ? dn : "-unknown-");
				d = -1;
				goto release;
			}
		} else if (backup_file == NULL) {
			/* spare: backup lives at the end of its data area
			 * ('blocks' sectors of data plus 8-sector header) */
			char *dn = map_dev(sd->disk.major, sd->disk.minor, 1);
			fdlist[d] = dev_open(dn, O_RDWR);
			offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512;
			if (fdlist[d] < 0) {
				pr_err("%s: cannot open component %s\n",
				       devname, dn ? dn : "-unknown-");
				d = -1;
				goto release;
			}
			d++;
		}
	}
release:
	return d;
}
1143 | ||
1144 | int reshape_open_backup_file(char *backup_file, | |
1145 | int fd, | |
1146 | char *devname, | |
1147 | long blocks, | |
1148 | int *fdlist, | |
1149 | unsigned long long *offsets, | |
1150 | char *sys_name, | |
1151 | int restart) | |
1152 | { | |
1153 | /* Return 1 on success, 0 on any form of failure */ | |
1154 | /* need to check backup file is large enough */ | |
1155 | char buf[512]; | |
1156 | struct stat stb; | |
1157 | unsigned int dev; | |
1158 | int i; | |
1159 | ||
1160 | *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL), | |
1161 | S_IRUSR | S_IWUSR); | |
1162 | *offsets = 8 * 512; | |
1163 | if (*fdlist < 0) { | |
1164 | pr_err("%s: cannot create backup file %s: %s\n", | |
1165 | devname, backup_file, strerror(errno)); | |
1166 | return 0; | |
1167 | } | |
1168 | /* Guard against backup file being on array device. | |
1169 | * If array is partitioned or if LVM etc is in the | |
1170 | * way this will not notice, but it is better than | |
1171 | * nothing. | |
1172 | */ | |
1173 | fstat(*fdlist, &stb); | |
1174 | dev = stb.st_dev; | |
1175 | fstat(fd, &stb); | |
1176 | if (stb.st_rdev == dev) { | |
1177 | pr_err("backup file must NOT be on the array being reshaped.\n"); | |
1178 | close(*fdlist); | |
1179 | return 0; | |
1180 | } | |
1181 | ||
1182 | memset(buf, 0, 512); | |
1183 | for (i=0; i < blocks + 8 ; i++) { | |
1184 | if (write(*fdlist, buf, 512) != 512) { | |
1185 | pr_err("%s: cannot create backup file %s: %s\n", | |
1186 | devname, backup_file, strerror(errno)); | |
1187 | return 0; | |
1188 | } | |
1189 | } | |
1190 | if (fsync(*fdlist) != 0) { | |
1191 | pr_err("%s: cannot create backup file %s: %s\n", | |
1192 | devname, backup_file, strerror(errno)); | |
1193 | return 0; | |
1194 | } | |
1195 | ||
1196 | if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) { | |
1197 | char *bu = make_backup(sys_name); | |
1198 | if (symlink(backup_file, bu)) | |
1199 | pr_err("Recording backup file in " MAP_DIR " failed: %s\n", | |
1200 | strerror(errno)); | |
1201 | free(bu); | |
1202 | } | |
1203 | ||
1204 | return 1; | |
1205 | } | |
1206 | ||
/* Compute how many 512-byte sectors one backup unit must cover: a whole
 * number of old stripes AND a whole number of new stripes, i.e. the LCM
 * of (old chunk sectors * old data disks) and (new chunk sectors * new
 * data disks).
 */
unsigned long compute_backup_blocks(int nchunk, int ochunk,
				    unsigned int ndata, unsigned int odata)
{
	unsigned long old_stripe = (ochunk/512) * odata;
	unsigned long new_stripe = (nchunk/512) * ndata;
	unsigned long gcd = GCD(old_stripe, new_stripe);

	/* LCM == product / GCD */
	return (unsigned long)(ochunk/512) * (unsigned long)(nchunk/512) *
		odata * ndata / gcd;
}
1226 | ||
/* Decide whether, and how, the requested grow can be performed.
 * Fills in 're' with the intermediate level, before/after geometry and
 * backup requirements; may normalise UnSet fields in 'info' to concrete
 * values as a side effect.  Returns NULL if the change can be achieved,
 * otherwise an error message ("" means the message was already printed).
 */
char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re)
{
	/* Based on the current array state in info->array and
	 * the changes in info->new_* etc, determine:
	 *  - whether the change is possible
	 *  - Intermediate level/raid_disks/layout
	 *  - whether a restriping reshape is needed
	 *  - number of sectors in minimum change unit.  This
	 *    will cover a whole number of stripes in 'before' and
	 *    'after'.
	 *
	 * Return message if the change should be rejected
	 *        NULL if the change can be achieved
	 *
	 * This can be called as part of starting a reshape, or
	 * when assembling an array that is undergoing reshape.
	 */
	int near, far, offset, copies;
	int new_disks;
	int old_chunk, new_chunk;
	/* delta_parity records change in number of devices
	 * caused by level change
	 */
	int delta_parity = 0;

	memset(re, 0, sizeof(*re));

	/* If a new level not explicitly given, we assume no-change */
	if (info->new_level == UnSet)
		info->new_level = info->array.level;

	if (info->new_chunk)
		switch (info->new_level) {
		case 0:
		case 4:
		case 5:
		case 6:
		case 10:
			/* chunk size is meaningful, must divide component_size
			 * evenly
			 */
			if (info->component_size % (info->new_chunk/512)) {
				unsigned long long shrink = info->component_size;
				shrink &= ~(unsigned long long)(info->new_chunk/512-1);
				pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n",
				       info->new_chunk/1024, info->component_size/2);
				pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n",
				       devname, shrink/2);
				pr_err("will shrink the array so the given chunk size would work.\n");
				return "";
			}
			break;
		default:
			return "chunk size not meaningful for this level";
		}
	else
		info->new_chunk = info->array.chunk_size;

	/* First big switch: per current-level validation, and computation
	 * of the intermediate ('before') geometry stored in 're'. */
	switch (info->array.level) {
	default:
		return "No reshape is possibly for this RAID level";
	case LEVEL_LINEAR:
		if (info->delta_disks != UnSet)
			return "Only --add is supported for LINEAR, setting --raid-disks is not needed";
		else
			return "Only --add is supported for LINEAR, other --grow options are not meaningful";
	case 1:
		/* RAID1 can convert to RAID1 with different disks, or
		 * raid5 with 2 disks, or
		 * raid0 with 1 disk
		 */
		if (info->new_level > 1 && (info->component_size & 7))
			return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first.";
		if (info->new_level == 0) {
			if (info->delta_disks != UnSet &&
			    info->delta_disks != 0)
				return "Cannot change number of disks with RAID1->RAID0 conversion";
			re->level = 0;
			re->before.data_disks = 1;
			re->after.data_disks = 1;
			return NULL;
		}
		if (info->new_level == 1) {
			if (info->delta_disks == UnSet)
				/* Don't know what to do */
				return "no change requested for Growing RAID1";
			re->level = 1;
			return NULL;
		}
		if (info->array.raid_disks != 2 && info->new_level == 5)
			return "Can only convert a 2-device array to RAID5";
		if (info->array.raid_disks == 2 && info->new_level == 5) {
			re->level = 5;
			re->before.data_disks = 1;
			if (info->delta_disks != UnSet &&
			    info->delta_disks != 0)
				re->after.data_disks = 1 + info->delta_disks;
			else
				re->after.data_disks = 1;
			if (re->after.data_disks < 1)
				return "Number of disks too small for RAID5";

			re->before.layout = ALGORITHM_LEFT_SYMMETRIC;
			info->array.chunk_size = 65536;
			break;
		}
		/* Could do some multi-stage conversions, but leave that to
		 * later.
		 */
		return "Impossibly level change request for RAID1";

	case 10:
		/* RAID10 can be converted from near mode to
		 * RAID0 by removing some devices.
		 * It can also be reshaped if the kernel supports
		 * new_data_offset.
		 */
		switch (info->new_level) {
		case 0:
			if ((info->array.layout & ~0xff) != 0x100)
				return "Cannot Grow RAID10 with far/offset layout";
			/*
			 * number of devices must be multiple of
			 * number of copies
			 */
			if (info->array.raid_disks %
			    (info->array.layout & 0xff))
				return "RAID10 layout too complex for Grow operation";

			new_disks = (info->array.raid_disks /
				     (info->array.layout & 0xff));
			if (info->delta_disks == UnSet)
				info->delta_disks = (new_disks
						     - info->array.raid_disks);

			if (info->delta_disks !=
			    new_disks - info->array.raid_disks)
				return "New number of raid-devices impossible for RAID10";
			if (info->new_chunk &&
			    info->new_chunk != info->array.chunk_size)
				return "Cannot change chunk-size with RAID10 Grow";

			/* looks good */
			re->level = 0;
			re->before.data_disks = new_disks;
			re->after.data_disks = re->before.data_disks;
			return NULL;

		case 10:
			/* layout word: near count in bits 0-7, far count in
			 * bits 8-15, offset-mode flag in bit 16 */
			near = info->array.layout & 0xff;
			far = (info->array.layout >> 8) & 0xff;
			offset = info->array.layout & 0x10000;
			if (far > 1 && !offset)
				return "Cannot reshape RAID10 in far-mode";
			copies = near * far;

			old_chunk = info->array.chunk_size * far;

			if (info->new_layout == UnSet)
				info->new_layout = info->array.layout;
			else {
				near = info->new_layout & 0xff;
				far = (info->new_layout >> 8) & 0xff;
				offset = info->new_layout & 0x10000;
				if (far > 1 && !offset)
					return "Cannot reshape RAID10 to far-mode";
				if (near * far != copies)
					return "Cannot change number of copies when reshaping RAID10";
			}
			if (info->delta_disks == UnSet)
				info->delta_disks = 0;
			new_disks = (info->array.raid_disks +
				     info->delta_disks);

			new_chunk = info->new_chunk * far;

			re->level = 10;
			re->before.layout = info->array.layout;
			re->before.data_disks = info->array.raid_disks;
			re->after.layout = info->new_layout;
			re->after.data_disks = new_disks;
			/* For RAID10 we don't do backup but do allow reshape,
			 * so set backup_blocks to INVALID_SECTORS rather than
			 * zero.
			 * And there is no need to synchronise stripes on both
			 * 'old' and 'new'. So the important
			 * number is the minimum data_offset difference
			 * which is the larger of (offset copies * chunk).
			 */
			re->backup_blocks = INVALID_SECTORS;
			re->min_offset_change = max(old_chunk, new_chunk) / 512;
			if (new_disks < re->before.data_disks &&
			    info->space_after < re->min_offset_change)
				/* Reduce component size by one chunk */
				re->new_size = (info->component_size -
						re->min_offset_change);
			else
				re->new_size = info->component_size;
			re->new_size = re->new_size * new_disks / copies;
			return NULL;

		default:
			return "RAID10 can only be changed to RAID0";
		}
	case 0:
		/* RAID0 can be converted to RAID10, or to RAID456 */
		if (info->new_level == 10) {
			if (info->new_layout == UnSet &&
			    info->delta_disks == UnSet) {
				/* Assume near=2 layout */
				info->new_layout = 0x102;
				info->delta_disks = info->array.raid_disks;
			}
			if (info->new_layout == UnSet) {
				int copies = 1 + (info->delta_disks
						  / info->array.raid_disks);
				if (info->array.raid_disks * (copies-1) !=
				    info->delta_disks)
					return "Impossible number of devices for RAID0->RAID10";
				info->new_layout = 0x100 + copies;
			}
			if (info->delta_disks == UnSet) {
				int copies = info->new_layout & 0xff;
				if (info->new_layout != 0x100 + copies)
					return "New layout impossible for RAID0->RAID10";;
				info->delta_disks = (copies - 1) *
					info->array.raid_disks;
			}
			if (info->new_chunk &&
			    info->new_chunk != info->array.chunk_size)
				return "Cannot change chunk-size with RAID0->RAID10";
			/* looks good */
			re->level = 10;
			re->before.data_disks = (info->array.raid_disks +
						 info->delta_disks);
			re->after.data_disks = re->before.data_disks;
			re->before.layout = info->new_layout;
			return NULL;
		}

		/* RAID0 can also covert to RAID0/4/5/6 by first converting to
		 * a raid4 style layout of the final level.
		 */
		switch (info->new_level) {
		case 4:
			delta_parity = 1;
			/* fallthrough */
		case 0:
			re->level = 4;
			re->before.layout = 0;
			break;
		case 5:
			delta_parity = 1;
			re->level = 5;
			re->before.layout = ALGORITHM_PARITY_N;
			if (info->new_layout == UnSet)
				info->new_layout = map_name(r5layout, "default");
			break;
		case 6:
			delta_parity = 2;
			re->level = 6;
			re->before.layout = ALGORITHM_PARITY_N;
			if (info->new_layout == UnSet)
				info->new_layout = map_name(r6layout, "default");
			break;
		default:
			return "Impossible level change requested";
		}
		re->before.data_disks = info->array.raid_disks;
		/* determining 'after' layout happens outside this 'switch' */
		break;

	case 4:
		/* RAID4 is handled as RAID5 with a fixed parity layout */
		info->array.layout = ALGORITHM_PARITY_N;
		/* fallthrough */
	case 5:
		switch (info->new_level) {
		case 0:
			delta_parity = -1;
			/* fallthrough */
		case 4:
			re->level = info->array.level;
			re->before.data_disks = info->array.raid_disks - 1;
			re->before.layout = info->array.layout;
			break;
		case 5:
			re->level = 5;
			re->before.data_disks = info->array.raid_disks - 1;
			re->before.layout = info->array.layout;
			break;
		case 6:
			delta_parity = 1;
			re->level = 6;
			re->before.data_disks = info->array.raid_disks - 1;
			/* map the RAID5 layout onto its "-6" RAID6 twin */
			switch (info->array.layout) {
			case ALGORITHM_LEFT_ASYMMETRIC:
				re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6;
				break;
			case ALGORITHM_RIGHT_ASYMMETRIC:
				re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
				break;
			case ALGORITHM_LEFT_SYMMETRIC:
				re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6;
				break;
			case ALGORITHM_RIGHT_SYMMETRIC:
				re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6;
				break;
			case ALGORITHM_PARITY_0:
				re->before.layout = ALGORITHM_PARITY_0_6;
				break;
			case ALGORITHM_PARITY_N:
				re->before.layout = ALGORITHM_PARITY_N_6;
				break;
			default:
				return "Cannot convert an array with this layout";
			}
			break;
		case 1:
			if (info->array.raid_disks != 2)
				return "Can only convert a 2-device array to RAID1";
			if (info->delta_disks != UnSet &&
			    info->delta_disks != 0)
				return "Cannot set raid_disk when converting RAID5->RAID1";
			re->level = 1;
			info->new_chunk = 0;
			return NULL;
		default:
			return "Impossible level change requested";
		}
		break;
	case 6:
		switch (info->new_level) {
		case 4:
		case 5:
			delta_parity = -1;
			/* fallthrough */
		case 6:
			re->level = 6;
			re->before.data_disks = info->array.raid_disks - 2;
			re->before.layout = info->array.layout;
			break;
		default:
			return "Impossible level change requested";
		}
		break;
	}

	/* If we reached here then it looks like a re-stripe is
	 * happening. We have determined the intermediate level
	 * and initial raid_disks/layout and stored these in 're'.
	 *
	 * We need to deduce the final layout that can be atomically
	 * converted to the end state.
	 */
	switch (info->new_level) {
	case 0:
		/* We can only get to RAID0 from RAID4 or RAID5
		 * with appropriate layout and one extra device
		 */
		if (re->level != 4 && re->level != 5)
			return "Cannot covert to RAID0 from this level";

		switch (re->level) {
		case 4:
			re->before.layout = 0;
			re->after.layout = 0;
			break;
		case 5:
			re->after.layout = ALGORITHM_PARITY_N;
			break;
		}
		break;

	case 4:
		/* We can only get to RAID4 from RAID5 */
		if (re->level != 4 && re->level != 5)
			return "Cannot convert to RAID4 from this level";

		switch (re->level) {
		case 4:
			re->after.layout = 0;
			break;
		case 5:
			re->after.layout = ALGORITHM_PARITY_N;
			break;
		}
		break;

	case 5:
		/* We get to RAID5 from RAID5 or RAID6 */
		if (re->level != 5 && re->level != 6)
			return "Cannot convert to RAID5 from this level";

		switch (re->level) {
		case 5:
			if (info->new_layout == UnSet)
				re->after.layout = re->before.layout;
			else
				re->after.layout = info->new_layout;
			break;
		case 6:
			if (info->new_layout == UnSet)
				info->new_layout = re->before.layout;

			/* after.layout needs to be raid6 version of new_layout */
			if (info->new_layout == ALGORITHM_PARITY_N)
				re->after.layout = ALGORITHM_PARITY_N;
			else {
				char layout[40];
				char *ls = map_num(r5layout, info->new_layout);
				int l;
				if (ls) {
					/* Current RAID6 layout has a RAID5
					 * equivalent - good
					 */
					strcat(strcpy(layout, ls), "-6");
					l = map_name(r6layout, layout);
					if (l == UnSet)
						return "Cannot find RAID6 layout to convert to";
				} else {
					/* Current RAID6 has no equivalent.
					 * If it is already a '-6' layout we
					 * can leave it unchanged, else we must
					 * fail
					 */
					ls = map_num(r6layout,
						     info->new_layout);
					if (!ls ||
					    strcmp(ls+strlen(ls)-2, "-6") != 0)
						return "Please specify new layout";
					l = info->new_layout;
				}
				re->after.layout = l;
			}
		}
		break;

	case 6:
		/* We must already be at level 6 */
		if (re->level != 6)
			return "Impossible level change";
		if (info->new_layout == UnSet)
			re->after.layout = info->array.layout;
		else
			re->after.layout = info->new_layout;
		break;
	default:
		return "Impossible level change requested";
	}
	if (info->delta_disks == UnSet)
		info->delta_disks = delta_parity;

	re->after.data_disks =
		(re->before.data_disks + info->delta_disks - delta_parity);

	switch (re->level) {
	case 6:
		re->parity = 2;
		break;
	case 4:
	case 5:
		re->parity = 1;
		break;
	default:
		re->parity = 0;
		break;
	}
	/* So we have a restripe operation, we need to calculate the number
	 * of blocks per reshape operation.
	 */
	re->new_size = info->component_size * re->before.data_disks;
	if (info->new_chunk == 0)
		info->new_chunk = info->array.chunk_size;
	if (re->after.data_disks == re->before.data_disks &&
	    re->after.layout == re->before.layout &&
	    info->new_chunk == info->array.chunk_size) {
		/* Nothing to change, can change level immediately. */
		re->level = info->new_level;
		re->backup_blocks = 0;
		return NULL;
	}
	if (re->after.data_disks == 1 && re->before.data_disks == 1) {
		/* chunk and layout changes make no difference */
		re->level = info->new_level;
		re->backup_blocks = 0;
		return NULL;
	}

	re->backup_blocks = compute_backup_blocks(
		info->new_chunk, info->array.chunk_size,
		re->after.data_disks, re->before.data_disks);
	re->min_offset_change = re->backup_blocks / re->before.data_disks;

	re->new_size = info->component_size * re->after.data_disks;
	return NULL;
}
1719 | ||
1720 | static int set_array_size(struct supertype *st, struct mdinfo *sra, | |
1721 | char *text_version) | |
1722 | { | |
1723 | struct mdinfo *info; | |
1724 | char *subarray; | |
1725 | int ret_val = -1; | |
1726 | ||
1727 | if ((st == NULL) || (sra == NULL)) | |
1728 | return ret_val; | |
1729 | ||
1730 | if (text_version == NULL) | |
1731 | text_version = sra->text_version; | |
1732 | subarray = strchr(text_version + 1, '/')+1; | |
1733 | info = st->ss->container_content(st, subarray); | |
1734 | if (info) { | |
1735 | unsigned long long current_size = 0; | |
1736 | unsigned long long new_size = info->custom_array_size/2; | |
1737 | ||
1738 | if (sysfs_get_ll(sra, NULL, "array_size", ¤t_size) == 0 && | |
1739 | new_size > current_size) { | |
1740 | if (sysfs_set_num(sra, NULL, "array_size", new_size) | |
1741 | < 0) | |
1742 | dprintf("Error: Cannot set array size"); | |
1743 | else { | |
1744 | ret_val = 0; | |
1745 | dprintf("Array size changed"); | |
1746 | } | |
1747 | dprintf_cont(" from %llu to %llu.\n", | |
1748 | current_size, new_size); | |
1749 | } | |
1750 | sysfs_free(info); | |
1751 | } else | |
1752 | dprintf("Error: set_array_size(): info pointer in NULL\n"); | |
1753 | ||
1754 | return ret_val; | |
1755 | } | |
1756 | ||
1757 | static int reshape_array(char *container, int fd, char *devname, | |
1758 | struct supertype *st, struct mdinfo *info, | |
1759 | int force, struct mddev_dev *devlist, | |
1760 | unsigned long long data_offset, | |
1761 | char *backup_file, int verbose, int forked, | |
1762 | int restart, int freeze_reshape); | |
1763 | static int reshape_container(char *container, char *devname, | |
1764 | int mdfd, | |
1765 | struct supertype *st, | |
1766 | struct mdinfo *info, | |
1767 | int force, | |
1768 | char *backup_file, int verbose, | |
1769 | int forked, int restart, int freeze_reshape); | |
1770 | ||
1771 | /** | |
1772 | * prepare_external_reshape() - prepares update on external metadata if supported. | |
1773 | * @devname: Device name. | |
1774 | * @subarray: Subarray. | |
1775 | * @st: Supertype. | |
1776 | * @container: Container. | |
1777 | * @cfd: Container file descriptor. | |
1778 | * | |
1779 | * Function checks that the requested reshape is supported on external metadata, | |
1780 | * and performs an initial check that the container holds the pre-requisite | |
1781 | * spare devices (mdmon owns final validation). | |
1782 | * | |
1783 | * Return: 0 on success, else 1 | |
1784 | */ | |
1785 | static int prepare_external_reshape(char *devname, char *subarray, | |
1786 | struct supertype *st, char *container, | |
1787 | const int cfd) | |
1788 | { | |
1789 | struct mdinfo *cc = NULL; | |
1790 | struct mdinfo *content = NULL; | |
1791 | ||
1792 | if (st->ss->load_container(st, cfd, NULL)) { | |
1793 | pr_err("Cannot read superblock for %s\n", devname); | |
1794 | return 1; | |
1795 | } | |
1796 | ||
1797 | if (!st->ss->container_content) | |
1798 | return 1; | |
1799 | ||
1800 | cc = st->ss->container_content(st, subarray); | |
1801 | for (content = cc; content ; content = content->next) { | |
1802 | /* | |
1803 | * check if reshape is allowed based on metadata | |
1804 | * indications stored in content.array.status | |
1805 | */ | |
1806 | if (is_bit_set(&content->array.state, MD_SB_BLOCK_VOLUME) || | |
1807 | is_bit_set(&content->array.state, MD_SB_BLOCK_CONTAINER_RESHAPE)) { | |
1808 | pr_err("Cannot reshape arrays in container with unsupported metadata: %s(%s)\n", | |
1809 | devname, container); | |
1810 | goto error; | |
1811 | } | |
1812 | if (content->consistency_policy == CONSISTENCY_POLICY_PPL) { | |
1813 | pr_err("Operation not supported when ppl consistency policy is enabled\n"); | |
1814 | goto error; | |
1815 | } | |
1816 | if (content->consistency_policy == CONSISTENCY_POLICY_BITMAP) { | |
1817 | pr_err("Operation not supported when write-intent bitmap consistency policy is enabled\n"); | |
1818 | goto error; | |
1819 | } | |
1820 | } | |
1821 | sysfs_free(cc); | |
1822 | if (mdmon_running(container)) | |
1823 | st->update_tail = &st->updates; | |
1824 | return 0; | |
1825 | error: | |
1826 | sysfs_free(cc); | |
1827 | return 1; | |
1828 | } | |
1829 | ||
1830 | int Grow_reshape(char *devname, int fd, | |
1831 | struct mddev_dev *devlist, | |
1832 | struct context *c, struct shape *s) | |
1833 | { | |
1834 | /* Make some changes in the shape of an array. | |
1835 | * The kernel must support the change. | |
1836 | * | |
1837 | * There are three different changes. Each can trigger | |
1838 | * a resync or recovery so we freeze that until we have | |
1839 | * requested everything (if kernel supports freezing - 2.6.30). | |
1840 | * The steps are: | |
1841 | * - change size (i.e. component_size) | |
1842 | * - change level | |
1843 | * - change layout/chunksize/ndisks | |
1844 | * | |
1845 | * The last can require a reshape. It is different on different | |
1846 | * levels so we need to check the level before actioning it. | |
1847 | * Some times the level change needs to be requested after the | |
1848 | * reshape (e.g. raid6->raid5, raid5->raid0) | |
1849 | * | |
1850 | */ | |
1851 | struct mdu_array_info_s array; | |
1852 | int rv = 0; | |
1853 | struct supertype *st; | |
1854 | char *subarray = NULL; | |
1855 | ||
1856 | int frozen = 0; | |
1857 | int changed = 0; | |
1858 | char *container = NULL; | |
1859 | int cfd = -1; | |
1860 | ||
1861 | struct mddev_dev *dv; | |
1862 | int added_disks; | |
1863 | ||
1864 | struct mdinfo info; | |
1865 | struct mdinfo *sra = NULL; | |
1866 | ||
1867 | if (md_get_array_info(fd, &array) < 0) { | |
1868 | pr_err("%s is not an active md array - aborting\n", | |
1869 | devname); | |
1870 | return 1; | |
1871 | } | |
1872 | if (s->level != UnSet && s->chunk) { | |
1873 | pr_err("Cannot change array level in the same operation as changing chunk size.\n"); | |
1874 | return 1; | |
1875 | } | |
1876 | ||
1877 | if (s->data_offset != INVALID_SECTORS && array.level != 10 && | |
1878 | (array.level < 4 || array.level > 6)) { | |
1879 | pr_err("--grow --data-offset not yet supported\n"); | |
1880 | return 1; | |
1881 | } | |
1882 | ||
1883 | if (s->size > 0 && | |
1884 | (s->chunk || s->level!= UnSet || s->layout_str || s->raiddisks)) { | |
1885 | pr_err("cannot change component size at the same time as other changes.\n" | |
1886 | " Change size first, then check data is intact before making other changes.\n"); | |
1887 | return 1; | |
1888 | } | |
1889 | ||
1890 | if (array.level > 1 && s->size > 1 && | |
1891 | (unsigned long long) (array.chunk_size / 1024) > s->size) { | |
1892 | pr_err("component size must be larger than chunk size.\n"); | |
1893 | return 1; | |
1894 | } | |
1895 | ||
1896 | st = super_by_fd(fd, &subarray); | |
1897 | if (!st) { | |
1898 | pr_err("Unable to determine metadata format for %s\n", devname); | |
1899 | return 1; | |
1900 | } | |
1901 | if (s->raiddisks > st->max_devs) { | |
1902 | pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs); | |
1903 | return 1; | |
1904 | } | |
1905 | if (s->level == 0 && (array.state & (1 << MD_SB_BITMAP_PRESENT)) && | |
1906 | !(array.state & (1 << MD_SB_CLUSTERED)) && !st->ss->external) { | |
1907 | array.state &= ~(1 << MD_SB_BITMAP_PRESENT); | |
1908 | if (md_set_array_info(fd, &array) != 0) { | |
1909 | pr_err("failed to remove internal bitmap.\n"); | |
1910 | return 1; | |
1911 | } | |
1912 | } | |
1913 | ||
1914 | if (st->ss->external) { | |
1915 | if (subarray) { | |
1916 | container = st->container_devnm; | |
1917 | cfd = open_dev_excl(st->container_devnm); | |
1918 | } else { | |
1919 | container = st->devnm; | |
1920 | close(fd); | |
1921 | cfd = open_dev_excl(st->devnm); | |
1922 | fd = cfd; | |
1923 | } | |
1924 | if (cfd < 0) { | |
1925 | pr_err("Unable to open container for %s\n", devname); | |
1926 | free(subarray); | |
1927 | return 1; | |
1928 | } | |
1929 | ||
1930 | rv = prepare_external_reshape(devname, subarray, st, | |
1931 | container, cfd); | |
1932 | if (rv > 0) { | |
1933 | free(subarray); | |
1934 | close(cfd); | |
1935 | goto release; | |
1936 | } | |
1937 | ||
1938 | if (s->raiddisks && subarray) { | |
1939 | pr_err("--raid-devices operation can be performed on a container only\n"); | |
1940 | close(cfd); | |
1941 | free(subarray); | |
1942 | return 1; | |
1943 | } | |
1944 | } | |
1945 | ||
1946 | added_disks = 0; | |
1947 | for (dv = devlist; dv; dv = dv->next) | |
1948 | added_disks++; | |
1949 | if (s->raiddisks > array.raid_disks && | |
1950 | array.spare_disks + added_disks < | |
1951 | (s->raiddisks - array.raid_disks) && | |
1952 | !c->force) { | |
1953 | pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n" | |
1954 | " Use --force to over-ride this check.\n", | |
1955 | s->raiddisks - array.raid_disks, | |
1956 | s->raiddisks - array.raid_disks == 1 ? "" : "s", | |
1957 | array.spare_disks + added_disks); | |
1958 | return 1; | |
1959 | } | |
1960 | ||
1961 | sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS | | |
1962 | GET_STATE | GET_VERSION); | |
1963 | if (sra) { | |
1964 | if (st->ss->external && subarray == NULL) { | |
1965 | array.level = LEVEL_CONTAINER; | |
1966 | sra->array.level = LEVEL_CONTAINER; | |
1967 | } | |
1968 | } else { | |
1969 | pr_err("failed to read sysfs parameters for %s\n", | |
1970 | devname); | |
1971 | return 1; | |
1972 | } | |
1973 | frozen = freeze(st); | |
1974 | if (frozen < -1) { | |
1975 | /* freeze() already spewed the reason */ | |
1976 | sysfs_free(sra); | |
1977 | return 1; | |
1978 | } else if (frozen < 0) { | |
1979 | pr_err("%s is performing resync/recovery and cannot be reshaped\n", devname); | |
1980 | sysfs_free(sra); | |
1981 | return 1; | |
1982 | } | |
1983 | ||
1984 | /* ========= set size =============== */ | |
1985 | if (s->size > 0 && | |
1986 | (s->size == MAX_SIZE || s->size != (unsigned)array.size)) { | |
1987 | unsigned long long orig_size = get_component_size(fd)/2; | |
1988 | unsigned long long min_csize; | |
1989 | struct mdinfo *mdi; | |
1990 | int raid0_takeover = 0; | |
1991 | ||
1992 | if (orig_size == 0) | |
1993 | orig_size = (unsigned) array.size; | |
1994 | ||
1995 | if (orig_size == 0) { | |
1996 | pr_err("Cannot set device size in this type of array.\n"); | |
1997 | rv = 1; | |
1998 | goto release; | |
1999 | } | |
2000 | ||
2001 | if (array.level == 0) { | |
2002 | pr_err("Component size change is not supported for RAID0\n"); | |
2003 | rv = 1; | |
2004 | goto release; | |
2005 | } | |
2006 | ||
2007 | if (reshape_super(st, s->size, UnSet, UnSet, 0, 0, UnSet, NULL, | |
2008 | devname, APPLY_METADATA_CHANGES, | |
2009 | c->verbose > 0)) { | |
2010 | rv = 1; | |
2011 | goto release; | |
2012 | } | |
2013 | sync_metadata(st); | |
2014 | if (st->ss->external) { | |
2015 | /* metadata can have size limitation | |
2016 | * update size value according to metadata information | |
2017 | */ | |
2018 | struct mdinfo *sizeinfo = | |
2019 | st->ss->container_content(st, subarray); | |
2020 | if (sizeinfo) { | |
2021 | unsigned long long new_size = | |
2022 | sizeinfo->custom_array_size/2; | |
2023 | int data_disks = get_data_disks( | |
2024 | sizeinfo->array.level, | |
2025 | sizeinfo->array.layout, | |
2026 | sizeinfo->array.raid_disks); | |
2027 | new_size /= data_disks; | |
2028 | dprintf("Metadata size correction from %llu to %llu (%llu)\n", | |
2029 | orig_size, new_size, | |
2030 | new_size * data_disks); | |
2031 | s->size = new_size; | |
2032 | sysfs_free(sizeinfo); | |
2033 | } | |
2034 | } | |
2035 | ||
2036 | /* Update the size of each member device in case | |
2037 | * they have been resized. This will never reduce | |
2038 | * below the current used-size. The "size" attribute | |
2039 | * understands '0' to mean 'max'. | |
2040 | */ | |
2041 | min_csize = 0; | |
2042 | for (mdi = sra->devs; mdi; mdi = mdi->next) { | |
2043 | sysfs_set_num(sra, mdi, "size", | |
2044 | s->size == MAX_SIZE ? 0 : s->size); | |
2045 | if (array.not_persistent == 0 && | |
2046 | array.major_version == 0 && | |
2047 | get_linux_version() < 3001000) { | |
2048 | /* Dangerous to allow size to exceed 2TB */ | |
2049 | unsigned long long csize; | |
2050 | if (sysfs_get_ll(sra, mdi, "size", | |
2051 | &csize) == 0) { | |
2052 | if (csize >= 2ULL*1024*1024*1024) | |
2053 | csize = 2ULL*1024*1024*1024; | |
2054 | if ((min_csize == 0 || | |
2055 | (min_csize > csize))) | |
2056 | min_csize = csize; | |
2057 | } | |
2058 | } | |
2059 | } | |
2060 | if (min_csize && s->size > min_csize) { | |
2061 | pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n"); | |
2062 | rv = 1; | |
2063 | goto size_change_error; | |
2064 | } | |
2065 | if (min_csize && s->size == MAX_SIZE) { | |
2066 | /* Don't let the kernel choose a size - it will get | |
2067 | * it wrong | |
2068 | */ | |
2069 | pr_err("Limited v0.90 array to 2TB per device\n"); | |
2070 | s->size = min_csize; | |
2071 | } | |
2072 | if (st->ss->external) { | |
2073 | if (sra->array.level == 0) { | |
2074 | rv = sysfs_set_str(sra, NULL, "level", "raid5"); | |
2075 | if (!rv) { | |
2076 | raid0_takeover = 1; | |
2077 | /* get array parameters after takeover | |
2078 | * to change one parameter at time only | |
2079 | */ | |
2080 | rv = md_get_array_info(fd, &array); | |
2081 | } | |
2082 | } | |
2083 | /* make sure mdmon is | |
2084 | * aware of the new level */ | |
2085 | if (!mdmon_running(st->container_devnm)) | |
2086 | start_mdmon(st->container_devnm); | |
2087 | ping_monitor(container); | |
2088 | if (mdmon_running(st->container_devnm) == false) { | |
2089 | pr_err("No mdmon found. Grow cannot continue.\n"); | |
2090 | goto release; | |
2091 | } | |
2092 | } | |
2093 | ||
2094 | if (s->size == MAX_SIZE) | |
2095 | s->size = 0; | |
2096 | array.size = s->size; | |
2097 | if (s->size & ~INT32_MAX) { | |
2098 | /* got truncated to 32bit, write to | |
2099 | * component_size instead | |
2100 | */ | |
2101 | rv = sysfs_set_num(sra, NULL, "component_size", s->size); | |
2102 | } else { | |
2103 | rv = md_set_array_info(fd, &array); | |
2104 | ||
2105 | /* manage array size when it is managed externally | |
2106 | */ | |
2107 | if ((rv == 0) && st->ss->external) | |
2108 | rv = set_array_size(st, sra, sra->text_version); | |
2109 | } | |
2110 | ||
2111 | if (raid0_takeover) { | |
2112 | /* do not recync non-existing parity, | |
2113 | * we will drop it anyway | |
2114 | */ | |
2115 | sysfs_set_str(sra, NULL, "sync_action", "frozen"); | |
2116 | /* go back to raid0, drop parity disk | |
2117 | */ | |
2118 | sysfs_set_str(sra, NULL, "level", "raid0"); | |
2119 | md_get_array_info(fd, &array); | |
2120 | } | |
2121 | ||
2122 | size_change_error: | |
2123 | if (rv != 0) { | |
2124 | int err = errno; | |
2125 | ||
2126 | /* restore metadata */ | |
2127 | if (reshape_super(st, orig_size, UnSet, UnSet, 0, 0, | |
2128 | UnSet, NULL, devname, | |
2129 | ROLLBACK_METADATA_CHANGES, | |
2130 | c->verbose) == 0) | |
2131 | sync_metadata(st); | |
2132 | pr_err("Cannot set device size for %s: %s\n", | |
2133 | devname, strerror(err)); | |
2134 | if (err == EBUSY && | |
2135 | (array.state & (1<<MD_SB_BITMAP_PRESENT))) | |
2136 | cont_err("Bitmap must be removed before size can be changed\n"); | |
2137 | rv = 1; | |
2138 | goto release; | |
2139 | } | |
2140 | if (s->assume_clean) { | |
2141 | /* This will fail on kernels older than 3.0 unless | |
2142 | * a backport has been arranged. | |
2143 | */ | |
2144 | if (sra == NULL || | |
2145 | sysfs_set_str(sra, NULL, "resync_start", STR_COMMON_NONE) < 0) | |
2146 | pr_err("--assume-clean not supported with --grow on this kernel\n"); | |
2147 | } | |
2148 | md_get_array_info(fd, &array); | |
2149 | s->size = get_component_size(fd)/2; | |
2150 | if (s->size == 0) | |
2151 | s->size = array.size; | |
2152 | if (c->verbose >= 0) { | |
2153 | if (s->size == orig_size) | |
2154 | pr_err("component size of %s unchanged at %lluK\n", | |
2155 | devname, s->size); | |
2156 | else | |
2157 | pr_err("component size of %s has been set to %lluK\n", | |
2158 | devname, s->size); | |
2159 | } | |
2160 | changed = 1; | |
2161 | } else if (!is_container(array.level)) { | |
2162 | s->size = get_component_size(fd)/2; | |
2163 | if (s->size == 0) | |
2164 | s->size = array.size; | |
2165 | } | |
2166 | ||
2167 | /* See if there is anything else to do */ | |
2168 | if ((s->level == UnSet || s->level == array.level) && | |
2169 | (s->layout_str == NULL) && | |
2170 | (s->chunk == 0 || s->chunk == array.chunk_size) && | |
2171 | s->data_offset == INVALID_SECTORS && | |
2172 | (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) { | |
2173 | /* Nothing more to do */ | |
2174 | if (!changed && c->verbose >= 0) | |
2175 | pr_err("%s: no change requested\n", devname); | |
2176 | goto release; | |
2177 | } | |
2178 | ||
2179 | /* ========= check for Raid10/Raid1 -> Raid0 conversion =============== | |
2180 | * current implementation assumes that following conditions must be met: | |
2181 | * - RAID10: | |
2182 | * - far_copies == 1 | |
2183 | * - near_copies == 2 | |
2184 | */ | |
2185 | if ((s->level == 0 && array.level == 10 && sra && | |
2186 | array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) || | |
2187 | (s->level == 0 && array.level == 1 && sra)) { | |
2188 | int err; | |
2189 | ||
2190 | err = remove_disks_for_takeover(st, sra, array.layout); | |
2191 | if (err) { | |
2192 | dprintf("Array cannot be reshaped\n"); | |
2193 | if (cfd > -1) | |
2194 | close(cfd); | |
2195 | rv = 1; | |
2196 | goto release; | |
2197 | } | |
2198 | /* Make sure mdmon has seen the device removal | |
2199 | * and updated metadata before we continue with | |
2200 | * level change | |
2201 | */ | |
2202 | if (container) | |
2203 | ping_monitor(container); | |
2204 | } | |
2205 | ||
2206 | memset(&info, 0, sizeof(info)); | |
2207 | info.array = array; | |
2208 | if (sysfs_init(&info, fd, NULL)) { | |
2209 | pr_err("failed to initialize sysfs.\n"); | |
2210 | rv = 1; | |
2211 | goto release; | |
2212 | } | |
2213 | strcpy(info.text_version, sra->text_version); | |
2214 | info.component_size = s->size*2; | |
2215 | info.new_level = s->level; | |
2216 | info.new_chunk = s->chunk * 1024; | |
2217 | if (is_container(info.array.level)) { | |
2218 | info.delta_disks = UnSet; | |
2219 | info.array.raid_disks = s->raiddisks; | |
2220 | } else if (s->raiddisks) | |
2221 | info.delta_disks = s->raiddisks - info.array.raid_disks; | |
2222 | else | |
2223 | info.delta_disks = UnSet; | |
2224 | if (s->layout_str == NULL) { | |
2225 | info.new_layout = UnSet; | |
2226 | if (info.array.level == 6 && | |
2227 | (info.new_level == 6 || info.new_level == UnSet) && | |
2228 | info.array.layout >= 16) { | |
2229 | pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname); | |
2230 | cont_err("during the reshape, please specify --layout=preserve\n"); | |
2231 | cont_err("If you want to change it, specify a layout or use --layout=normalise\n"); | |
2232 | rv = 1; | |
2233 | goto release; | |
2234 | } | |
2235 | } else if (strcmp(s->layout_str, "normalise") == 0 || | |
2236 | strcmp(s->layout_str, "normalize") == 0) { | |
2237 | /* If we have a -6 RAID6 layout, remove the '-6'. */ | |
2238 | info.new_layout = UnSet; | |
2239 | if (info.array.level == 6 && info.new_level == UnSet) { | |
2240 | char l[40], *h; | |
2241 | strcpy(l, map_num_s(r6layout, info.array.layout)); | |
2242 | h = strrchr(l, '-'); | |
2243 | if (h && strcmp(h, "-6") == 0) { | |
2244 | *h = 0; | |
2245 | info.new_layout = map_name(r6layout, l); | |
2246 | } | |
2247 | } else { | |
2248 | pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str); | |
2249 | rv = 1; | |
2250 | goto release; | |
2251 | } | |
2252 | } else if (strcmp(s->layout_str, "preserve") == 0) { | |
2253 | /* This means that a non-standard RAID6 layout | |
2254 | * is OK. | |
2255 | * In particular: | |
2256 | * - When reshape a RAID6 (e.g. adding a device) | |
2257 | * which is in a non-standard layout, it is OK | |
2258 | * to preserve that layout. | |
2259 | * - When converting a RAID5 to RAID6, leave it in | |
2260 | * the XXX-6 layout, don't re-layout. | |
2261 | */ | |
2262 | if (info.array.level == 6 && info.new_level == UnSet) | |
2263 | info.new_layout = info.array.layout; | |
2264 | else if (info.array.level == 5 && info.new_level == 6) { | |
2265 | char l[40]; | |
2266 | strcpy(l, map_num_s(r5layout, info.array.layout)); | |
2267 | strcat(l, "-6"); | |
2268 | info.new_layout = map_name(r6layout, l); | |
2269 | } else { | |
2270 | pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str); | |
2271 | rv = 1; | |
2272 | goto release; | |
2273 | } | |
2274 | } else { | |
2275 | int l = info.new_level; | |
2276 | if (l == UnSet) | |
2277 | l = info.array.level; | |
2278 | switch (l) { | |
2279 | case 5: | |
2280 | info.new_layout = map_name(r5layout, s->layout_str); | |
2281 | break; | |
2282 | case 6: | |
2283 | info.new_layout = map_name(r6layout, s->layout_str); | |
2284 | break; | |
2285 | case 10: | |
2286 | info.new_layout = parse_layout_10(s->layout_str); | |
2287 | break; | |
2288 | case LEVEL_FAULTY: | |
2289 | info.new_layout = parse_layout_faulty(s->layout_str); | |
2290 | break; | |
2291 | default: | |
2292 | pr_err("layout not meaningful with this level\n"); | |
2293 | rv = 1; | |
2294 | goto release; | |
2295 | } | |
2296 | if (info.new_layout == UnSet) { | |
2297 | pr_err("layout %s not understood for this level\n", | |
2298 | s->layout_str); | |
2299 | rv = 1; | |
2300 | goto release; | |
2301 | } | |
2302 | } | |
2303 | ||
2304 | if (array.level == LEVEL_FAULTY) { | |
2305 | if (s->level != UnSet && s->level != array.level) { | |
2306 | pr_err("cannot change level of Faulty device\n"); | |
2307 | rv =1 ; | |
2308 | } | |
2309 | if (s->chunk) { | |
2310 | pr_err("cannot set chunksize of Faulty device\n"); | |
2311 | rv =1 ; | |
2312 | } | |
2313 | if (s->raiddisks && s->raiddisks != 1) { | |
2314 | pr_err("cannot set raid_disks of Faulty device\n"); | |
2315 | rv =1 ; | |
2316 | } | |
2317 | if (s->layout_str) { | |
2318 | if (md_get_array_info(fd, &array) != 0) { | |
2319 | dprintf("Cannot get array information.\n"); | |
2320 | goto release; | |
2321 | } | |
2322 | array.layout = info.new_layout; | |
2323 | if (md_set_array_info(fd, &array) != 0) { | |
2324 | pr_err("failed to set new layout\n"); | |
2325 | rv = 1; | |
2326 | } else if (c->verbose >= 0) | |
2327 | printf("layout for %s set to %d\n", | |
2328 | devname, array.layout); | |
2329 | } | |
2330 | } else if (is_container(array.level)) { | |
2331 | /* This change is to be applied to every array in the | |
2332 | * container. This is only needed when the metadata imposes | |
2333 | * restraints of the various arrays in the container. | |
2334 | * Currently we only know that IMSM requires all arrays | |
2335 | * to have the same number of devices so changing the | |
2336 | * number of devices (On-Line Capacity Expansion) must be | |
2337 | * performed at the level of the container | |
2338 | */ | |
2339 | close_fd(&fd); | |
2340 | rv = reshape_container(container, devname, -1, st, &info, | |
2341 | c->force, c->backup_file, c->verbose, | |
2342 | 0, 0, 0); | |
2343 | frozen = 0; | |
2344 | } else { | |
2345 | /* get spare devices from external metadata | |
2346 | */ | |
2347 | if (st->ss->external) { | |
2348 | struct mdinfo *info2; | |
2349 | ||
2350 | info2 = st->ss->container_content(st, subarray); | |
2351 | if (info2) { | |
2352 | info.array.spare_disks = | |
2353 | info2->array.spare_disks; | |
2354 | sysfs_free(info2); | |
2355 | } | |
2356 | } | |
2357 | ||
2358 | /* Impose these changes on a single array. First | |
2359 | * check that the metadata is OK with the change. */ | |
2360 | ||
2361 | if (reshape_super(st, 0, info.new_level, | |
2362 | info.new_layout, info.new_chunk, | |
2363 | info.array.raid_disks, info.delta_disks, | |
2364 | c->backup_file, devname, | |
2365 | APPLY_METADATA_CHANGES, c->verbose)) { | |
2366 | rv = 1; | |
2367 | goto release; | |
2368 | } | |
2369 | sync_metadata(st); | |
2370 | rv = reshape_array(container, fd, devname, st, &info, c->force, | |
2371 | devlist, s->data_offset, c->backup_file, | |
2372 | c->verbose, 0, 0, 0); | |
2373 | frozen = 0; | |
2374 | } | |
2375 | release: | |
2376 | sysfs_free(sra); | |
2377 | if (frozen > 0) | |
2378 | unfreeze(st); | |
2379 | return rv; | |
2380 | } | |
2381 | ||
2382 | /* verify_reshape_position() | |
2383 | * Function checks if reshape position in metadata is not farther | |
2384 | * than position in md. | |
2385 | * Return value: | |
2386 | * 0 : not valid sysfs entry | |
2387 | * it can be caused by not started reshape, it should be started | |
2388 | * by reshape array or raid0 array is before takeover | |
2389 | * -1 : error, reshape position is obviously wrong | |
2390 | * 1 : success, reshape progress correct or updated | |
2391 | */ | |
2392 | static int verify_reshape_position(struct mdinfo *info, int level) | |
2393 | { | |
2394 | int ret_val = 0; | |
2395 | char buf[SYSFS_MAX_BUF_SIZE]; | |
2396 | int rv; | |
2397 | ||
2398 | /* read sync_max, failure can mean raid0 array */ | |
2399 | rv = sysfs_get_str(info, NULL, "sync_max", buf, sizeof(buf)); | |
2400 | ||
2401 | if (rv > 0) { | |
2402 | char *ep; | |
2403 | unsigned long long position = strtoull(buf, &ep, 0); | |
2404 | ||
2405 | dprintf("Read sync_max sysfs entry is: %s\n", buf); | |
2406 | if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) { | |
2407 | position *= get_data_disks(level, | |
2408 | info->new_layout, | |
2409 | info->array.raid_disks); | |
2410 | if (info->reshape_progress < position) { | |
2411 | dprintf("Corrected reshape progress (%llu) to md position (%llu)\n", | |
2412 | info->reshape_progress, position); | |
2413 | info->reshape_progress = position; | |
2414 | ret_val = 1; | |
2415 | } else if (info->reshape_progress > position) { | |
2416 | pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n", | |
2417 | position, info->reshape_progress); | |
2418 | ret_val = -1; | |
2419 | } else { | |
2420 | dprintf("Reshape position in md and metadata are the same;"); | |
2421 | ret_val = 1; | |
2422 | } | |
2423 | } | |
2424 | } else if (rv == 0) { | |
2425 | /* for valid sysfs entry, 0-length content | |
2426 | * should be indicated as error | |
2427 | */ | |
2428 | ret_val = -1; | |
2429 | } | |
2430 | ||
2431 | return ret_val; | |
2432 | } | |
2433 | ||
static unsigned long long choose_offset(unsigned long long lo,
					unsigned long long hi,
					unsigned long long min,
					unsigned long long max)
{
	/* Pick a new offset between lo and hi.
	 * It must lie within [min, max], but we would prefer something
	 * near the middle of hi/lo, and also prefer alignment to a big
	 * power of 2.
	 *
	 * Start at the midpoint, then for each bit from '1' upward: if
	 * the bit is set in the current choice, round up or down by that
	 * bit where possible, preferring whichever result is furthest
	 * from the lo/hi boundary.
	 *
	 * Stop once 1MB alignment is reached.  Units are sectors, so
	 * 1MB = 2*1024 sectors.
	 */
	unsigned long long pos = (lo + hi) / 2;
	unsigned long long bit;

	for (bit = 1; bit < 2*1024; bit <<= 1) {
		unsigned long long up, down;

		if ((pos & bit) == 0)
			continue;
		up = pos + bit;
		down = pos - bit;
		if (up > max && down < min)
			break;
		if (up > max)
			pos = down;
		else if (down < min)
			pos = up;
		else
			pos = (hi - up > down - lo) ? up : down;
	}
	return pos;
}
2474 | ||
/* set_new_data_offset() - choose a new data offset for every member device
 * and publish it through the per-device "new_offset" sysfs attribute, so a
 * reshape can relocate data without a backup file.
 *
 * @sra:          sysfs view of the array; members in sra->devs.
 * @st:           metadata handle, duplicated per device to load superblocks.
 * @devname:      array name, used in error messages only.
 * @delta_disks:  change in raid_disks (<0 shrinking, >0 growing, 0 same).
 * @data_offset:  explicit offset requested, or INVALID_SECTORS to auto-pick.
 * @min:          minimum distance (sectors) the offset must move.
 * @can_fallback: if non-zero, quietly return 1 so the caller can fall back
 *                to a backup-file based reshape.
 *
 * Return: 0 on success; 1 when the caller should fall back to a backup
 * file (also for the no-devices case); -1 on fatal error; or a negative
 * sysfs error if writing "new_offset" failed part-way through the members.
 */
static int set_new_data_offset(struct mdinfo *sra, struct supertype *st,
			       char *devname, int delta_disks,
			       unsigned long long data_offset,
			       unsigned long long min,
			       int can_fallback)
{
	struct mdinfo *sd;
	int dir = 0;	/* 0 = undecided, 1 = offset moves up, -1 = moves down */
	int err = 0;
	unsigned long long before, after;

	/* Need to find min space before and after so same is used
	 * on all devices
	 */
	before = UINT64_MAX;
	after = UINT64_MAX;
	/* First pass: load each member's superblock to learn the available
	 * head/tail space, and sanity-check an explicitly requested offset
	 * against each device's current data_offset.
	 */
	for (sd = sra->devs; sd; sd = sd->next) {
		char *dn;
		int dfd;
		int rv;
		struct supertype *st2;
		struct mdinfo info2;

		if (sd->disk.state & (1<<MD_DISK_FAULTY))
			continue;
		dn = map_dev(sd->disk.major, sd->disk.minor, 0);
		dfd = dev_open(dn, O_RDONLY);
		if (dfd < 0) {
			pr_err("%s: cannot open component %s\n",
				devname, dn ? dn : "-unknown-");
			goto release;
		}
		st2 = dup_super(st);
		rv = st2->ss->load_super(st2,dfd, NULL);
		close(dfd);
		if (rv) {
			free(st2);
			pr_err("%s: cannot get superblock from %s\n",
				devname, dn);
			goto release;
		}
		st2->ss->getinfo_super(st2, &info2, NULL);
		st2->ss->free_super(st2);
		free(st2);
		if (info2.space_before == 0 &&
		    info2.space_after == 0) {
			/* Metadata doesn't support data_offset changes */
			if (!can_fallback)
				pr_err("%s: Metadata version doesn't support data_offset changes\n",
				       devname);
			goto fallback;
		}
		/* track the minimum space over all members */
		if (before > info2.space_before)
			before = info2.space_before;
		if (after > info2.space_after)
			after = info2.space_after;

		if (data_offset != INVALID_SECTORS) {
			/* An explicit offset fixes the direction of
			 * movement; all members must agree with it.
			 */
			if (dir == 0) {
				if (info2.data_offset == data_offset) {
					pr_err("%s: already has that data_offset\n",
					       dn);
					goto release;
				}
				if (data_offset < info2.data_offset)
					dir = -1;
				else
					dir = 1;
			} else if ((data_offset <= info2.data_offset &&
				    dir == 1) ||
				   (data_offset >= info2.data_offset &&
				    dir == -1)) {
				pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n",
					dn);
				goto release;
			}
		}
	}
	if (before == UINT64_MAX)
		/* impossible really, there must be no devices */
		return 1;

	/* Second pass: compute each member's new offset and write it to
	 * the "new_offset" sysfs attribute.
	 */
	for (sd = sra->devs; sd; sd = sd->next) {
		char *dn = map_dev(sd->disk.major, sd->disk.minor, 0);
		unsigned long long new_data_offset;

		if (sd->disk.state & (1<<MD_DISK_FAULTY))
			continue;
		if (delta_disks < 0) {
			/* Don't need any space as array is shrinking
			 * just move data_offset up by min
			 */
			if (data_offset == INVALID_SECTORS)
				new_data_offset = sd->data_offset + min;
			else {
				if (data_offset < sd->data_offset + min) {
					pr_err("--data-offset too small for %s\n",
						dn);
					goto release;
				}
				new_data_offset = data_offset;
			}
		} else if (delta_disks > 0) {
			/* need space before */
			if (before < min) {
				if (can_fallback)
					goto fallback;
				pr_err("Insufficient head-space for reshape on %s\n",
					dn);
				goto release;
			}
			if (data_offset == INVALID_SECTORS)
				new_data_offset = sd->data_offset - min;
			else {
				if (data_offset > sd->data_offset - min) {
					pr_err("--data-offset too large for %s\n",
						dn);
					goto release;
				}
				new_data_offset = data_offset;
			}
		} else {
			if (dir == 0) {
				/* can move up or down. If 'data_offset'
				 * was set we would have already decided,
				 * so just choose direction with most space.
				 */
				if (before > after)
					dir = -1;
				else
					dir = 1;
			}
			sysfs_set_str(sra, NULL, "reshape_direction",
				      dir == 1 ? "backwards" : "forwards");
			if (dir > 0) {
				/* Increase data offset */
				if (after < min) {
					if (can_fallback)
						goto fallback;
					pr_err("Insufficient tail-space for reshape on %s\n",
						dn);
					goto release;
				}
				if (data_offset != INVALID_SECTORS &&
				    data_offset < sd->data_offset + min) {
					pr_err("--data-offset too small on %s\n",
						dn);
					goto release;
				}
				if (data_offset != INVALID_SECTORS)
					new_data_offset = data_offset;
				else
					new_data_offset = choose_offset(sd->data_offset,
									sd->data_offset + after,
									sd->data_offset + min,
									sd->data_offset + after);
			} else {
				/* Decrease data offset */
				if (before < min) {
					if (can_fallback)
						goto fallback;
					pr_err("insufficient head-room on %s\n",
						dn);
					goto release;
				}
				if (data_offset != INVALID_SECTORS &&
				    data_offset > sd->data_offset - min) {
					pr_err("--data-offset too large on %s\n",
						dn);
					goto release;
				}
				if (data_offset != INVALID_SECTORS)
					new_data_offset = data_offset;
				else
					new_data_offset = choose_offset(sd->data_offset - before,
									sd->data_offset,
									sd->data_offset - before,
									sd->data_offset - min);
			}
		}
		err = sysfs_set_num(sra, sd, "new_offset", new_data_offset);
		if (err < 0 && errno == E2BIG) {
			/* try again after increasing data size to max */
			err = sysfs_set_num(sra, sd, "size", 0);
			if (err < 0 && errno == EINVAL &&
			    !(sd->disk.state & (1<<MD_DISK_SYNC))) {
				/* some kernels have a bug where you cannot
				 * use '0' on spare devices. */
				sysfs_set_num(sra, sd, "size",
					      (sra->component_size + after)/2);
			}
			err = sysfs_set_num(sra, sd, "new_offset",
					    new_data_offset);
		}
		if (err < 0) {
			if (errno == E2BIG && data_offset != INVALID_SECTORS) {
				pr_err("data-offset is too big for %s\n", dn);
				goto release;
			}
			if (sd == sra->devs &&
			    (errno == ENOENT || errno == E2BIG))
				/* Early kernel, no 'new_offset' file,
				 * or kernel doesn't like us.
				 * For RAID5/6 this is not fatal
				 */
				return 1;
			pr_err("Cannot set new_offset for %s\n", dn);
			break;
		}
	}
	return err;
release:
	return -1;
fallback:
	/* Just use a backup file */
	return 1;
}
2692 | ||
/* raid10_reshape() - start a reshape of a RAID10 array.
 * Returns 0 when the reshape was started, 1 on failure.
 */
static int raid10_reshape(char *container, int fd, char *devname,
			  struct supertype *st, struct mdinfo *info,
			  struct reshape *reshape,
			  unsigned long long data_offset,
			  int force, int verbose)
{
	/* Changing raid_disks, layout, chunksize or possibly
	 * just data_offset for a RAID10.
	 * We must always change data_offset. We change by at least
	 * ->min_offset_change which is the largest of the old and new
	 * chunk sizes.
	 * If raid_disks is increasing, then data_offset must decrease
	 * by at least this copy size.
	 * If raid_disks is unchanged, data_offset must increase or
	 * decrease by at least min_offset_change but preferably by much more.
	 * We choose half of the available space.
	 * If raid_disks is decreasing, data_offset must increase by
	 * at least min_offset_change. To allow of this, component_size
	 * must be decreased by the same amount.
	 *
	 * So we calculate the required minimum and direction, possibly
	 * reduce the component_size, then iterate through the devices
	 * and set the new_data_offset.
	 * If that all works, we set chunk_size, layout, raid_disks, and start
	 * 'reshape'
	 */
	struct mdinfo *sra;
	unsigned long long min;	/* minimum data_offset movement, in sectors */
	int err = 0;

	sra = sysfs_read(fd, NULL,
			 GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK
		);
	if (!sra) {
		pr_err("%s: Cannot get array details from sysfs\n", devname);
		goto release;
	}
	min = reshape->min_offset_change;

	if (info->delta_disks)
		sysfs_set_str(sra, NULL, "reshape_direction",
			      info->delta_disks < 0 ? "backwards" : "forwards");
	/* shrinking with no tail space: make room by reducing
	 * component_size first
	 */
	if (info->delta_disks < 0 && info->space_after < min) {
		int rv = sysfs_set_num(sra, NULL, "component_size",
				       (sra->component_size - min)/2);
		if (rv) {
			pr_err("cannot reduce component size\n");
			goto release;
		}
	}
	/* can_fallback=0: RAID10 has no backup-file alternative */
	err = set_new_data_offset(sra, st, devname, info->delta_disks,
				  data_offset, min, 0);
	if (err == 1) {
		pr_err("Cannot set new_data_offset: RAID10 reshape not\n");
		cont_err("supported on this kernel\n");
		err = -1;
	}
	if (err < 0)
		goto release;

	/* Apply the remaining shape parameters, then kick off the reshape.
	 * err accumulates the first errno from a failed sysfs write.
	 */
	if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
		err = errno;
	if (!err && sysfs_set_num(sra, NULL, "layout",
				  reshape->after.layout) < 0)
		err = errno;
	if (!err &&
	    sysfs_set_num(sra, NULL, "raid_disks",
			  info->array.raid_disks + info->delta_disks) < 0)
		err = errno;
	if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0)
		err = errno;
	if (err) {
		pr_err("Cannot set array shape for %s\n",
		       devname);
		if (err == EBUSY &&
		    (info->array.state & (1<<MD_SB_BITMAP_PRESENT)))
			cont_err(" Bitmap must be removed before shape can be changed\n");
		goto release;
	}
	sysfs_free(sra);
	return 0;
release:
	sysfs_free(sra);
	return 1;
}
2778 | ||
2779 | static void get_space_after(int fd, struct supertype *st, struct mdinfo *info) | |
2780 | { | |
2781 | struct mdinfo *sra, *sd; | |
2782 | /* Initialisation to silence compiler warning */ | |
2783 | unsigned long long min_space_before = 0, min_space_after = 0; | |
2784 | int first = 1; | |
2785 | ||
2786 | sra = sysfs_read(fd, NULL, GET_DEVS); | |
2787 | if (!sra) | |
2788 | return; | |
2789 | for (sd = sra->devs; sd; sd = sd->next) { | |
2790 | char *dn; | |
2791 | int dfd; | |
2792 | struct supertype *st2; | |
2793 | struct mdinfo info2; | |
2794 | ||
2795 | if (sd->disk.state & (1<<MD_DISK_FAULTY)) | |
2796 | continue; | |
2797 | dn = map_dev(sd->disk.major, sd->disk.minor, 0); | |
2798 | dfd = dev_open(dn, O_RDONLY); | |
2799 | if (dfd < 0) | |
2800 | break; | |
2801 | st2 = dup_super(st); | |
2802 | if (st2->ss->load_super(st2,dfd, NULL)) { | |
2803 | close(dfd); | |
2804 | free(st2); | |
2805 | break; | |
2806 | } | |
2807 | close(dfd); | |
2808 | st2->ss->getinfo_super(st2, &info2, NULL); | |
2809 | st2->ss->free_super(st2); | |
2810 | free(st2); | |
2811 | if (first || | |
2812 | min_space_before > info2.space_before) | |
2813 | min_space_before = info2.space_before; | |
2814 | if (first || | |
2815 | min_space_after > info2.space_after) | |
2816 | min_space_after = info2.space_after; | |
2817 | first = 0; | |
2818 | } | |
2819 | if (sd == NULL && !first) { | |
2820 | info->space_after = min_space_after; | |
2821 | info->space_before = min_space_before; | |
2822 | } | |
2823 | sysfs_free(sra); | |
2824 | } | |
2825 | ||
2826 | static void update_cache_size(char *container, struct mdinfo *sra, | |
2827 | struct mdinfo *info, | |
2828 | int disks, unsigned long long blocks) | |
2829 | { | |
2830 | /* Check that the internal stripe cache is | |
2831 | * large enough, or it won't work. | |
2832 | * It must hold at least 4 stripes of the larger | |
2833 | * chunk size | |
2834 | */ | |
2835 | unsigned long cache; | |
2836 | cache = max(info->array.chunk_size, info->new_chunk); | |
2837 | cache *= 4; /* 4 stripes minimum */ | |
2838 | cache /= 512; /* convert to sectors */ | |
2839 | /* make sure there is room for 'blocks' with a bit to spare */ | |
2840 | if (cache < 16 + blocks / disks) | |
2841 | cache = 16 + blocks / disks; | |
2842 | cache /= (4096/512); /* Convert from sectors to pages */ | |
2843 | ||
2844 | if (sra->cache_size < cache) | |
2845 | subarray_set_num(container, sra, "stripe_cache_size", | |
2846 | cache+1); | |
2847 | } | |
2848 | ||
/*
 * impose_reshape() - push the new array geometry into the kernel.
 *
 * Sets the reshape starting position on @sra, then applies the new
 * raid_disks/layout/chunk_size either via the SET_ARRAY_INFO ioctl
 * (native metadata, raid_disks-only change) or via sysfs (everything
 * else).  Returns 0 on success, -1 on failure (error already printed).
 */
static int impose_reshape(struct mdinfo *sra,
			  struct mdinfo *info,
			  struct supertype *st,
			  int fd,
			  int restart,
			  char *devname, char *container,
			  struct reshape *reshape)
{
	struct mdu_array_info_s array;

	sra->new_chunk = info->new_chunk;

	if (restart) {
		/* for external metadata checkpoint saved by mdmon can be lost
		 * or missed /due to e.g. crash/. Check if md is not during
		 * restart farther than metadata points to.
		 * If so, this means metadata information is obsolete.
		 */
		if (st->ss->external)
			verify_reshape_position(info, reshape->level);
		sra->reshape_progress = info->reshape_progress;
	} else {
		sra->reshape_progress = 0;
		if (reshape->after.data_disks < reshape->before.data_disks)
			/* start from the end of the new array */
			sra->reshape_progress = (sra->component_size
						 * reshape->after.data_disks);
	}

	md_get_array_info(fd, &array);
	if (info->array.chunk_size == info->new_chunk &&
	    reshape->before.layout == reshape->after.layout &&
	    st->ss->external == 0) {
		/* use SET_ARRAY_INFO but only if reshape hasn't started */
		array.raid_disks = reshape->after.data_disks + reshape->parity;
		if (!restart && md_set_array_info(fd, &array) != 0) {
			int err = errno;	/* save before pr_err can clobber */

			pr_err("Cannot set device shape for %s: %s\n",
			       devname, strerror(errno));

			if (err == EBUSY &&
			    (array.state & (1<<MD_SB_BITMAP_PRESENT)))
				cont_err("Bitmap must be removed before shape can be changed\n");

			goto release;
		}
	} else if (!restart) {
		/* set them all just in case some old 'new_*' value
		 * persists from some earlier problem.
		 */
		int err = 0;
		/* First failing write wins; later writes are skipped so the
		 * original errno is the one reported. */
		if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0)
			err = errno;
		if (!err && sysfs_set_num(sra, NULL, "layout",
					  reshape->after.layout) < 0)
			err = errno;
		if (!err && subarray_set_num(container, sra, "raid_disks",
					     reshape->after.data_disks +
					     reshape->parity) < 0)
			err = errno;
		if (err) {
			pr_err("Cannot set device shape for %s\n", devname);

			if (err == EBUSY &&
			    (array.state & (1<<MD_SB_BITMAP_PRESENT)))
				cont_err("Bitmap must be removed before shape can be changed\n");
			goto release;
		}
	}
	return 0;
release:
	return -1;
}
2923 | ||
/*
 * impose_level() - change the personality ("level") of a running array.
 *
 * For a takeover to RAID0 from RAID4/5/6, the kernel will only accept the
 * change once every non-data device (parity disks and spares) has been
 * removed, so those are stripped out first.  Returns 0 on success, 1 on
 * sysfs-init failure, -1 for an unsupported layout, or errno from the
 * level write.
 */
static int impose_level(int fd, int level, char *devname, int verbose)
{
	char *c;
	struct mdu_array_info_s array;
	struct mdinfo info;

	if (sysfs_init(&info, fd, NULL)) {
		pr_err("failed to initialize sysfs.\n");
		return 1;
	}

	md_get_array_info(fd, &array);
	if (level == 0 && is_level456(array.level)) {
		/* To convert to RAID0 we need to fail and
		 * remove any non-data devices. */
		int found = 0;
		int d;
		int data_disks = array.raid_disks - 1;
		if (array.level == 6)
			data_disks -= 1;	/* RAID6 has two parity devices */
		/* Takeover to RAID0 only works from the parity-last layouts. */
		if (array.level == 5 && array.layout != ALGORITHM_PARITY_N)
			return -1;
		if (array.level == 6 && array.layout != ALGORITHM_PARITY_N_6)
			return -1;
		sysfs_set_str(&info, NULL,"sync_action", "idle");
		/* First remove any spares so no recovery starts */
		for (d = 0, found = 0;
		     d < MAX_DISKS && found < array.nr_disks; d++) {
			mdu_disk_info_t disk;
			disk.number = d;
			if (md_get_disk_info(fd, &disk) < 0)
				continue;
			if (disk.major == 0 && disk.minor == 0)
				continue;	/* empty slot */
			found++;
			if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
			    disk.raid_disk < data_disks)
				/* keep this */
				continue;
			/* best-effort removal; failures are resolved below */
			ioctl(fd, HOT_REMOVE_DISK,
			      makedev(disk.major, disk.minor));
		}
		/* Now fail anything left */
		md_get_array_info(fd, &array);
		for (d = 0, found = 0;
		     d < MAX_DISKS && found < array.nr_disks; d++) {
			mdu_disk_info_t disk;
			disk.number = d;
			if (md_get_disk_info(fd, &disk) < 0)
				continue;
			if (disk.major == 0 && disk.minor == 0)
				continue;
			found++;
			if ((disk.state & (1 << MD_DISK_ACTIVE)) &&
			    disk.raid_disk < data_disks)
				/* keep this */
				continue;
			/* mark faulty, then remove (with retries) */
			ioctl(fd, SET_DISK_FAULTY,
			      makedev(disk.major, disk.minor));
			hot_remove_disk(fd, makedev(disk.major, disk.minor), 1);
		}
	}
	c = map_num(pers, level);	/* numeric level -> personality name */
	if (c) {
		int err = sysfs_set_str(&info, NULL, "level", c);
		if (err) {
			err = errno;	/* capture errno before printing */
			pr_err("%s: could not set level to %s\n",
				devname, c);
			if (err == EBUSY &&
			    (array.state & (1<<MD_SB_BITMAP_PRESENT)))
				cont_err("Bitmap must be removed before level can be changed\n");
			return err;
		}
		if (verbose >= 0)
			pr_err("level of %s changed to %s\n", devname, c);
	}
	return 0;
}
3003 | ||
/* Set to 1 by catch_term() when SIGTERM arrives; polled by the reshape
 * monitoring code.
 * NOTE(review): strictly-conforming code would use volatile sig_atomic_t
 * for a flag written from a signal handler; left as plain int since it
 * may be declared extern elsewhere — confirm before changing the type. */
int sigterm = 0;
/* SIGTERM handler: record the signal; all real work happens in the
 * main flow of control. */
static void catch_term(int sig)
{
	sigterm = 1;
}
3009 | ||
3010 | static int reshape_array(char *container, int fd, char *devname, | |
3011 | struct supertype *st, struct mdinfo *info, | |
3012 | int force, struct mddev_dev *devlist, | |
3013 | unsigned long long data_offset, | |
3014 | char *backup_file, int verbose, int forked, | |
3015 | int restart, int freeze_reshape) | |
3016 | { | |
3017 | struct reshape reshape; | |
3018 | int spares_needed; | |
3019 | char *msg; | |
3020 | int orig_level = UnSet; | |
3021 | int odisks; | |
3022 | int delayed; | |
3023 | ||
3024 | struct mdu_array_info_s array; | |
3025 | char *c; | |
3026 | ||
3027 | struct mddev_dev *dv; | |
3028 | int added_disks; | |
3029 | ||
3030 | int *fdlist = NULL; | |
3031 | unsigned long long *offsets = NULL; | |
3032 | int d; | |
3033 | int nrdisks; | |
3034 | int err; | |
3035 | unsigned long blocks; | |
3036 | unsigned long long array_size; | |
3037 | int done; | |
3038 | struct mdinfo *sra = NULL; | |
3039 | char buf[SYSFS_MAX_BUF_SIZE]; | |
3040 | ||
3041 | /* when reshaping a RAID0, the component_size might be zero. | |
3042 | * So try to fix that up. | |
3043 | */ | |
3044 | if (md_get_array_info(fd, &array) != 0) { | |
3045 | dprintf("Cannot get array information.\n"); | |
3046 | goto release; | |
3047 | } | |
3048 | if (st->update_tail == NULL) | |
3049 | st->update_tail = &st->updates; | |
3050 | if (array.level == 0 && info->component_size == 0) { | |
3051 | get_dev_size(fd, NULL, &array_size); | |
3052 | info->component_size = array_size / array.raid_disks; | |
3053 | } | |
3054 | ||
3055 | if (array.level == 10) | |
3056 | /* Need space_after info */ | |
3057 | get_space_after(fd, st, info); | |
3058 | ||
3059 | if (info->reshape_active) { | |
3060 | int new_level = info->new_level; | |
3061 | info->new_level = UnSet; | |
3062 | if (info->delta_disks > 0) | |
3063 | info->array.raid_disks -= info->delta_disks; | |
3064 | msg = analyse_change(devname, info, &reshape); | |
3065 | info->new_level = new_level; | |
3066 | if (info->delta_disks > 0) | |
3067 | info->array.raid_disks += info->delta_disks; | |
3068 | if (!restart) | |
3069 | /* Make sure the array isn't read-only */ | |
3070 | ioctl(fd, RESTART_ARRAY_RW, 0); | |
3071 | } else | |
3072 | msg = analyse_change(devname, info, &reshape); | |
3073 | if (msg) { | |
3074 | /* if msg == "", error has already been printed */ | |
3075 | if (msg[0]) | |
3076 | pr_err("%s\n", msg); | |
3077 | goto release; | |
3078 | } | |
3079 | if (restart && (reshape.level != info->array.level || | |
3080 | reshape.before.layout != info->array.layout || | |
3081 | reshape.before.data_disks + reshape.parity != | |
3082 | info->array.raid_disks - max(0, info->delta_disks))) { | |
3083 | pr_err("reshape info is not in native format - cannot continue.\n"); | |
3084 | goto release; | |
3085 | } | |
3086 | ||
3087 | if (st->ss->external && restart && (info->reshape_progress == 0) && | |
3088 | !((sysfs_get_str(info, NULL, "sync_action", | |
3089 | buf, sizeof(buf)) > 0) && | |
3090 | (strncmp(buf, "reshape", 7) == 0))) { | |
3091 | /* When reshape is restarted from '0', very begin of array | |
3092 | * it is possible that for external metadata reshape and array | |
3093 | * configuration doesn't happen. | |
3094 | * Check if md has the same opinion, and reshape is restarted | |
3095 | * from 0. If so, this is regular reshape start after reshape | |
3096 | * switch in metadata to next array only. | |
3097 | */ | |
3098 | if ((verify_reshape_position(info, reshape.level) >= 0) && | |
3099 | (info->reshape_progress == 0)) | |
3100 | restart = 0; | |
3101 | } | |
3102 | if (restart) { | |
3103 | /* | |
3104 | * reshape already started. just skip to monitoring | |
3105 | * the reshape | |
3106 | */ | |
3107 | if (reshape.backup_blocks == 0) | |
3108 | return 0; | |
3109 | if (restart & RESHAPE_NO_BACKUP) | |
3110 | return 0; | |
3111 | ||
3112 | /* Need 'sra' down at 'started:' */ | |
3113 | sra = sysfs_read(fd, NULL, | |
3114 | GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE| | |
3115 | GET_CHUNK|GET_CACHE); | |
3116 | if (!sra) { | |
3117 | pr_err("%s: Cannot get array details from sysfs\n", | |
3118 | devname); | |
3119 | goto release; | |
3120 | } | |
3121 | ||
3122 | if (!backup_file) | |
3123 | backup_file = locate_backup(sra->sys_name); | |
3124 | ||
3125 | goto started; | |
3126 | } | |
3127 | /* The container is frozen but the array may not be. | |
3128 | * So freeze the array so spares don't get put to the wrong use | |
3129 | * FIXME there should probably be a cleaner separation between | |
3130 | * freeze_array and freeze_container. | |
3131 | */ | |
3132 | sysfs_freeze_array(info); | |
3133 | /* Check we have enough spares to not be degraded */ | |
3134 | added_disks = 0; | |
3135 | for (dv = devlist; dv ; dv=dv->next) | |
3136 | added_disks++; | |
3137 | spares_needed = max(reshape.before.data_disks, | |
3138 | reshape.after.data_disks) + | |
3139 | reshape.parity - array.raid_disks; | |
3140 | ||
3141 | if (!force && info->new_level > 1 && info->array.level > 1 && | |
3142 | spares_needed > info->array.spare_disks + added_disks) { | |
3143 | pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n" | |
3144 | " Use --force to over-ride this check.\n", | |
3145 | spares_needed, | |
3146 | spares_needed == 1 ? "" : "s", | |
3147 | info->array.spare_disks + added_disks); | |
3148 | goto release; | |
3149 | } | |
3150 | /* Check we have enough spares to not fail */ | |
3151 | spares_needed = max(reshape.before.data_disks, | |
3152 | reshape.after.data_disks) | |
3153 | - array.raid_disks; | |
3154 | if ((info->new_level > 1 || info->new_level == 0) && | |
3155 | spares_needed > info->array.spare_disks +added_disks) { | |
3156 | pr_err("Need %d spare%s to create working array, and only have %d.\n", | |
3157 | spares_needed, spares_needed == 1 ? "" : "s", | |
3158 | info->array.spare_disks + added_disks); | |
3159 | goto release; | |
3160 | } | |
3161 | ||
3162 | if (reshape.level != array.level) { | |
3163 | int err = impose_level(fd, reshape.level, devname, verbose); | |
3164 | if (err) | |
3165 | goto release; | |
3166 | info->new_layout = UnSet; /* after level change, | |
3167 | * layout is meaningless */ | |
3168 | orig_level = array.level; | |
3169 | sysfs_freeze_array(info); | |
3170 | ||
3171 | if (reshape.level > 0 && st->ss->external) { | |
3172 | /* make sure mdmon is aware of the new level */ | |
3173 | if (mdmon_running(container)) | |
3174 | flush_mdmon(container); | |
3175 | ||
3176 | if (!mdmon_running(container)) | |
3177 | start_mdmon(container); | |
3178 | ping_monitor(container); | |
3179 | if (mdmon_running(container) && st->update_tail == NULL) | |
3180 | st->update_tail = &st->updates; | |
3181 | } | |
3182 | } | |
3183 | /* ->reshape_super might have chosen some spares from the | |
3184 | * container that it wants to be part of the new array. | |
3185 | * We can collect them with ->container_content and give | |
3186 | * them to the kernel. | |
3187 | */ | |
3188 | if (st->ss->reshape_super && st->ss->container_content) { | |
3189 | char *subarray = strchr(info->text_version+1, '/')+1; | |
3190 | struct mdinfo *info2 = | |
3191 | st->ss->container_content(st, subarray); | |
3192 | struct mdinfo *d; | |
3193 | ||
3194 | if (info2) { | |
3195 | if (sysfs_init(info2, fd, st->devnm)) { | |
3196 | pr_err("unable to initialize sysfs for %s\n", | |
3197 | st->devnm); | |
3198 | free(info2); | |
3199 | goto release; | |
3200 | } | |
3201 | /* When increasing number of devices, we need to set | |
3202 | * new raid_disks before adding these, or they might | |
3203 | * be rejected. | |
3204 | */ | |
3205 | if (reshape.backup_blocks && | |
3206 | reshape.after.data_disks > | |
3207 | reshape.before.data_disks) | |
3208 | subarray_set_num(container, info2, "raid_disks", | |
3209 | reshape.after.data_disks + | |
3210 | reshape.parity); | |
3211 | for (d = info2->devs; d; d = d->next) { | |
3212 | if (d->disk.state == 0 && | |
3213 | d->disk.raid_disk >= 0) { | |
3214 | /* This is a spare that wants to | |
3215 | * be part of the array. | |
3216 | */ | |
3217 | add_disk(fd, st, info2, d); | |
3218 | } | |
3219 | } | |
3220 | sysfs_free(info2); | |
3221 | } | |
3222 | } | |
3223 | /* We might have been given some devices to add to the | |
3224 | * array. Now that the array has been changed to the right | |
3225 | * level and frozen, we can safely add them. | |
3226 | */ | |
3227 | if (devlist) { | |
3228 | if (Manage_subdevs(devname, fd, devlist, verbose, 0, UOPT_UNDEFINED, 0)) | |
3229 | goto release; | |
3230 | } | |
3231 | ||
3232 | if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS) | |
3233 | reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512; | |
3234 | if (reshape.backup_blocks == 0) { | |
3235 | /* No restriping needed, but we might need to impose | |
3236 | * some more changes: layout, raid_disks, chunk_size | |
3237 | */ | |
3238 | /* read current array info */ | |
3239 | if (md_get_array_info(fd, &array) != 0) { | |
3240 | dprintf("Cannot get array information.\n"); | |
3241 | goto release; | |
3242 | } | |
3243 | /* compare current array info with new values and if | |
3244 | * it is different update them to new */ | |
3245 | if (info->new_layout != UnSet && | |
3246 | info->new_layout != array.layout) { | |
3247 | array.layout = info->new_layout; | |
3248 | if (md_set_array_info(fd, &array) != 0) { | |
3249 | pr_err("failed to set new layout\n"); | |
3250 | goto release; | |
3251 | } else if (verbose >= 0) | |
3252 | printf("layout for %s set to %d\n", | |
3253 | devname, array.layout); | |
3254 | } | |
3255 | if (info->delta_disks != UnSet && info->delta_disks != 0 && | |
3256 | array.raid_disks != | |
3257 | (info->array.raid_disks + info->delta_disks)) { | |
3258 | array.raid_disks += info->delta_disks; | |
3259 | if (md_set_array_info(fd, &array) != 0) { | |
3260 | pr_err("failed to set raid disks\n"); | |
3261 | goto release; | |
3262 | } else if (verbose >= 0) { | |
3263 | printf("raid_disks for %s set to %d\n", | |
3264 | devname, array.raid_disks); | |
3265 | } | |
3266 | } | |
3267 | if (info->new_chunk != 0 && | |
3268 | info->new_chunk != array.chunk_size) { | |
3269 | if (sysfs_set_num(info, NULL, | |
3270 | "chunk_size", info->new_chunk) != 0) { | |
3271 | pr_err("failed to set chunk size\n"); | |
3272 | goto release; | |
3273 | } else if (verbose >= 0) | |
3274 | printf("chunk size for %s set to %d\n", | |
3275 | devname, info->new_chunk); | |
3276 | } | |
3277 | unfreeze(st); | |
3278 | return 0; | |
3279 | } | |
3280 | ||
3281 | /* | |
3282 | * There are three possibilities. | |
3283 | * 1/ The array will shrink. | |
3284 | * We need to ensure the reshape will pause before reaching | |
3285 | * the 'critical section'. We also need to fork and wait for | |
3286 | * that to happen. When it does we | |
3287 | * suspend/backup/complete/unfreeze | |
3288 | * | |
3289 | * 2/ The array will not change size. | |
3290 | * This requires that we keep a backup of a sliding window | |
3291 | * so that we can restore data after a crash. So we need | |
3292 | * to fork and monitor progress. | |
3293 | * In future we will allow the data_offset to change, so | |
3294 | * a sliding backup becomes unnecessary. | |
3295 | * | |
3296 | * 3/ The array will grow. This is relatively easy. | |
3297 | * However the kernel's restripe routines will cheerfully | |
3298 | * overwrite some early data before it is safe. So we | |
3299 | * need to make a backup of the early parts of the array | |
3300 | * and be ready to restore it if rebuild aborts very early. | |
3301 | * For externally managed metadata, we still need a forked | |
3302 | * child to monitor the reshape and suspend IO over the region | |
3303 | * that is being reshaped. | |
3304 | * | |
3305 | * We backup data by writing it to one spare, or to a | |
3306 | * file which was given on command line. | |
3307 | * | |
3308 | * In each case, we first make sure that storage is available | |
3309 | * for the required backup. | |
3310 | * Then we: | |
3311 | * - request the shape change. | |
3312 | * - fork to handle backup etc. | |
3313 | */ | |
3314 | /* Check that we can hold all the data */ | |
3315 | get_dev_size(fd, NULL, &array_size); | |
3316 | if (reshape.new_size < (array_size/512)) { | |
3317 | pr_err("this change will reduce the size of the array.\n" | |
3318 | " use --grow --array-size first to truncate array.\n" | |
3319 | " e.g. mdadm --grow %s --array-size %llu\n", | |
3320 | devname, reshape.new_size/2); | |
3321 | goto release; | |
3322 | } | |
3323 | ||
3324 | if (array.level == 10) { | |
3325 | /* Reshaping RAID10 does not require any data backup by | |
3326 | * user-space. Instead it requires that the data_offset | |
3327 | * is changed to avoid the need for backup. | |
3328 | * So this is handled very separately | |
3329 | */ | |
3330 | if (restart) | |
3331 | /* Nothing to do. */ | |
3332 | return 0; | |
3333 | return raid10_reshape(container, fd, devname, st, info, | |
3334 | &reshape, data_offset, force, verbose); | |
3335 | } | |
3336 | sra = sysfs_read(fd, NULL, | |
3337 | GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK| | |
3338 | GET_CACHE); | |
3339 | if (!sra) { | |
3340 | pr_err("%s: Cannot get array details from sysfs\n", | |
3341 | devname); | |
3342 | goto release; | |
3343 | } | |
3344 | ||
3345 | if (!backup_file) | |
3346 | switch(set_new_data_offset(sra, st, devname, | |
3347 | reshape.after.data_disks - reshape.before.data_disks, | |
3348 | data_offset, | |
3349 | reshape.min_offset_change, 1)) { | |
3350 | case -1: | |
3351 | goto release; | |
3352 | case 0: | |
3353 | /* Updated data_offset, so it's easy now */ | |
3354 | update_cache_size(container, sra, info, | |
3355 | min(reshape.before.data_disks, | |
3356 | reshape.after.data_disks), | |
3357 | reshape.backup_blocks); | |
3358 | ||
3359 | /* Right, everything seems fine. Let's kick things off. | |
3360 | */ | |
3361 | sync_metadata(st); | |
3362 | ||
3363 | if (impose_reshape(sra, info, st, fd, restart, | |
3364 | devname, container, &reshape) < 0) | |
3365 | goto release; | |
3366 | if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) { | |
3367 | struct mdinfo *sd; | |
3368 | if (errno != EINVAL) { | |
3369 | pr_err("Failed to initiate reshape!\n"); | |
3370 | goto release; | |
3371 | } | |
3372 | /* revert data_offset and try the old way */ | |
3373 | for (sd = sra->devs; sd; sd = sd->next) { | |
3374 | sysfs_set_num(sra, sd, "new_offset", | |
3375 | sd->data_offset); | |
3376 | sysfs_set_str(sra, NULL, "reshape_direction", | |
3377 | "forwards"); | |
3378 | } | |
3379 | break; | |
3380 | } | |
3381 | if (info->new_level == reshape.level) | |
3382 | return 0; | |
3383 | /* need to adjust level when reshape completes */ | |
3384 | switch(fork()) { | |
3385 | case -1: /* ignore error, but don't wait */ | |
3386 | return 0; | |
3387 | default: /* parent */ | |
3388 | return 0; | |
3389 | case 0: | |
3390 | manage_fork_fds(0); | |
3391 | map_fork(); | |
3392 | break; | |
3393 | } | |
3394 | close(fd); | |
3395 | wait_reshape(sra); | |
3396 | fd = open_dev(sra->sys_name); | |
3397 | if (fd >= 0) | |
3398 | impose_level(fd, info->new_level, devname, verbose); | |
3399 | return 0; | |
3400 | case 1: /* Couldn't set data_offset, try the old way */ | |
3401 | if (data_offset != INVALID_SECTORS) { | |
3402 | pr_err("Cannot update data_offset on this array\n"); | |
3403 | goto release; | |
3404 | } | |
3405 | break; | |
3406 | } | |
3407 | ||
3408 | started: | |
3409 | /* Decide how many blocks (sectors) for a reshape | |
3410 | * unit. The number we have so far is just a minimum | |
3411 | */ | |
3412 | blocks = reshape.backup_blocks; | |
3413 | if (reshape.before.data_disks == | |
3414 | reshape.after.data_disks) { | |
3415 | /* Make 'blocks' bigger for better throughput, but | |
3416 | * not so big that we reject it below. | |
3417 | * Try for 16 megabytes | |
3418 | */ | |
3419 | while (blocks * 32 < sra->component_size && blocks < 16*1024*2) | |
3420 | blocks *= 2; | |
3421 | } else | |
3422 | pr_err("Need to backup %luK of critical section..\n", blocks/2); | |
3423 | ||
3424 | if (blocks >= sra->component_size/2) { | |
3425 | pr_err("%s: Something wrong - reshape aborted\n", devname); | |
3426 | goto release; | |
3427 | } | |
3428 | ||
3429 | /* Now we need to open all these devices so we can read/write. | |
3430 | */ | |
3431 | nrdisks = max(reshape.before.data_disks, | |
3432 | reshape.after.data_disks) + reshape.parity | |
3433 | + sra->array.spare_disks; | |
3434 | fdlist = xcalloc((1+nrdisks), sizeof(int)); | |
3435 | offsets = xcalloc((1+nrdisks), sizeof(offsets[0])); | |
3436 | ||
3437 | odisks = reshape.before.data_disks + reshape.parity; | |
3438 | d = reshape_prepare_fdlist(devname, sra, odisks, nrdisks, blocks, | |
3439 | backup_file, fdlist, offsets); | |
3440 | if (d < odisks) { | |
3441 | goto release; | |
3442 | } | |
3443 | if ((st->ss->manage_reshape == NULL) || | |
3444 | (st->ss->recover_backup == NULL)) { | |
3445 | if (backup_file == NULL) { | |
3446 | if (reshape.after.data_disks <= | |
3447 | reshape.before.data_disks) { | |
3448 | pr_err("%s: Cannot grow - need backup-file\n", | |
3449 | devname); | |
3450 | pr_err(" Please provide one with \"--backup=...\"\n"); | |
3451 | goto release; | |
3452 | } else if (d == odisks) { | |
3453 | pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname); | |
3454 | goto release; | |
3455 | } | |
3456 | } else { | |
3457 | if (!reshape_open_backup_file(backup_file, fd, devname, | |
3458 | (signed)blocks, | |
3459 | fdlist+d, offsets+d, | |
3460 | sra->sys_name, restart)) { | |
3461 | goto release; | |
3462 | } | |
3463 | d++; | |
3464 | } | |
3465 | } | |
3466 | ||
3467 | update_cache_size(container, sra, info, | |
3468 | min(reshape.before.data_disks, | |
3469 | reshape.after.data_disks), blocks); | |
3470 | ||
3471 | /* Right, everything seems fine. Let's kick things off. | |
3472 | * If only changing raid_disks, use ioctl, else use | |
3473 | * sysfs. | |
3474 | */ | |
3475 | sync_metadata(st); | |
3476 | ||
3477 | if (impose_reshape(sra, info, st, fd, restart, | |
3478 | devname, container, &reshape) < 0) | |
3479 | goto release; | |
3480 | ||
3481 | err = start_reshape(sra, restart, reshape.before.data_disks, | |
3482 | reshape.after.data_disks, st); | |
3483 | if (err) { | |
3484 | pr_err("Cannot %s reshape for %s\n", | |
3485 | restart ? "continue" : "start", devname); | |
3486 | goto release; | |
3487 | } | |
3488 | if (restart) | |
3489 | sysfs_set_str(sra, NULL, "array_state", "active"); | |
3490 | if (freeze_reshape) { | |
3491 | free(fdlist); | |
3492 | free(offsets); | |
3493 | sysfs_free(sra); | |
3494 | pr_err("Reshape has to be continued from location %llu when root filesystem has been mounted.\n", | |
3495 | sra->reshape_progress); | |
3496 | return 1; | |
3497 | } | |
3498 | ||
3499 | if (!forked) | |
3500 | if (continue_via_systemd(container ?: sra->sys_name, | |
3501 | GROW_SERVICE, NULL)) { | |
3502 | free(fdlist); | |
3503 | free(offsets); | |
3504 | sysfs_free(sra); | |
3505 | return 0; | |
3506 | } | |
3507 | ||
3508 | /* Now we just need to kick off the reshape and watch, while | |
3509 | * handling backups of the data... | |
3510 | * This is all done by a forked background process. | |
3511 | */ | |
3512 | switch(forked ? 0 : fork()) { | |
3513 | case -1: | |
3514 | pr_err("Cannot run child to monitor reshape: %s\n", | |
3515 | strerror(errno)); | |
3516 | abort_reshape(sra); | |
3517 | goto release; | |
3518 | default: | |
3519 | free(fdlist); | |
3520 | free(offsets); | |
3521 | sysfs_free(sra); | |
3522 | return 0; | |
3523 | case 0: | |
3524 | map_fork(); | |
3525 | break; | |
3526 | } | |
3527 | ||
3528 | /* Close unused file descriptor in the forked process */ | |
3529 | close_fd(&fd); | |
3530 | ||
3531 | /* If another array on the same devices is busy, the | |
3532 | * reshape will wait for them. This would mean that | |
3533 | * the first section that we suspend will stay suspended | |
3534 | * for a long time. So check on that possibility | |
3535 | * by looking for "DELAYED" in /proc/mdstat, and if found, | |
3536 | * wait a while | |
3537 | */ | |
3538 | do { | |
3539 | struct mdstat_ent *mds, *m; | |
3540 | delayed = 0; | |
3541 | mds = mdstat_read(1, 0); | |
3542 | for (m = mds; m; m = m->next) | |
3543 | if (strcmp(m->devnm, sra->sys_name) == 0) { | |
3544 | if (m->resync && m->percent == RESYNC_DELAYED) | |
3545 | delayed = 1; | |
3546 | if (m->resync == 0) | |
3547 | /* Haven't started the reshape thread | |
3548 | * yet, wait a bit | |
3549 | */ | |
3550 | delayed = 2; | |
3551 | break; | |
3552 | } | |
3553 | free_mdstat(mds); | |
3554 | if (delayed == 1 && get_linux_version() < 3007000) { | |
3555 | pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n" | |
3556 | " You might experience problems until other reshapes complete.\n"); | |
3557 | delayed = 0; | |
3558 | } | |
3559 | if (delayed) | |
3560 | mdstat_wait(30 - (delayed-1) * 25); | |
3561 | } while (delayed); | |
3562 | mdstat_close(); | |
3563 | if (check_env("MDADM_GROW_VERIFY")) | |
3564 | fd = open(devname, O_RDONLY | O_DIRECT); | |
3565 | else | |
3566 | fd = -1; | |
3567 | mlockall(MCL_FUTURE); | |
3568 | ||
3569 | if (signal_s(SIGTERM, catch_term) == SIG_ERR) | |
3570 | goto release; | |
3571 | ||
3572 | if (st->ss->external) { | |
3573 | /* metadata handler takes it from here */ | |
3574 | done = st->ss->manage_reshape( | |
3575 | fd, sra, &reshape, st, blocks, | |
3576 | fdlist, offsets, d - odisks, fdlist + odisks, | |
3577 | offsets + odisks); | |
3578 | } else | |
3579 | done = child_monitor( | |
3580 | fd, sra, &reshape, st, blocks, fdlist, offsets, | |
3581 | d - odisks, fdlist + odisks, offsets + odisks); | |
3582 | ||
3583 | free(fdlist); | |
3584 | free(offsets); | |
3585 | ||
3586 | if (backup_file && done) { | |
3587 | char *bul; | |
3588 | bul = make_backup(sra->sys_name); | |
3589 | if (bul) { | |
3590 | char buf[1024]; | |
3591 | int l = readlink(bul, buf, sizeof(buf) - 1); | |
3592 | if (l > 0) { | |
3593 | buf[l]=0; | |
3594 | unlink(buf); | |
3595 | } | |
3596 | unlink(bul); | |
3597 | free(bul); | |
3598 | } | |
3599 | unlink(backup_file); | |
3600 | } | |
3601 | if (!done) { | |
3602 | abort_reshape(sra); | |
3603 | goto out; | |
3604 | } | |
3605 | ||
3606 | if (!st->ss->external && | |
3607 | !(reshape.before.data_disks != reshape.after.data_disks && | |
3608 | info->custom_array_size) && info->new_level == reshape.level && | |
3609 | !forked) { | |
3610 | /* no need to wait for the reshape to finish as | |
3611 | * there is nothing more to do. | |
3612 | */ | |
3613 | sysfs_free(sra); | |
3614 | exit(0); | |
3615 | } | |
3616 | wait_reshape(sra); | |
3617 | ||
3618 | if (st->ss->external) { | |
3619 | /* Re-load the metadata as much could have changed */ | |
3620 | int cfd = open_dev(st->container_devnm); | |
3621 | if (cfd >= 0) { | |
3622 | flush_mdmon(container); | |
3623 | st->ss->free_super(st); | |
3624 | st->ss->load_container(st, cfd, container); | |
3625 | close(cfd); | |
3626 | } | |
3627 | } | |
3628 | ||
3629 | /* set new array size if required customer_array_size is used | |
3630 | * by this metadata. | |
3631 | */ | |
3632 | if (reshape.before.data_disks != reshape.after.data_disks && | |
3633 | info->custom_array_size) | |
3634 | set_array_size(st, info, info->text_version); | |
3635 | ||
3636 | if (info->new_level != reshape.level) { | |
3637 | if (fd < 0) | |
3638 | fd = open(devname, O_RDONLY); | |
3639 | impose_level(fd, info->new_level, devname, verbose); | |
3640 | close(fd); | |
3641 | if (info->new_level == 0) | |
3642 | st->update_tail = NULL; | |
3643 | } | |
3644 | out: | |
3645 | sysfs_free(sra); | |
3646 | if (forked) | |
3647 | return 0; | |
3648 | unfreeze(st); | |
3649 | exit(0); | |
3650 | ||
3651 | release: | |
3652 | free(fdlist); | |
3653 | free(offsets); | |
3654 | if (orig_level != UnSet && sra) { | |
3655 | c = map_num(pers, orig_level); | |
3656 | if (c && sysfs_set_str(sra, NULL, "level", c) == 0) | |
3657 | pr_err("aborting level change\n"); | |
3658 | } | |
3659 | sysfs_free(sra); | |
3660 | if (!forked) | |
3661 | unfreeze(st); | |
3662 | return 1; | |
3663 | } | |
3664 | ||
3665 | /* mdfd handle is passed to be closed in child process (after fork). | |
3666 | */ | |
3667 | int reshape_container(char *container, char *devname, | |
3668 | int mdfd, | |
3669 | struct supertype *st, | |
3670 | struct mdinfo *info, | |
3671 | int force, | |
3672 | char *backup_file, int verbose, | |
3673 | int forked, int restart, int freeze_reshape) | |
3674 | { | |
3675 | struct mdinfo *cc = NULL; | |
3676 | int rv = restart; | |
3677 | char last_devnm[32] = ""; | |
3678 | ||
3679 | /* component_size is not meaningful for a container, | |
3680 | * so pass '0' meaning 'no change' | |
3681 | */ | |
3682 | if (!restart && | |
3683 | reshape_super(st, 0, info->new_level, | |
3684 | info->new_layout, info->new_chunk, | |
3685 | info->array.raid_disks, info->delta_disks, | |
3686 | backup_file, devname, APPLY_METADATA_CHANGES, | |
3687 | verbose)) { | |
3688 | unfreeze(st); | |
3689 | return 1; | |
3690 | } | |
3691 | ||
3692 | sync_metadata(st); | |
3693 | ||
3694 | /* ping monitor to be sure that update is on disk | |
3695 | */ | |
3696 | ping_monitor(container); | |
3697 | ||
3698 | if (!forked && !freeze_reshape) | |
3699 | if (continue_via_systemd(container, GROW_SERVICE, NULL)) | |
3700 | return 0; | |
3701 | ||
3702 | switch (forked ? 0 : fork()) { | |
3703 | case -1: /* error */ | |
3704 | perror("Cannot fork to complete reshape\n"); | |
3705 | unfreeze(st); | |
3706 | return 1; | |
3707 | default: /* parent */ | |
3708 | if (!freeze_reshape) | |
3709 | printf("%s: multi-array reshape continues in background\n", Name); | |
3710 | return 0; | |
3711 | case 0: /* child */ | |
3712 | manage_fork_fds(0); | |
3713 | map_fork(); | |
3714 | break; | |
3715 | } | |
3716 | ||
3717 | /* close unused handle in child process | |
3718 | */ | |
3719 | if (mdfd > -1) | |
3720 | close(mdfd); | |
3721 | ||
3722 | while(1) { | |
3723 | /* For each member array with reshape_active, | |
3724 | * we need to perform the reshape. | |
3725 | * We pick the first array that needs reshaping and | |
3726 | * reshape it. reshape_array() will re-read the metadata | |
3727 | * so the next time through a different array should be | |
3728 | * ready for reshape. | |
3729 | * It is possible that the 'different' array will not | |
3730 | * be assembled yet. In that case we simple exit. | |
3731 | * When it is assembled, the mdadm which assembles it | |
3732 | * will take over the reshape. | |
3733 | */ | |
3734 | struct mdinfo *content; | |
3735 | int fd; | |
3736 | struct mdstat_ent *mdstat; | |
3737 | char *adev; | |
3738 | dev_t devid; | |
3739 | ||
3740 | sysfs_free(cc); | |
3741 | ||
3742 | cc = st->ss->container_content(st, NULL); | |
3743 | ||
3744 | for (content = cc; content ; content = content->next) { | |
3745 | char *subarray; | |
3746 | if (!content->reshape_active) | |
3747 | continue; | |
3748 | ||
3749 | subarray = strchr(content->text_version+1, '/')+1; | |
3750 | mdstat = mdstat_by_subdev(subarray, container); | |
3751 | if (!mdstat) | |
3752 | continue; | |
3753 | if (mdstat->active == 0) { | |
3754 | pr_err("Skipping inactive array %s.\n", | |
3755 | mdstat->devnm); | |
3756 | free_mdstat(mdstat); | |
3757 | mdstat = NULL; | |
3758 | continue; | |
3759 | } | |
3760 | break; | |
3761 | } | |
3762 | if (!content) | |
3763 | break; | |
3764 | ||
3765 | devid = devnm2devid(mdstat->devnm); | |
3766 | adev = map_dev(major(devid), minor(devid), 0); | |
3767 | if (!adev) | |
3768 | adev = content->text_version; | |
3769 | ||
3770 | fd = open_dev(mdstat->devnm); | |
3771 | if (fd < 0) { | |
3772 | pr_err("Device %s cannot be opened for reshape.\n", | |
3773 | adev); | |
3774 | break; | |
3775 | } | |
3776 | ||
3777 | if (strcmp(last_devnm, mdstat->devnm) == 0) { | |
3778 | /* Do not allow for multiple reshape_array() calls for | |
3779 | * the same array. | |
3780 | * It can happen when reshape_array() returns without | |
3781 | * error, when reshape is not finished (wrong reshape | |
3782 | * starting/continuation conditions). Mdmon doesn't | |
3783 | * switch to next array in container and reentry | |
3784 | * conditions for the same array occur. | |
3785 | * This is possibly interim until the behaviour of | |
3786 | * reshape_array is resolved(). | |
3787 | */ | |
3788 | printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev); | |
3789 | close(fd); | |
3790 | break; | |
3791 | } | |
3792 | strcpy(last_devnm, mdstat->devnm); | |
3793 | ||
3794 | if (sysfs_init(content, fd, mdstat->devnm)) { | |
3795 | pr_err("Unable to initialize sysfs for %s\n", | |
3796 | mdstat->devnm); | |
3797 | rv = 1; | |
3798 | break; | |
3799 | } | |
3800 | ||
3801 | if (mdmon_running(container)) | |
3802 | flush_mdmon(container); | |
3803 | ||
3804 | rv = reshape_array(container, fd, adev, st, | |
3805 | content, force, NULL, INVALID_SECTORS, | |
3806 | backup_file, verbose, 1, restart, | |
3807 | freeze_reshape); | |
3808 | close(fd); | |
3809 | ||
3810 | if (freeze_reshape) { | |
3811 | sysfs_free(cc); | |
3812 | exit(0); | |
3813 | } | |
3814 | ||
3815 | restart = 0; | |
3816 | if (rv) | |
3817 | break; | |
3818 | ||
3819 | if (mdmon_running(container)) | |
3820 | flush_mdmon(container); | |
3821 | } | |
3822 | if (!rv) | |
3823 | unfreeze(st); | |
3824 | sysfs_free(cc); | |
3825 | exit(0); | |
3826 | } | |
3827 | ||
3828 | /* | |
3829 | * We run a child process in the background which performs the following | |
3830 | * steps: | |
3831 | * - wait for resync to reach a certain point | |
3832 | * - suspend io to the following section | |
3833 | * - backup that section | |
3834 | * - allow resync to proceed further | |
3835 | * - resume io | |
3836 | * - discard the backup. | |
3837 | * | |
 * These steps are combined in slightly different ways in the three cases.
3839 | * Grow: | |
3840 | * - suspend/backup/allow/wait/resume/discard | |
3841 | * Shrink: | |
3842 | * - allow/wait/suspend/backup/allow/wait/resume/discard | |
3843 | * same-size: | |
3844 | * - wait/resume/discard/suspend/backup/allow | |
3845 | * | |
3846 | * suspend/backup/allow always come together | |
3847 | * wait/resume/discard do too. | |
3848 | * For the same-size case we have two backups to improve flow. | |
3849 | * | |
3850 | */ | |
3851 | ||
int progress_reshape(struct mdinfo *info, struct reshape *reshape,
		     unsigned long long backup_point,
		     unsigned long long wait_point,
		     unsigned long long *suspend_point,
		     unsigned long long *reshape_completed, int *frozen)
{
	/* This function is called repeatedly by the reshape manager.
	 * It determines how much progress can safely be made and allows
	 * that progress.
	 * - 'info' identifies the array and particularly records in
	 *    ->reshape_progress the metadata's knowledge of progress
	 *    This is a sector offset from the start of the array
	 *    of the next array block to be relocated.  This number
	 *    may increase from 0 or decrease from array_size, depending
	 *    on the type of reshape that is happening.
	 *    Note that in contrast, 'sync_completed' is a block count of the
	 *    reshape so far.  It gives the distance between the start point
	 *    (head or tail of device) and the next place that data will be
	 *    written.  It always increases.
	 * - 'reshape' is the structure created by analyse_change
	 * - 'backup_point' shows how much the metadata manager has backed-up
	 *    data.  For reshapes with increasing progress, it is the next address
	 *    to be backed up, previous addresses have been backed-up.  For
	 *    decreasing progress, it is the earliest address that has been
	 *    backed up - later addresses are also backed up.
	 *    So addresses between reshape_progress and backup_point are
	 *    backed up providing those are in the 'correct' order.
	 * - 'wait_point' is an array address.  When reshape_completed
	 *    passes this point, progress_reshape should return.  It might
	 *    return earlier if it determines that ->reshape_progress needs
	 *    to be updated or further backup is needed.
	 * - suspend_point is maintained by progress_reshape and the caller
	 *    should not touch it except to initialise to zero.
	 *    It is an array address and it only increases in 2.6.37 and earlier.
	 *    This makes it difficult to handle reducing reshapes with
	 *    external metadata.
	 *    However: it is similar to backup_point in that it records the
	 *    other end of a suspended region from reshape_progress.
	 *    It is moved to extend the region that is safe to backup and/or
	 *    reshape.
	 * - reshape_completed is read from sysfs and returned.  The caller
	 *    should copy this into ->reshape_progress when it has reason to
	 *    believe that the metadata knows this, and any backup outside this
	 *    has been erased.
	 *
	 * Return value is:
	 *  1 if more data from backup_point - but only as far as suspend_point,
	 *    should be backed up
	 *  0 if things are progressing smoothly
	 * -1 if the reshape is finished because it is all done,
	 * -2 if the reshape is finished due to an error.
	 */

	int advancing = (reshape->after.data_disks
			 >= reshape->before.data_disks);
	unsigned long long need_backup; /* All data between start of array and
					 * here will at some point need to
					 * be backed up.
					 */
	unsigned long long read_offset, write_offset;
	unsigned long long write_range;
	unsigned long long max_progress, target, completed;
	unsigned long long array_size = (info->component_size
					 * reshape->before.data_disks);
	int fd;
	char buf[SYSFS_MAX_BUF_SIZE];

	/* First, we unsuspend any region that is now known to be safe.
	 * If suspend_point is on the 'wrong' side of reshape_progress, then
	 * we don't have or need suspension at the moment. This is true for
	 * native metadata when we don't need to back-up.
	 */
	if (advancing) {
		if (info->reshape_progress <= *suspend_point)
			sysfs_set_num(info, NULL, "suspend_lo",
				      info->reshape_progress);
	} else {
		/* Note: this won't work in 2.6.37 and before.
		 * Something somewhere should make sure we don't need it!
		 */
		if (info->reshape_progress >= *suspend_point)
			sysfs_set_num(info, NULL, "suspend_hi",
				      info->reshape_progress);
	}

	/* Now work out how far it is safe to progress.
	 * If the read_offset for ->reshape_progress is less than
	 * 'blocks' beyond the write_offset, we can only progress as far
	 * as a backup.
	 * Otherwise we can progress until the write_offset for the new location
	 * reaches (within 'blocks' of) the read_offset at the current location.
	 * However that region must be suspended unless we are using native
	 * metadata.
	 * If we need to suspend more, we limit it to 128M per device, which is
	 * rather arbitrary and should be some time-based calculation.
	 */
	read_offset = info->reshape_progress / reshape->before.data_disks;
	write_offset = info->reshape_progress / reshape->after.data_disks;
	write_range = info->new_chunk/512;
	if (reshape->before.data_disks == reshape->after.data_disks)
		/* same-size reshape: every address eventually needs backup */
		need_backup = array_size;
	else
		need_backup = reshape->backup_blocks;
	if (advancing) {
		if (read_offset < write_offset + write_range)
			max_progress = backup_point;
		else
			max_progress =
				read_offset * reshape->after.data_disks;
	} else {
		if (read_offset > write_offset - write_range)
			/* Can only progress as far as has been backed up,
			 * which must be suspended */
			max_progress = backup_point;
		else if (info->reshape_progress <= need_backup)
			max_progress = backup_point;
		else {
			if (info->array.major_version >= 0)
				/* Can progress until backup is needed */
				max_progress = need_backup;
			else {
				/* Can progress until metadata update is required */
				max_progress =
					read_offset * reshape->after.data_disks;
				/* but data must be suspended */
				if (max_progress < *suspend_point)
					max_progress = *suspend_point;
			}
		}
	}

	/* We know it is safe to progress to 'max_progress' providing
	 * it is suspended or we are using native metadata.
	 * Consider extending suspend_point 128M per device if it
	 * is less than 64M per device beyond reshape_progress.
	 * But always do a multiple of 'blocks'
	 * FIXME this is too big - it takes too long to complete
	 * this much.
	 */
	target = 64*1024*2 * min(reshape->before.data_disks,
				 reshape->after.data_disks);
	/* round 'target' down to a multiple of backup_blocks,
	 * but never less than two of them */
	target /= reshape->backup_blocks;
	if (target < 2)
		target = 2;
	target *= reshape->backup_blocks;

	/* For externally managed metadata we always need to suspend IO to
	 * the area being reshaped so we regularly push suspend_point forward.
	 * For native metadata we only need the suspend if we are going to do
	 * a backup.
	 */
	if (advancing) {
		if ((need_backup > info->reshape_progress ||
		     info->array.major_version < 0) &&
		    *suspend_point < info->reshape_progress + target) {
			if (need_backup < *suspend_point + 2 * target)
				*suspend_point = need_backup;
			else if (*suspend_point + 2 * target < array_size)
				*suspend_point += 2 * target;
			else
				*suspend_point = array_size;
			sysfs_set_num(info, NULL, "suspend_hi", *suspend_point);
			if (max_progress > *suspend_point)
				max_progress = *suspend_point;
		}
	} else {
		if (info->array.major_version >= 0) {
			/* Only need to suspend when about to backup */
			if (info->reshape_progress < need_backup * 2 &&
			    *suspend_point > 0) {
				*suspend_point = 0;
				sysfs_set_num(info, NULL, "suspend_lo", 0);
				sysfs_set_num(info, NULL, "suspend_hi",
					      need_backup);
			}
		} else {
			/* Need to suspend continually */
			if (info->reshape_progress < *suspend_point)
				*suspend_point = info->reshape_progress;
			if (*suspend_point + target < info->reshape_progress)
				/* No need to move suspend region yet */;
			else {
				if (*suspend_point >= 2 * target)
					*suspend_point -= 2 * target;
				else
					*suspend_point = 0;
				sysfs_set_num(info, NULL, "suspend_lo",
					      *suspend_point);
			}
			if (max_progress < *suspend_point)
				max_progress = *suspend_point;
		}
	}

	/* now set sync_max to allow that progress. sync_max, like
	 * sync_completed is a count of sectors written per device, so
	 * we find the difference between max_progress and the start point,
	 * and divide that by after.data_disks to get a sync_max
	 * number.
	 * At the same time we convert wait_point to a similar number
	 * for comparing against sync_completed.
	 */
	/* scale down max_progress to per_disk */
	max_progress /= reshape->after.data_disks;
	/*
	 * Round to chunk size as some kernels give an erroneously
	 * high number
	 */
	max_progress /= info->new_chunk/512;
	max_progress *= info->new_chunk/512;
	/* And round to old chunk size as the kernel wants that */
	max_progress /= info->array.chunk_size/512;
	max_progress *= info->array.chunk_size/512;
	/* Limit progress to the whole device */
	if (max_progress > info->component_size)
		max_progress = info->component_size;
	wait_point /= reshape->after.data_disks;
	if (!advancing) {
		/* switch from 'device offset' to 'processed block count' */
		max_progress = info->component_size - max_progress;
		wait_point = info->component_size - wait_point;
	}

	/* while frozen (sync_max diverged externally - see check_progress),
	 * do not push sync_max forward */
	if (!*frozen)
		sysfs_set_num(info, NULL, "sync_max", max_progress);

	/* Now wait. If we have already reached the point that we were
	 * asked to wait to, don't wait at all, else wait for any change.
	 * We need to select on 'sync_completed' as that is the place that
	 * notifications happen, but we are really interested in
	 * 'reshape_position'
	 */
	fd = sysfs_get_fd(info, NULL, "sync_completed");
	if (fd < 0)
		goto check_progress;

	if (sysfs_fd_get_ll(fd, &completed) < 0)
		goto check_progress;

	while (completed < max_progress && completed < wait_point) {
		/* Check that sync_action is still 'reshape' to avoid
		 * waiting forever on a dead array
		 */
		char action[SYSFS_MAX_BUF_SIZE];
		if (sysfs_get_str(info, NULL, "sync_action", action, sizeof(action)) <= 0 ||
		    strncmp(action, "reshape", 7) != 0)
			break;
		/* Some kernels reset 'sync_completed' to zero
		 * before setting 'sync_action' to 'idle'.
		 * So we need these extra tests.
		 */
		if (completed == 0 && advancing &&
		    strncmp(action, "idle", 4) == 0 &&
		    info->reshape_progress > 0)
			break;
		if (completed == 0 && !advancing &&
		    strncmp(action, "idle", 4) == 0 &&
		    info->reshape_progress <
		    (info->component_size * reshape->after.data_disks))
			break;
		sysfs_wait(fd, NULL);
		if (sysfs_fd_get_ll(fd, &completed) < 0)
			goto check_progress;
	}
	/* Some kernels reset 'sync_completed' to zero,
	 * we need to have real point we are in md.
	 * So in that case, read 'reshape_position' from sysfs.
	 */
	if (completed == 0) {
		unsigned long long reshapep;
		char action[SYSFS_MAX_BUF_SIZE];
		if (sysfs_get_str(info, NULL, "sync_action", action, sizeof(action)) > 0 &&
		    strncmp(action, "idle", 4) == 0 &&
		    sysfs_get_ll(info, NULL,
				 "reshape_position", &reshapep) == 0)
			*reshape_completed = reshapep;
	} else {
		/* some kernels can give an incorrectly high
		 * 'completed' number, so round down */
		completed /= (info->new_chunk/512);
		completed *= (info->new_chunk/512);
		/* Convert 'completed' back in to a 'progress' number */
		completed *= reshape->after.data_disks;
		if (!advancing)
			completed = (info->component_size
				     * reshape->after.data_disks
				     - completed);
		*reshape_completed = completed;
	}

	close(fd);

	/* We return the need_backup flag. Caller will decide
	 * how much - a multiple of ->backup_blocks up to *suspend_point
	 */
	if (advancing)
		return need_backup > info->reshape_progress;
	else
		return need_backup >= info->reshape_progress;

check_progress:
	/* if we couldn't read a number from sync_completed, then
	 * either the reshape did complete, or it aborted.
	 * We can tell which by checking for 'none' in reshape_position.
	 * If it did abort, then it might immediately restart if it
	 * was just a device failure that leaves us degraded but
	 * functioning.
	 */
	if (sysfs_get_str(info, NULL, "reshape_position", buf, sizeof(buf)) < 0 ||
	    str_is_none(buf) == false) {
		/* The abort might only be temporary.  Wait up to 10
		 * seconds for fd to contain a valid number again.
		 */
		int wait = 10000;
		int rv = -2;
		unsigned long long new_sync_max;
		while (fd >= 0 && rv < 0 && wait > 0) {
			if (sysfs_wait(fd, &wait) != 1)
				break;
			switch (sysfs_fd_get_ll(fd, &completed)) {
			case 0:
				/* all good again */
				rv = 1;
				/* If "sync_max" is no longer max_progress
				 * we need to freeze things
				 */
				sysfs_get_ll(info, NULL, "sync_max",
					     &new_sync_max);
				*frozen = (new_sync_max != max_progress);
				break;
			case -2: /* read error - abort */
				wait = 0;
				break;
			}
		}
		if (fd >= 0)
			close(fd);
		return rv; /* abort */
	} else {
		/* Maybe racing with array shutdown - check state */
		if (fd >= 0)
			close(fd);
		if (sysfs_get_str(info, NULL, "array_state", buf,
				  sizeof(buf)) < 0 ||
		    strncmp(buf, "inactive", 8) == 0 ||
		    strncmp(buf, "clear",5) == 0)
			return -2; /* abort */
		return -1; /* complete */
	}
}
4202 | ||
/* FIXME return status is never checked */
static int grow_backup(struct mdinfo *sra,
		       unsigned long long offset, /* per device */
		       unsigned long stripes, /* per device, in old chunks */
		       int *sources, unsigned long long *offsets,
		       int disks, int chunk, int level, int layout,
		       int dests, int *destfd, unsigned long long *destoffsets,
		       int part, int *degraded,
		       char *buf)
{
	/* Backup 'blocks' sectors at 'offset' on each device of the array,
	 * to storage 'destfd' (offset 'destoffsets'), after first
	 * suspending IO. Then allow resync to continue
	 * over the suspended section.
	 * Use part 'part' of the backup-super-block.
	 * Returns 0 on success, non-zero if a read or a superblock
	 * write failed.
	 */
	int odata = disks;
	int rv = 0;
	int i;
	unsigned long long ll;
	int new_degraded;
	//printf("offset %llu\n", offset);
	/* exclude parity devices from the data-disk count
	 * (one for level >= 4, a second for level 6) */
	if (level >= 4)
		odata--;
	if (level == 6)
		odata--;

	/* Check that array hasn't become degraded, else we might backup the wrong data */
	if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0)
		return -1; /* FIXME this error is ignored */
	new_degraded = (int)ll;
	if (new_degraded != *degraded) {
		/* check each device to ensure it is still working */
		struct mdinfo *sd;
		for (sd = sra->devs ; sd ; sd = sd->next) {
			if (sd->disk.state & (1<<MD_DISK_FAULTY))
				continue;
			if (sd->disk.state & (1<<MD_DISK_SYNC)) {
				char sbuf[SYSFS_MAX_BUF_SIZE];

				if (sysfs_get_str(sra, sd, "state",
						  sbuf, sizeof(sbuf)) < 0 ||
				    strstr(sbuf, "faulty") ||
				    strstr(sbuf, "in_sync") == NULL) {
					/* this device is dead - stop reading
					 * from it and drop its source fd */
					sd->disk.state = (1<<MD_DISK_FAULTY);
					if (sd->disk.raid_disk >= 0 &&
					    sources[sd->disk.raid_disk] >= 0) {
						close(sources[sd->disk.raid_disk]);
						sources[sd->disk.raid_disk] = -1;
					}
				}
			}
		}
		*degraded = new_degraded;
	}
	/* record which array region this backup covers in the in-memory
	 * backup super-block (global 'bsb'); part selects slot 1 or 2 */
	if (part) {
		bsb.arraystart2 = __cpu_to_le64(offset * odata);
		bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata);
	} else {
		bsb.arraystart = __cpu_to_le64(offset * odata);
		bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata);
	}
	if (part)
		/* switch magic to the "-2" (two-part) variant */
		bsb.magic[15] = '2';
	/* position each destination at the start of the chosen backup slot */
	for (i = 0; i < dests; i++)
		if (part)
			lseek64(destfd[i], destoffsets[i] +
				__le64_to_cpu(bsb.devstart2)*512, 0);
		else
			lseek64(destfd[i], destoffsets[i], 0);

	rv = save_stripes(sources, offsets, disks, chunk, level, layout,
			  dests, destfd, offset * 512 * odata,
			  stripes * chunk * odata, buf);

	if (rv)
		return rv;
	bsb.mtime = __cpu_to_le64(time(0));
	/* write the (re-checksummed) super-block 4K before each slot,
	 * and - when space permits - a second copy after the data */
	for (i = 0; i < dests; i++) {
		bsb.devstart = __cpu_to_le64(destoffsets[i]/512);

		bsb.sb_csum = bsb_csum((char*)&bsb,
				       ((char*)&bsb.sb_csum)-((char*)&bsb));
		if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0)
			bsb.sb_csum2 = bsb_csum((char*)&bsb,
						((char*)&bsb.sb_csum2)-((char*)&bsb));

		rv = -1;
		if ((unsigned long long)lseek64(destfd[i],
						destoffsets[i] - 4096, 0) !=
		    destoffsets[i] - 4096)
			break;
		if (write(destfd[i], &bsb, 512) != 512)
			break;
		if (destoffsets[i] > 4096) {
			if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) !=
			    destoffsets[i]+stripes*chunk*odata)
				break;
			if (write(destfd[i], &bsb, 512) != 512)
				break;
		}
		fsync(destfd[i]);
		rv = 0;
	}

	return rv;
}
4311 | ||
/* in 2.6.30, the value reported by sync_completed can be
 * less than it should be by one stripe.
 * This only happens when reshape hits sync_max and pauses.
 * So allow wait_backup to either extend sync_max further
 * than strictly necessary, or return before the
 * sync has got quite as far as we would really like.
 * This is what 'blocks2' is for.
 * The various callers give appropriate values so that
 * everything works.
 */
4322 | /* FIXME return value is often ignored */ | |
4323 | static int forget_backup(int dests, int *destfd, | |
4324 | unsigned long long *destoffsets, | |
4325 | int part) | |
4326 | { | |
4327 | /* | |
4328 | * Erase backup 'part' (which is 0 or 1) | |
4329 | */ | |
4330 | int i; | |
4331 | int rv; | |
4332 | ||
4333 | if (part) { | |
4334 | bsb.arraystart2 = __cpu_to_le64(0); | |
4335 | bsb.length2 = __cpu_to_le64(0); | |
4336 | } else { | |
4337 | bsb.arraystart = __cpu_to_le64(0); | |
4338 | bsb.length = __cpu_to_le64(0); | |
4339 | } | |
4340 | bsb.mtime = __cpu_to_le64(time(0)); | |
4341 | rv = 0; | |
4342 | for (i = 0; i < dests; i++) { | |
4343 | bsb.devstart = __cpu_to_le64(destoffsets[i]/512); | |
4344 | bsb.sb_csum = bsb_csum((char*)&bsb, | |
4345 | ((char*)&bsb.sb_csum)-((char*)&bsb)); | |
4346 | if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0) | |
4347 | bsb.sb_csum2 = bsb_csum((char*)&bsb, | |
4348 | ((char*)&bsb.sb_csum2)-((char*)&bsb)); | |
4349 | if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) != | |
4350 | destoffsets[i]-4096) | |
4351 | rv = -1; | |
4352 | if (rv == 0 && write(destfd[i], &bsb, 512) != 512) | |
4353 | rv = -1; | |
4354 | fsync(destfd[i]); | |
4355 | } | |
4356 | return rv; | |
4357 | } | |
4358 | ||
static void fail(char *msg)
{
	/* Report 'msg' (plus newline) on stderr and terminate.
	 * Exit status 2 means the message was fully delivered,
	 * 1 means writing it failed.
	 */
	int bad = 0;

	if (write(2, msg, strlen(msg)) != (int)strlen(msg))
		bad = 1;
	if (write(2, "\n", 1) != 1)
		bad = 1;
	exit(bad ? 1 : 2);
}
4366 | ||
4367 | static char *abuf, *bbuf; | |
4368 | static unsigned long long abuflen; | |
4369 | static void validate(int afd, int bfd, unsigned long long offset) | |
4370 | { | |
4371 | /* check that the data in the backup against the array. | |
4372 | * This is only used for regression testing and should not | |
4373 | * be used while the array is active | |
4374 | */ | |
4375 | if (afd < 0) | |
4376 | return; | |
4377 | lseek64(bfd, offset - 4096, 0); | |
4378 | if (read(bfd, &bsb2, 512) != 512) | |
4379 | fail("cannot read bsb"); | |
4380 | if (bsb2.sb_csum != bsb_csum((char*)&bsb2, | |
4381 | ((char*)&bsb2.sb_csum)-((char*)&bsb2))) | |
4382 | fail("first csum bad"); | |
4383 | if (memcmp(bsb2.magic, "md_backup_data", 14) != 0) | |
4384 | fail("magic is bad"); | |
4385 | if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 && | |
4386 | bsb2.sb_csum2 != bsb_csum((char*)&bsb2, | |
4387 | ((char*)&bsb2.sb_csum2)-((char*)&bsb2))) | |
4388 | fail("second csum bad"); | |
4389 | ||
4390 | if (__le64_to_cpu(bsb2.devstart)*512 != offset) | |
4391 | fail("devstart is wrong"); | |
4392 | ||
4393 | if (bsb2.length) { | |
4394 | unsigned long long len = __le64_to_cpu(bsb2.length)*512; | |
4395 | ||
4396 | if (abuflen < len) { | |
4397 | free(abuf); | |
4398 | free(bbuf); | |
4399 | abuflen = len; | |
4400 | if (posix_memalign((void**)&abuf, 4096, abuflen) || | |
4401 | posix_memalign((void**)&bbuf, 4096, abuflen)) { | |
4402 | abuflen = 0; | |
4403 | /* just stop validating on mem-alloc failure */ | |
4404 | return; | |
4405 | } | |
4406 | } | |
4407 | ||
4408 | lseek64(bfd, offset, 0); | |
4409 | if ((unsigned long long)read(bfd, bbuf, len) != len) { | |
4410 | //printf("len %llu\n", len); | |
4411 | fail("read first backup failed"); | |
4412 | } | |
4413 | lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0); | |
4414 | if ((unsigned long long)read(afd, abuf, len) != len) | |
4415 | fail("read first from array failed"); | |
4416 | if (memcmp(bbuf, abuf, len) != 0) | |
4417 | fail("data1 compare failed"); | |
4418 | } | |
4419 | if (bsb2.length2) { | |
4420 | unsigned long long len = __le64_to_cpu(bsb2.length2)*512; | |
4421 | ||
4422 | if (abuflen < len) { | |
4423 | free(abuf); | |
4424 | free(bbuf); | |
4425 | abuflen = len; | |
4426 | abuf = xmalloc(abuflen); | |
4427 | bbuf = xmalloc(abuflen); | |
4428 | } | |
4429 | ||
4430 | lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0); | |
4431 | if ((unsigned long long)read(bfd, bbuf, len) != len) | |
4432 | fail("read second backup failed"); | |
4433 | lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0); | |
4434 | if ((unsigned long long)read(afd, abuf, len) != len) | |
4435 | fail("read second from array failed"); | |
4436 | if (memcmp(bbuf, abuf, len) != 0) | |
4437 | fail("data2 compare failed"); | |
4438 | } | |
4439 | } | |
4440 | ||
4441 | int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape, | |
4442 | struct supertype *st, unsigned long blocks, | |
4443 | int *fds, unsigned long long *offsets, | |
4444 | int dests, int *destfd, unsigned long long *destoffsets) | |
4445 | { | |
4446 | /* Monitor a reshape where backup is being performed using | |
4447 | * 'native' mechanism - either to a backup file, or | |
4448 | * to some space in a spare. | |
4449 | */ | |
4450 | char *buf; | |
4451 | int degraded = -1; | |
4452 | unsigned long long speed; | |
4453 | unsigned long long suspend_point, array_size; | |
4454 | unsigned long long backup_point, wait_point; | |
4455 | unsigned long long reshape_completed; | |
4456 | int done = 0; | |
4457 | int increasing = reshape->after.data_disks >= | |
4458 | reshape->before.data_disks; | |
4459 | int part = 0; /* The next part of the backup area to fill. It | |
4460 | * may already be full, so we need to check */ | |
4461 | int level = reshape->level; | |
4462 | int layout = reshape->before.layout; | |
4463 | int data = reshape->before.data_disks; | |
4464 | int disks = reshape->before.data_disks + reshape->parity; | |
4465 | int chunk = sra->array.chunk_size; | |
4466 | struct mdinfo *sd; | |
4467 | unsigned long stripes; | |
4468 | int uuid[4]; | |
4469 | int frozen = 0; | |
4470 | ||
4471 | /* set up the backup-super-block. This requires the | |
4472 | * uuid from the array. | |
4473 | */ | |
4474 | /* Find a superblock */ | |
4475 | for (sd = sra->devs; sd; sd = sd->next) { | |
4476 | char *dn; | |
4477 | int devfd; | |
4478 | int ok; | |
4479 | if (sd->disk.state & (1<<MD_DISK_FAULTY)) | |
4480 | continue; | |
4481 | dn = map_dev(sd->disk.major, sd->disk.minor, 1); | |
4482 | devfd = dev_open(dn, O_RDONLY); | |
4483 | if (devfd < 0) | |
4484 | continue; | |
4485 | ok = st->ss->load_super(st, devfd, NULL); | |
4486 | close(devfd); | |
4487 | if (ok == 0) | |
4488 | break; | |
4489 | } | |
4490 | if (!sd) { | |
4491 | pr_err("Cannot find a superblock\n"); | |
4492 | return 0; | |
4493 | } | |
4494 | ||
4495 | memset(&bsb, 0, 512); | |
4496 | memcpy(bsb.magic, "md_backup_data-1", 16); | |
4497 | st->ss->uuid_from_super(st, uuid); | |
4498 | memcpy(bsb.set_uuid, uuid, 16); | |
4499 | bsb.mtime = __cpu_to_le64(time(0)); | |
4500 | bsb.devstart2 = blocks; | |
4501 | ||
4502 | stripes = blocks / (sra->array.chunk_size/512) / | |
4503 | reshape->before.data_disks; | |
4504 | ||
4505 | if (posix_memalign((void**)&buf, 4096, disks * chunk)) | |
4506 | /* Don't start the 'reshape' */ | |
4507 | return 0; | |
4508 | if (reshape->before.data_disks == reshape->after.data_disks) { | |
4509 | sysfs_get_ll(sra, NULL, "sync_speed_min", &speed); | |
4510 | sysfs_set_num(sra, NULL, "sync_speed_min", 200000); | |
4511 | } | |
4512 | ||
4513 | if (increasing) { | |
4514 | array_size = sra->component_size * reshape->after.data_disks; | |
4515 | backup_point = sra->reshape_progress; | |
4516 | suspend_point = 0; | |
4517 | } else { | |
4518 | array_size = sra->component_size * reshape->before.data_disks; | |
4519 | backup_point = reshape->backup_blocks; | |
4520 | suspend_point = array_size; | |
4521 | } | |
4522 | ||
4523 | while (!done) { | |
4524 | int rv; | |
4525 | ||
4526 | /* Want to return as soon the oldest backup slot can | |
4527 | * be released as that allows us to start backing up | |
4528 | * some more, providing suspend_point has been | |
4529 | * advanced, which it should have. | |
4530 | */ | |
4531 | if (increasing) { | |
4532 | wait_point = array_size; | |
4533 | if (part == 0 && __le64_to_cpu(bsb.length) > 0) | |
4534 | wait_point = (__le64_to_cpu(bsb.arraystart) + | |
4535 | __le64_to_cpu(bsb.length)); | |
4536 | if (part == 1 && __le64_to_cpu(bsb.length2) > 0) | |
4537 | wait_point = (__le64_to_cpu(bsb.arraystart2) + | |
4538 | __le64_to_cpu(bsb.length2)); | |
4539 | } else { | |
4540 | wait_point = 0; | |
4541 | if (part == 0 && __le64_to_cpu(bsb.length) > 0) | |
4542 | wait_point = __le64_to_cpu(bsb.arraystart); | |
4543 | if (part == 1 && __le64_to_cpu(bsb.length2) > 0) | |
4544 | wait_point = __le64_to_cpu(bsb.arraystart2); | |
4545 | } | |
4546 | ||
4547 | reshape_completed = sra->reshape_progress; | |
4548 | rv = progress_reshape(sra, reshape, | |
4549 | backup_point, wait_point, | |
4550 | &suspend_point, &reshape_completed, | |
4551 | &frozen); | |
4552 | /* external metadata would need to ping_monitor here */ | |
4553 | sra->reshape_progress = reshape_completed; | |
4554 | ||
4555 | /* Clear any backup region that is before 'here' */ | |
4556 | if (increasing) { | |
4557 | if (__le64_to_cpu(bsb.length) > 0 && | |
4558 | reshape_completed >= (__le64_to_cpu(bsb.arraystart) + | |
4559 | __le64_to_cpu(bsb.length))) | |
4560 | forget_backup(dests, destfd, | |
4561 | destoffsets, 0); | |
4562 | if (__le64_to_cpu(bsb.length2) > 0 && | |
4563 | reshape_completed >= (__le64_to_cpu(bsb.arraystart2) + | |
4564 | __le64_to_cpu(bsb.length2))) | |
4565 | forget_backup(dests, destfd, | |
4566 | destoffsets, 1); | |
4567 | } else { | |
4568 | if (__le64_to_cpu(bsb.length) > 0 && | |
4569 | reshape_completed <= (__le64_to_cpu(bsb.arraystart))) | |
4570 | forget_backup(dests, destfd, | |
4571 | destoffsets, 0); | |
4572 | if (__le64_to_cpu(bsb.length2) > 0 && | |
4573 | reshape_completed <= (__le64_to_cpu(bsb.arraystart2))) | |
4574 | forget_backup(dests, destfd, | |
4575 | destoffsets, 1); | |
4576 | } | |
4577 | if (sigterm) | |
4578 | rv = -2; | |
4579 | if (rv < 0) { | |
4580 | if (rv == -1) | |
4581 | done = 1; | |
4582 | break; | |
4583 | } | |
4584 | if (rv == 0 && increasing && !st->ss->external) { | |
4585 | /* No longer need to monitor this reshape */ | |
4586 | sysfs_set_str(sra, NULL, "sync_max", "max"); | |
4587 | done = 1; | |
4588 | break; | |
4589 | } | |
4590 | ||
4591 | while (rv) { | |
4592 | unsigned long long offset; | |
4593 | unsigned long actual_stripes; | |
4594 | /* Need to backup some data. | |
4595 | * If 'part' is not used and the desired | |
4596 | * backup size is suspended, do a backup, | |
4597 | * then consider the next part. | |
4598 | */ | |
4599 | /* Check that 'part' is unused */ | |
4600 | if (part == 0 && __le64_to_cpu(bsb.length) != 0) | |
4601 | break; | |
4602 | if (part == 1 && __le64_to_cpu(bsb.length2) != 0) | |
4603 | break; | |
4604 | ||
4605 | offset = backup_point / data; | |
4606 | actual_stripes = stripes; | |
4607 | if (increasing) { | |
4608 | if (offset + actual_stripes * (chunk/512) > | |
4609 | sra->component_size) | |
4610 | actual_stripes = ((sra->component_size - offset) | |
4611 | / (chunk/512)); | |
4612 | if (offset + actual_stripes * (chunk/512) > | |
4613 | suspend_point/data) | |
4614 | break; | |
4615 | } else { | |
4616 | if (offset < actual_stripes * (chunk/512)) | |
4617 | actual_stripes = offset / (chunk/512); | |
4618 | offset -= actual_stripes * (chunk/512); | |
4619 | if (offset < suspend_point/data) | |
4620 | break; | |
4621 | } | |
4622 | if (actual_stripes == 0) | |
4623 | break; | |
4624 | grow_backup(sra, offset, actual_stripes, fds, offsets, | |
4625 | disks, chunk, level, layout, dests, destfd, | |
4626 | destoffsets, part, °raded, buf); | |
4627 | validate(afd, destfd[0], destoffsets[0]); | |
4628 | /* record where 'part' is up to */ | |
4629 | part = !part; | |
4630 | if (increasing) | |
4631 | backup_point += actual_stripes * (chunk/512) * data; | |
4632 | else | |
4633 | backup_point -= actual_stripes * (chunk/512) * data; | |
4634 | } | |
4635 | } | |
4636 | ||
4637 | /* FIXME maybe call progress_reshape one more time instead */ | |
4638 | /* remove any remaining suspension */ | |
4639 | sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL); | |
4640 | sysfs_set_num(sra, NULL, "suspend_hi", 0); | |
4641 | sysfs_set_num(sra, NULL, "suspend_lo", 0); | |
4642 | sysfs_set_num(sra, NULL, "sync_min", 0); | |
4643 | ||
4644 | if (reshape->before.data_disks == reshape->after.data_disks) | |
4645 | sysfs_set_num(sra, NULL, "sync_speed_min", speed); | |
4646 | free(buf); | |
4647 | return done; | |
4648 | } | |
4649 | ||
/*
 * If any spare contains md_backup_data-1 (or -2) metadata which is
 * recent wrt mtime, write that data into the array and update the
 * super blocks with the new reshape_progress.
 * Returns 0 when data was restored or no restore was needed, 1 when
 * the critical section was needed but no usable backup was found.
 */
int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist,
		 int cnt, char *backup_file, int verbose)
{
	int i, j;
	int old_disks;
	unsigned long long *offsets;
	unsigned long long nstripe, ostripe;
	int ndata, odata;

	/* Data-disk counts before (odata) and after (ndata) the reshape:
	 * one disk is parity; RAID6 has a second. */
	odata = info->array.raid_disks - info->delta_disks - 1;
	if (info->array.level == 6)
		odata--; /* number of data disks */
	ndata = info->array.raid_disks - 1;
	if (info->new_level == 6)
		ndata--;

	old_disks = info->array.raid_disks - info->delta_disks;

	if (info->delta_disks <= 0)
		/* Didn't grow, so the backup file must have
		 * been used
		 */
		old_disks = cnt;
	/* Candidate backup locations are the spares (slots beyond the old
	 * array) and, when given, the backup file occupying the slot just
	 * before them. */
	for (i=old_disks-(backup_file?1:0); i<cnt; i++) {
		struct mdinfo dinfo;
		int fd;
		int bsbsize;
		char *devname, namebuf[20];
		unsigned long long lo, hi;

		/* This was a spare and may have some saved data on it.
		 * Load the superblock, find and load the
		 * backup_super_block.
		 * If either fail, go on to next device.
		 * If the backup contains no new info, just return
		 * else restore data and update all superblocks
		 */
		if (i == old_disks-1) {
			fd = open(backup_file, O_RDONLY);
			if (fd<0) {
				pr_err("backup file %s inaccessible: %s\n",
					backup_file, strerror(errno));
				continue;
			}
			devname = backup_file;
		} else {
			fd = fdlist[i];
			if (fd < 0)
				continue;
			if (st->ss->load_super(st, fd, NULL))
				continue;

			st->ss->getinfo_super(st, &dinfo, NULL);
			st->ss->free_super(st);

			/* The backup super-block is stored in the last 8
			 * sectors of the device's data area. */
			if (lseek64(fd,
				    (dinfo.data_offset + dinfo.component_size - 8) <<9,
				    0) < 0) {
				pr_err("Cannot seek on device %d\n", i);
				continue; /* Cannot seek */
			}
			sprintf(namebuf, "device-%d", i);
			devname = namebuf;
		}
		if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) {
			if (verbose)
				pr_err("Cannot read from %s\n", devname);
			continue; /* Cannot read */
		}
		if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 &&
		    memcmp(bsb.magic, "md_backup_data-2", 16) != 0) {
			if (verbose)
				pr_err("No backup metadata on %s\n", devname);
			continue;
		}
		if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) {
			if (verbose)
				pr_err("Bad backup-metadata checksum on %s\n",
				       devname);
			continue; /* bad checksum */
		}
		if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 &&
		    bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) {
			if (verbose)
				pr_err("Bad backup-metadata checksum2 on %s\n",
				       devname);
			continue; /* Bad second checksum */
		}
		if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) {
			if (verbose)
				pr_err("Wrong uuid on backup-metadata on %s\n",
				       devname);
			continue; /* Wrong uuid */
		}

		/*
		 * array utime and backup-mtime should be updated at
		 * much the same time, but it seems that sometimes
		 * they aren't... So allow considerable flexability in
		 * matching, and allow this test to be overridden by
		 * an environment variable.
		 */
		if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) ||
		   time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) {
			if (check_env("MDADM_GROW_ALLOW_OLD")) {
				pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n",
					(unsigned long)__le64_to_cpu(bsb.mtime),
					(unsigned long)info->array.utime);
			} else {
				pr_err("too-old timestamp on backup-metadata on %s\n", devname);
				pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n");
				continue; /* time stamp is too bad */
			}
		}

		if (bsb.magic[15] == '1') {
			/* Version-1 backup: describes a single range */
			if (bsb.length == 0)
				continue;
			if (info->delta_disks >= 0) {
				/* reshape_progress is increasing */
				if (__le64_to_cpu(bsb.arraystart)
				    + __le64_to_cpu(bsb.length)
				    < info->reshape_progress) {
				nonew:
					if (verbose)
						pr_err("backup-metadata found on %s but is not needed\n", devname);
					continue; /* No new data here */
				}
			} else {
				/* reshape_progress is decreasing */
				if (__le64_to_cpu(bsb.arraystart) >=
				    info->reshape_progress)
					goto nonew; /* No new data here */
			}
		} else {
			/* Version-2 backup: may describe two ranges */
			if (bsb.length == 0 && bsb.length2 == 0)
				continue;
			if (info->delta_disks >= 0) {
				/* reshape_progress is increasing */
				if ((__le64_to_cpu(bsb.arraystart)
				     + __le64_to_cpu(bsb.length)
				     < info->reshape_progress) &&
				    (__le64_to_cpu(bsb.arraystart2)
				     + __le64_to_cpu(bsb.length2)
				     < info->reshape_progress))
					goto nonew; /* No new data here */
			} else {
				/* reshape_progress is decreasing */
				if (__le64_to_cpu(bsb.arraystart) >=
				    info->reshape_progress &&
				    __le64_to_cpu(bsb.arraystart2) >=
				    info->reshape_progress)
					goto nonew; /* No new data here */
			}
		}
		if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0)< 0) {
		second_fail:
			if (verbose)
				pr_err("Failed to verify secondary backup-metadata block on %s\n",
				       devname);
			continue; /* Cannot seek */
		}
		/* There should be a duplicate backup superblock 4k before here */
		if (lseek64(fd, -4096, 1) < 0 ||
		    read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2))
			goto second_fail; /* Cannot find leading superblock */
		if (bsb.magic[15] == '1')
			bsbsize = offsetof(struct mdp_backup_super, pad1);
		else
			bsbsize = offsetof(struct mdp_backup_super, pad);
		if (memcmp(&bsb2, &bsb, bsbsize) != 0)
			goto second_fail; /* Cannot find leading superblock */

		/* Now need the data offsets for all devices. */
		offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks);
		for(j=0; j<info->array.raid_disks; j++) {
			if (fdlist[j] < 0)
				continue;
			if (st->ss->load_super(st, fdlist[j], NULL))
				/* FIXME should be this be an error */
				continue;
			st->ss->getinfo_super(st, &dinfo, NULL);
			st->ss->free_super(st);
			offsets[j] = dinfo.data_offset * 512;
		}
		printf("%s: restoring critical section\n", Name);

		/* Replay the first backed-up range back into the array */
		if (restore_stripes(fdlist, offsets, info->array.raid_disks,
				    info->new_chunk, info->new_level,
				    info->new_layout, fd,
				    __le64_to_cpu(bsb.devstart)*512,
				    __le64_to_cpu(bsb.arraystart)*512,
				    __le64_to_cpu(bsb.length)*512, NULL)) {
			/* didn't succeed, so giveup */
			if (verbose)
				pr_err("Error restoring backup from %s\n",
					devname);
			free(offsets);
			return 1;
		}

		/* Version-2 backups may carry a second range as well */
		if (bsb.magic[15] == '2' &&
		    restore_stripes(fdlist, offsets, info->array.raid_disks,
				    info->new_chunk, info->new_level,
				    info->new_layout, fd,
				    __le64_to_cpu(bsb.devstart)*512 +
				    __le64_to_cpu(bsb.devstart2)*512,
				    __le64_to_cpu(bsb.arraystart2)*512,
				    __le64_to_cpu(bsb.length2)*512, NULL)) {
			/* didn't succeed, so giveup */
			if (verbose)
				pr_err("Error restoring second backup from %s\n",
					devname);
			free(offsets);
			return 1;
		}

		free(offsets);

		/* Ok, so the data is restored. Let's update those superblocks. */

		/* Compute [lo, hi) - the overall array span the backup covers */
		lo = hi = 0;
		if (bsb.length) {
			lo = __le64_to_cpu(bsb.arraystart);
			hi = lo + __le64_to_cpu(bsb.length);
		}
		if (bsb.magic[15] == '2' && bsb.length2) {
			unsigned long long lo1, hi1;
			lo1 = __le64_to_cpu(bsb.arraystart2);
			hi1 = lo1 + __le64_to_cpu(bsb.length2);
			if (lo == hi) {
				lo = lo1;
				hi = hi1;
			} else if (lo < lo1)
				hi = hi1;
			else
				lo = lo1;
		}
		if (lo < hi && (info->reshape_progress < lo ||
				info->reshape_progress > hi))
			/* backup does not affect reshape_progress*/ ;
		else if (info->delta_disks >= 0) {
			/* Growing: progress advances to the end of the
			 * furthest restored range */
			info->reshape_progress = __le64_to_cpu(bsb.arraystart) +
				__le64_to_cpu(bsb.length);
			if (bsb.magic[15] == '2') {
				unsigned long long p2;

				p2 = __le64_to_cpu(bsb.arraystart2) +
					__le64_to_cpu(bsb.length2);
				if (p2 > info->reshape_progress)
					info->reshape_progress = p2;
			}
		} else {
			/* Shrinking: progress retreats to the start of the
			 * earliest restored range */
			info->reshape_progress = __le64_to_cpu(bsb.arraystart);
			if (bsb.magic[15] == '2') {
				unsigned long long p2;

				p2 = __le64_to_cpu(bsb.arraystart2);
				if (p2 < info->reshape_progress)
					info->reshape_progress = p2;
			}
		}
		/* Record the recovered reshape_progress in every member's
		 * superblock */
		for (j=0; j<info->array.raid_disks; j++) {
			if (fdlist[j] < 0)
				continue;
			if (st->ss->load_super(st, fdlist[j], NULL))
				continue;
			st->ss->getinfo_super(st, &dinfo, NULL);
			dinfo.reshape_progress = info->reshape_progress;
			st->ss->update_super(st, &dinfo,
					     UOPT_SPEC__RESHAPE_PROGRESS,
					     NULL,0, 0, NULL);
			st->ss->store_super(st, fdlist[j]);
			st->ss->free_super(st);
		}
		return 0;
	}
	/* Didn't find any backup data, try to see if any
	 * was needed.
	 */
	if (info->delta_disks < 0) {
		/* When shrinking, the critical section is at the end.
		 * So see if we are before the critical section.
		 */
		unsigned long long first_block;
		nstripe = ostripe = 0;
		first_block = 0;
		/* Walk old/new stripe positions until the read point (old
		 * layout) overtakes the write point (new layout). */
		while (ostripe >= nstripe) {
			ostripe += info->array.chunk_size / 512;
			first_block = ostripe * odata;
			nstripe = first_block / ndata / (info->new_chunk/512) *
				(info->new_chunk/512);
		}

		if (info->reshape_progress >= first_block)
			return 0;
	}
	if (info->delta_disks > 0) {
		/* See if we are beyond the critical section. */
		unsigned long long last_block;
		nstripe = ostripe = 0;
		last_block = 0;
		while (nstripe >= ostripe) {
			nstripe += info->new_chunk / 512;
			last_block = nstripe * ndata;
			ostripe = last_block / odata / (info->array.chunk_size/512) *
				(info->array.chunk_size/512);
		}

		if (info->reshape_progress >= last_block)
			return 0;
	}
	/* needed to recover critical section! */
	if (verbose)
		pr_err("Failed to find backup of critical section\n");
	return 1;
}
4972 | ||
/*
 * Command-line entry point for "--grow --continue": resume an
 * interrupted reshape on 'devname' ('fd' is already open on it).
 * Native arrays load a member superblock directly; external-metadata
 * arrays locate the container, pick the sub-array that is under
 * reshape, and make sure mdmon is running before handing off to
 * Grow_continue().  Returns 0 on success, non-zero on failure.
 */
int Grow_continue_command(char *devname, int fd,
			  char *backup_file, int verbose)
{
	int ret_val = 0;
	struct supertype *st = NULL;
	struct mdinfo *content = NULL;
	struct mdinfo array;
	char *subarray = NULL;
	struct mdinfo *cc = NULL;
	struct mdstat_ent *mdstat = NULL;
	int cfd = -1;
	int fd2;

	dprintf("Grow continue from command line called for %s\n", devname);

	st = super_by_fd(fd, &subarray);
	if (!st || !st->ss) {
		pr_err("Unable to determine metadata format for %s\n", devname);
		return 1;
	}
	dprintf("Grow continue is run for ");
	if (st->ss->external == 0) {
		int d;
		int cnt = 5; /* retries waiting for reshape_active */
		dprintf_cont("native array (%s)\n", devname);
		if (md_get_array_info(fd, &array.array) < 0) {
			pr_err("%s is not an active md array - aborting\n",
			       devname);
			ret_val = 1;
			goto Grow_continue_command_exit;
		}
		content = &array;
		sysfs_init(content, fd, NULL);
		/* Need to load a superblock.
		 * FIXME we should really get what we need from
		 * sysfs
		 */
		do {
			/* Find any active member device and load its
			 * superblock */
			for (d = 0; d < MAX_DISKS; d++) {
				mdu_disk_info_t disk;
				char *dv;
				int err;
				disk.number = d;
				if (md_get_disk_info(fd, &disk) < 0)
					continue;
				if (disk.major == 0 && disk.minor == 0)
					continue;
				if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0)
					continue;
				dv = map_dev(disk.major, disk.minor, 1);
				if (!dv)
					continue;
				fd2 = dev_open(dv, O_RDONLY);
				if (fd2 < 0)
					continue;
				err = st->ss->load_super(st, fd2, NULL);
				close(fd2);
				if (err)
					continue;
				break;
			}
			if (d == MAX_DISKS) {
				pr_err("Unable to load metadata for %s\n",
				       devname);
				ret_val = 1;
				goto Grow_continue_command_exit;
			}
			st->ss->getinfo_super(st, content, NULL);
			/* The kernel may not have marked the reshape active
			 * yet; wait briefly and retry a few times. */
			if (!content->reshape_active)
				sleep_for(3, 0, true);
			else
				break;
		} while (cnt-- > 0);
	} else {
		char *container;

		if (subarray) {
			dprintf_cont("subarray (%s)\n", subarray);
			container = st->container_devnm;
			cfd = open_dev_excl(st->container_devnm);
		} else {
			/* 'devname' is the container itself; reopen it
			 * exclusively */
			container = st->devnm;
			close(fd);
			cfd = open_dev_excl(st->devnm);
			dprintf_cont("container (%s)\n", container);
			fd = cfd;
		}
		if (cfd < 0) {
			pr_err("Unable to open container for %s\n", devname);
			ret_val = 1;
			goto Grow_continue_command_exit;
		}

		/* find in container array under reshape
		 */
		ret_val = st->ss->load_container(st, cfd, NULL);
		if (ret_val) {
			pr_err("Cannot read superblock for %s\n", devname);
			ret_val = 1;
			goto Grow_continue_command_exit;
		}

		cc = st->ss->container_content(st, subarray);
		for (content = cc; content ; content = content->next) {
			char *array_name;
			int allow_reshape = 1;

			if (content->reshape_active == 0)
				continue;
			/* The decision about array or container wide
			 * reshape is taken in Grow_continue based
			 * content->reshape_active state, therefore we
			 * need to check_reshape based on
			 * reshape_active and subarray name
			 */
			if (content->array.state & (1<<MD_SB_BLOCK_VOLUME))
				allow_reshape = 0;
			if (content->reshape_active == CONTAINER_RESHAPE &&
			    (content->array.state
			     & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE)))
				allow_reshape = 0;

			if (!allow_reshape) {
				pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n",
				       devname, container);
				ret_val = 1;
				goto Grow_continue_command_exit;
			}

			/* text_version is "/<container>/<subarray>";
			 * extract the subarray name after the second '/' */
			array_name = strchr(content->text_version+1, '/')+1;
			mdstat = mdstat_by_subdev(array_name, container);
			if (!mdstat)
				continue;
			if (mdstat->active == 0) {
				pr_err("Skipping inactive array %s.\n",
				       mdstat->devnm);
				free_mdstat(mdstat);
				mdstat = NULL;
				continue;
			}
			break;
		}
		if (!content) {
			pr_err("Unable to determine reshaped array for %s\n", devname);
			ret_val = 1;
			goto Grow_continue_command_exit;
		}
		fd2 = open_dev(mdstat->devnm);
		if (fd2 < 0) {
			pr_err("cannot open (%s)\n", mdstat->devnm);
			ret_val = 1;
			goto Grow_continue_command_exit;
		}

		if (sysfs_init(content, fd2, mdstat->devnm)) {
			pr_err("Unable to initialize sysfs for %s, Grow cannot continue.\n",
			       mdstat->devnm);
			ret_val = 1;
			close(fd2);
			goto Grow_continue_command_exit;
		}

		close(fd2);

		/* start mdmon in case it is not running
		 */
		if (!mdmon_running(container))
			start_mdmon(container);
		ping_monitor(container);

		if (mdmon_running(container) == false) {
			pr_err("No mdmon found. Grow cannot continue.\n");
			ret_val = 1;
			goto Grow_continue_command_exit;
		}
	}

	/* verify that array under reshape is started from
	 * correct position
	 */
	if (verify_reshape_position(content, content->array.level) < 0) {
		ret_val = 1;
		goto Grow_continue_command_exit;
	}

	/* continue reshape
	 */
	ret_val = Grow_continue(fd, st, content, backup_file, 1, 0);

Grow_continue_command_exit:
	if (cfd > -1)
		close(cfd);
	st->ss->free_super(st);
	free_mdstat(mdstat);
	sysfs_free(cc);
	free(subarray);

	return ret_val;
}
5172 | ||
5173 | int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info, | |
5174 | char *backup_file, int forked, int freeze_reshape) | |
5175 | { | |
5176 | int ret_val = 2; | |
5177 | ||
5178 | if (!info->reshape_active) | |
5179 | return ret_val; | |
5180 | ||
5181 | if (st->ss->external) { | |
5182 | int cfd = open_dev(st->container_devnm); | |
5183 | ||
5184 | if (cfd < 0) | |
5185 | return 1; | |
5186 | ||
5187 | st->ss->load_container(st, cfd, st->container_devnm); | |
5188 | close(cfd); | |
5189 | ret_val = reshape_container(st->container_devnm, NULL, mdfd, | |
5190 | st, info, 0, backup_file, 0, | |
5191 | forked, 1 | info->reshape_active, | |
5192 | freeze_reshape); | |
5193 | } else | |
5194 | ret_val = reshape_array(NULL, mdfd, "array", st, info, 1, | |
5195 | NULL, INVALID_SECTORS, backup_file, | |
5196 | 0, forked, 1 | info->reshape_active, | |
5197 | freeze_reshape); | |
5198 | ||
5199 | return ret_val; | |
5200 | } | |
5201 | ||
5202 | char *make_backup(char *name) | |
5203 | { | |
5204 | char *base = "backup_file-"; | |
5205 | int len; | |
5206 | char *fname; | |
5207 | ||
5208 | len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1; | |
5209 | fname = xmalloc(len); | |
5210 | sprintf(fname, "%s/%s%s", MAP_DIR, base, name); | |
5211 | return fname; | |
5212 | } | |
5213 | ||
/*
 * Return the standard backup-file path for 'name' if a regular file
 * exists there, otherwise NULL.  The returned string is heap
 * allocated; caller must free() it.
 */
char *locate_backup(char *name)
{
	struct stat stb;
	char *path = make_backup(name);

	if (stat(path, &stb) != 0 || !S_ISREG(stb.st_mode)) {
		free(path);
		return NULL;
	}
	return path;
}