Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * mdadm - manage Linux "md" devices aka RAID arrays. | |
3 | * | |
4 | * Copyright (C) 2001-2013 Neil Brown <neilb@suse.de> | |
5 | * | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License as published by | |
9 | * the Free Software Foundation; either version 2 of the License, or | |
10 | * (at your option) any later version. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License | |
18 | * along with this program; if not, write to the Free Software | |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
20 | * | |
21 | * Author: Neil Brown | |
22 | * Email: <neilb@suse.de> | |
23 | */ | |
24 | #include "mdadm.h" | |
25 | #include "dlink.h" | |
26 | #include "xmalloc.h" | |
27 | ||
28 | #include <sys/mman.h> | |
29 | #include <stddef.h> | |
30 | #include <stdint.h> | |
31 | #include <sys/wait.h> | |
32 | ||
33 | int restore_backup(struct supertype *st, | |
34 | struct mdinfo *content, | |
35 | int working_disks, | |
36 | int next_spare, | |
37 | char **backup_filep, | |
38 | int verbose) | |
39 | { | |
40 | int i; | |
41 | int *fdlist; | |
42 | struct mdinfo *dev; | |
43 | int err; | |
44 | int disk_count = next_spare + working_disks; | |
45 | char *backup_file = *backup_filep; | |
46 | ||
47 | dprintf("Called restore_backup()\n"); | |
48 | fdlist = xmalloc(sizeof(int) * disk_count); | |
49 | ||
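| /* fdlist holds one fd per device: slots below next_spare are indexed by | |
| * raid_disk for active members, and spares are appended from next_spare up. | |
| */ | |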
50 | enable_fds(next_spare); | |
51 | for (i = 0; i < next_spare; i++) | |
52 | fdlist[i] = -1; | |
53 | for (dev = content->devs; dev; dev = dev->next) { | |
54 | char buf[22]; | |
55 | int fd; | |
56 | ||
57 | sprintf(buf, "%d:%d", dev->disk.major, dev->disk.minor); | |
58 | fd = dev_open(buf, O_RDWR); | |
59 | ||
60 | if (dev->disk.raid_disk >= 0) | |
61 | fdlist[dev->disk.raid_disk] = fd; | |
62 | else | |
63 | fdlist[next_spare++] = fd; | |
64 | } | |
65 | ||
66 | if (!backup_file) { | |
67 | backup_file = locate_backup(content->sys_name); | |
68 | *backup_filep = backup_file; | |
69 | } | |
70 | ||
71 | if (st->ss->external && st->ss->recover_backup) | |
72 | err = st->ss->recover_backup(st, content); | |
73 | else | |
74 | err = Grow_restart(st, content, fdlist, next_spare, | |
75 | backup_file, verbose > 0); | |
76 | ||
77 | while (next_spare > 0) { | |
78 | next_spare--; | |
79 | if (fdlist[next_spare] >= 0) | |
80 | close(fdlist[next_spare]); | |
81 | } | |
82 | free(fdlist); | |
83 | if (err) { | |
84 | pr_err("Failed to restore critical section for reshape - sorry.\n"); | |
85 | if (!backup_file) | |
86 | pr_err("Possibly you need to specify a --backup-file\n"); | |
87 | return 1; | |
88 | } | |
89 | ||
90 | dprintf("restore_backup() returns status OK.\n"); | |
91 | return 0; | |
92 | } | |
93 | ||
94 | int Grow_Add_device(char *devname, int fd, char *newdev) | |
95 | { | |
96 | /* Add a device to an active array. | |
97 | * Currently, just extend a linear array. | |
98 | * This requires writing a new superblock on the | |
99 | * new device, calling the kernel to add the device, | |
100 | * and if that succeeds, update the superblock on | |
101 | * all other devices. | |
102 | * This means that we need to *find* all other devices. | |
103 | */ | |
104 | struct mdinfo info; | |
105 | ||
106 | dev_t rdev; | |
107 | int nfd, fd2; | |
108 | int d, nd; | |
109 | struct supertype *st = NULL; | |
110 | char *subarray = NULL; | |
111 | ||
112 | if (md_get_array_info(fd, &info.array) < 0) { | |
113 | pr_err("cannot get array info for %s\n", devname); | |
114 | return 1; | |
115 | } | |
116 | ||
117 | if (info.array.level != -1) { | |
118 | pr_err("can only add devices to linear arrays\n"); | |
119 | return 1; | |
120 | } | |
121 | ||
122 | st = super_by_fd(fd, &subarray); | |
123 | if (!st) { | |
124 | pr_err("cannot handle arrays with superblock version %d\n", | |
125 | info.array.major_version); | |
126 | return 1; | |
127 | } | |
128 | ||
129 | if (subarray) { | |
130 | pr_err("Cannot grow linear sub-arrays yet\n"); | |
131 | free(subarray); | |
132 | free(st); | |
133 | return 1; | |
134 | } | |
135 | ||
136 | nfd = open(newdev, O_RDWR|O_EXCL|O_DIRECT); | |
137 | if (nfd < 0) { | |
138 | pr_err("cannot open %s\n", newdev); | |
139 | free(st); | |
140 | return 1; | |
141 | } | |
142 | if (!fstat_is_blkdev(nfd, newdev, &rdev)) { | |
143 | close(nfd); | |
144 | free(st); | |
145 | return 1; | |
146 | } | |
147 | /* now check out all the devices and make sure we can read the | |
148 | * superblock */ | |
149 | for (d=0 ; d < info.array.raid_disks ; d++) { | |
150 | mdu_disk_info_t disk; | |
151 | char *dv; | |
152 | ||
153 | st->ss->free_super(st); | |
154 | ||
155 | disk.number = d; | |
156 | if (md_get_disk_info(fd, &disk) < 0) { | |
157 | pr_err("cannot get device detail for device %d\n", d); | |
158 | close(nfd); | |
159 | free(st); | |
160 | return 1; | |
161 | } | |
162 | dv = map_dev(disk.major, disk.minor, 1); | |
163 | if (!dv) { | |
164 | pr_err("cannot find device file for device %d\n", d); | |
165 | close(nfd); | |
166 | free(st); | |
167 | return 1; | |
168 | } | |
169 | fd2 = dev_open(dv, O_RDWR); | |
170 | if (fd2 < 0) { | |
171 | pr_err("cannot open device file %s\n", dv); | |
172 | close(nfd); | |
173 | free(st); | |
174 | return 1; | |
175 | } | |
176 | ||
177 | if (st->ss->load_super(st, fd2, NULL)) { | |
178 | pr_err("cannot find super block on %s\n", dv); | |
179 | close(nfd); | |
180 | close(fd2); | |
181 | free(st); | |
182 | return 1; | |
183 | } | |
184 | close(fd2); | |
185 | } | |
186 | /* Ok, looks good. Let's update the superblock and write it out to | |
187 | * newdev. | |
188 | */ | |
189 | ||
190 | info.disk.number = d; | |
191 | info.disk.major = major(rdev); | |
192 | info.disk.minor = minor(rdev); | |
193 | info.disk.raid_disk = d; | |
194 | info.disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE); | |
195 | if (st->ss->update_super(st, &info, UOPT_SPEC_LINEAR_GROW_NEW, newdev, | |
196 | 0, 0, NULL) != 0) { | |
197 | pr_err("Preparing new metadata failed on %s\n", newdev); | |
198 | close(nfd); | |
199 | return 1; | |
200 | } | |
201 | ||
202 | if (st->ss->store_super(st, nfd)) { | |
203 | pr_err("Cannot store new superblock on %s\n", newdev); | |
204 | close(nfd); | |
205 | return 1; | |
206 | } | |
207 | close(nfd); | |
208 | ||
209 | if (ioctl(fd, ADD_NEW_DISK, &info.disk) != 0) { | |
210 | pr_err("Cannot add new disk to this array\n"); | |
211 | return 1; | |
212 | } | |
213 | /* Well, that seems to have worked. | |
214 | * Now go through and update all superblocks | |
215 | */ | |
216 | ||
217 | if (md_get_array_info(fd, &info.array) < 0) { | |
218 | pr_err("cannot get array info for %s\n", devname); | |
219 | return 1; | |
220 | } | |
221 | ||
222 | nd = d; | |
223 | for (d=0 ; d < info.array.raid_disks ; d++) { | |
224 | mdu_disk_info_t disk; | |
225 | char *dv; | |
226 | ||
227 | disk.number = d; | |
228 | if (md_get_disk_info(fd, &disk) < 0) { | |
229 | pr_err("cannot get device detail for device %d\n", d); | |
230 | return 1; | |
231 | } | |
232 | dv = map_dev(disk.major, disk.minor, 1); | |
233 | if (!dv) { | |
234 | pr_err("cannot find device file for device %d\n", d); | |
235 | return 1; | |
236 | } | |
237 | fd2 = dev_open(dv, O_RDWR); | |
238 | if (fd2 < 0) { | |
239 | pr_err("cannot open device file %s\n", dv); | |
240 | return 1; | |
241 | } | |
242 | if (st->ss->load_super(st, fd2, NULL)) { | |
243 | pr_err("cannot find super block on %s\n", dv); | |
244 | close(fd); | |
245 | close(fd2); | |
246 | return 1; | |
247 | } | |
248 | info.array.raid_disks = nd+1; | |
249 | info.array.nr_disks = nd+1; | |
250 | info.array.active_disks = nd+1; | |
251 | info.array.working_disks = nd+1; | |
252 | ||
253 | if (st->ss->update_super(st, &info, UOPT_SPEC_LINEAR_GROW_UPDATE, dv, | |
254 | 0, 0, NULL) != 0) { | |
255 | pr_err("Updating metadata failed on %s\n", dv); | |
256 | close(fd2); | |
257 | return 1; | |
258 | } | |
259 | ||
260 | if (st->ss->store_super(st, fd2)) { | |
261 | pr_err("Cannot store new superblock on %s\n", dv); | |
262 | close(fd2); | |
263 | return 1; | |
264 | } | |
265 | close(fd2); | |
266 | } | |
267 | ||
268 | return 0; | |
269 | } | |
270 | ||
271 | int Grow_addbitmap(char *devname, int fd, struct context *c, struct shape *s) | |
272 | { | |
273 | /* | |
274 | * First check that array doesn't have a bitmap | |
275 | * Then create the bitmap | |
276 | * Then add it | |
277 | * | |
278 | * For internal bitmaps, we need to check the version, | |
279 | * find all the active devices, and write the bitmap block | |
280 | * to all devices | |
281 | */ | |
282 | mdu_array_info_t array; | |
283 | struct supertype *st; | |
284 | char *subarray = NULL; | |
285 | int major = BITMAP_MAJOR_HI; | |
286 | unsigned long long bitmapsize, array_size; | |
287 | struct mdinfo *mdi; | |
288 | ||
289 | /* | |
290 | * We only ever get called if bitmap is not none, so this check | |
291 | * is just here to quiet down static code checkers. | |
292 | */ | |
293 | if (s->btype == BitmapUnknown) | |
294 | return 1; | |
295 | ||
296 | if (s->btype == BitmapCluster) | |
297 | major = BITMAP_MAJOR_CLUSTERED; | |
298 | ||
299 | if (md_get_array_info(fd, &array) != 0) { | |
300 | pr_err("cannot get array status for %s\n", devname); | |
301 | return 1; | |
302 | } | |
303 | if (array.state & (1 << MD_SB_BITMAP_PRESENT)) { | |
304 | if (s->btype == BitmapNone) { | |
305 | array.state &= ~(1 << MD_SB_BITMAP_PRESENT); | |
306 | if (md_set_array_info(fd, &array) != 0) { | |
307 | if (array.state & (1 << MD_SB_CLUSTERED)) | |
308 | pr_err("failed to remove clustered bitmap.\n"); | |
309 | else | |
310 | pr_err("failed to remove internal bitmap.\n"); | |
311 | return 1; | |
312 | } | |
313 | return 0; | |
314 | } | |
315 | pr_err("bitmap already present on %s\n", devname); | |
316 | return 1; | |
317 | } | |
318 | ||
319 | if (s->btype == BitmapNone) { | |
320 | pr_err("no bitmap found on %s\n", devname); | |
321 | return 1; | |
322 | } | |
323 | ||
324 | if (array.level <= 0) { | |
325 | pr_err("Bitmaps not meaningful with level %s\n", | |
326 | map_num(pers, array.level)?:"of this array"); | |
327 | return 1; | |
328 | } | |
329 | bitmapsize = array.size; | |
330 | bitmapsize <<= 1; | |
331 | if (get_dev_size(fd, NULL, &array_size) && | |
332 | array_size > (0x7fffffffULL << 9)) { | |
333 | /* Array is big enough that we cannot trust array.size; | |
334 | * try other approaches. | |
335 | */ | |
336 | bitmapsize = get_component_size(fd); | |
337 | } | |
338 | if (bitmapsize == 0) { | |
339 | pr_err("Cannot reliably determine size of array to create bitmap - sorry.\n"); | |
340 | return 1; | |
341 | } | |
342 | ||
343 | if (array.level == 10) { | |
344 | int ncopies; | |
345 | ||
346 | ncopies = (array.layout & 255) * ((array.layout >> 8) & 255); | |
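| /* e.g. the common near=2 layout is 0x102: 2 near copies (low byte) | |
| * times 1 far copy (next byte) gives ncopies == 2. | |
| */ | |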
347 | bitmapsize = bitmapsize * array.raid_disks / ncopies; | |
348 | ||
349 | if (s->btype == BitmapCluster && | |
350 | !is_near_layout_10(array.layout)) { | |
351 | pr_err("only near layout is supported with clustered raid10\n"); | |
352 | return 1; | |
353 | } | |
354 | } | |
355 | ||
356 | st = super_by_fd(fd, &subarray); | |
357 | if (!st) { | |
358 | pr_err("Cannot understand version %d.%d\n", | |
359 | array.major_version, array.minor_version); | |
360 | return 1; | |
361 | } | |
362 | if (subarray) { | |
363 | pr_err("Cannot add bitmaps to sub-arrays yet\n"); | |
364 | free(subarray); | |
365 | free(st); | |
366 | return 1; | |
367 | } | |
368 | ||
369 | mdi = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY); | |
370 | if (mdi) { | |
371 | if (mdi->consistency_policy == CONSISTENCY_POLICY_PPL) { | |
372 | pr_err("Cannot add bitmap to array with PPL\n"); | |
373 | free(mdi); | |
374 | free(st); | |
375 | return 1; | |
376 | } | |
377 | free(mdi); | |
378 | } | |
379 | ||
380 | if (s->btype == BitmapInternal || s->btype == BitmapCluster) { | |
381 | int rv; | |
382 | int d; | |
383 | int offset_setable = 0; | |
384 | if (st->ss->add_internal_bitmap == NULL) { | |
385 | pr_err("Internal bitmaps not supported with %s metadata\n", st->ss->name); | |
386 | return 1; | |
387 | } | |
388 | st->nodes = c->nodes; | |
389 | st->cluster_name = c->homecluster; | |
390 | mdi = sysfs_read(fd, NULL, GET_BITMAP_LOCATION); | |
391 | if (mdi) | |
392 | offset_setable = 1; | |
393 | for (d = 0; d < st->max_devs; d++) { | |
394 | mdu_disk_info_t disk; | |
395 | char *dv; | |
396 | int fd2; | |
397 | ||
398 | disk.number = d; | |
399 | if (md_get_disk_info(fd, &disk) < 0) | |
400 | continue; | |
401 | if (disk.major == 0 && disk.minor == 0) | |
402 | continue; | |
403 | if ((disk.state & (1 << MD_DISK_SYNC)) == 0) | |
404 | continue; | |
405 | dv = map_dev(disk.major, disk.minor, 1); | |
406 | if (!dv) | |
407 | continue; | |
408 | if ((disk.state & (1 << MD_DISK_WRITEMOSTLY)) && | |
409 | s->btype == BitmapCluster) { | |
410 | pr_err("%s disks marked write-mostly are not supported with clustered bitmap\n",devname); | |
411 | free(mdi); | |
412 | return 1; | |
413 | } | |
414 | fd2 = dev_open(dv, O_RDWR); | |
415 | if (fd2 < 0) | |
416 | continue; | |
417 | rv = st->ss->load_super(st, fd2, NULL); | |
418 | if (!rv) { | |
419 | rv = st->ss->add_internal_bitmap( | |
420 | st, &s->bitmap_chunk, c->delay, | |
421 | s->write_behind, bitmapsize, | |
422 | offset_setable, major); | |
423 | if (!rv) { | |
424 | st->ss->write_bitmap(st, fd2, | |
425 | NodeNumUpdate); | |
426 | } else { | |
427 | pr_err("failed to create internal bitmap - chunksize problem.\n"); | |
428 | } | |
429 | } else { | |
430 | pr_err("failed to load super-block.\n"); | |
431 | } | |
432 | close(fd2); | |
433 | if (rv) { | |
434 | free(mdi); | |
435 | return 1; | |
436 | } | |
437 | } | |
438 | if (offset_setable) { | |
439 | st->ss->getinfo_super(st, mdi, NULL); | |
440 | if (sysfs_init(mdi, fd, NULL)) { | |
441 | pr_err("failed to initialize sysfs.\n"); | |
442 | free(mdi); | |
443 | } | |
444 | rv = sysfs_set_num_signed(mdi, NULL, "bitmap/location", | |
445 | mdi->bitmap_offset); | |
446 | free(mdi); | |
447 | } else { | |
448 | if (s->btype == BitmapCluster) | |
449 | array.state |= (1 << MD_SB_CLUSTERED); | |
450 | array.state |= (1 << MD_SB_BITMAP_PRESENT); | |
451 | rv = md_set_array_info(fd, &array); | |
452 | } | |
453 | if (rv < 0) { | |
454 | if (errno == EBUSY) | |
455 | pr_err("Cannot add bitmap while array is resyncing or reshaping etc.\n"); | |
456 | pr_err("failed to set internal bitmap.\n"); | |
457 | return 1; | |
458 | } | |
459 | } | |
460 | ||
461 | return 0; | |
462 | } | |
463 | ||
464 | int Grow_consistency_policy(char *devname, int fd, struct context *c, struct shape *s) | |
465 | { | |
466 | struct supertype *st; | |
467 | struct mdinfo *sra; | |
468 | struct mdinfo *sd; | |
469 | char *subarray = NULL; | |
470 | int ret = 0; | |
471 | char container_dev[PATH_MAX]; | |
472 | char buf[SYSFS_MAX_BUF_SIZE]; | |
473 | ||
474 | if (s->consistency_policy != CONSISTENCY_POLICY_RESYNC && | |
475 | s->consistency_policy != CONSISTENCY_POLICY_PPL) { | |
476 | pr_err("Operation not supported for consistency policy %s\n", | |
477 | map_num_s(consistency_policies, s->consistency_policy)); | |
478 | return 1; | |
479 | } | |
480 | ||
481 | st = super_by_fd(fd, &subarray); | |
482 | if (!st) | |
483 | return 1; | |
484 | ||
485 | sra = sysfs_read(fd, NULL, GET_CONSISTENCY_POLICY|GET_LEVEL| | |
486 | GET_DEVS|GET_STATE); | |
487 | if (!sra) { | |
488 | ret = 1; | |
489 | goto free_st; | |
490 | } | |
491 | ||
492 | if (s->consistency_policy == CONSISTENCY_POLICY_PPL && | |
493 | !st->ss->write_init_ppl) { | |
494 | pr_err("%s metadata does not support PPL\n", st->ss->name); | |
495 | ret = 1; | |
496 | goto free_info; | |
497 | } | |
498 | ||
499 | if (sra->array.level != 5) { | |
500 | pr_err("Operation not supported for array level %d\n", | |
501 | sra->array.level); | |
502 | ret = 1; | |
503 | goto free_info; | |
504 | } | |
505 | ||
506 | if (sra->consistency_policy == (unsigned)s->consistency_policy) { | |
507 | pr_err("Consistency policy is already %s\n", | |
508 | map_num_s(consistency_policies, s->consistency_policy)); | |
509 | ret = 1; | |
510 | goto free_info; | |
511 | } else if (sra->consistency_policy != CONSISTENCY_POLICY_RESYNC && | |
512 | sra->consistency_policy != CONSISTENCY_POLICY_PPL) { | |
513 | pr_err("Current consistency policy is %s, cannot change to %s\n", | |
514 | map_num_s(consistency_policies, sra->consistency_policy), | |
515 | map_num_s(consistency_policies, s->consistency_policy)); | |
516 | ret = 1; | |
517 | goto free_info; | |
518 | } | |
519 | ||
520 | if (s->consistency_policy == CONSISTENCY_POLICY_PPL) { | |
521 | if (sysfs_get_str(sra, NULL, "sync_action", buf, sizeof(buf)) <= 0) { | |
522 | ret = 1; | |
523 | goto free_info; | |
524 | } else if (strcmp(buf, "reshape\n") == 0) { | |
525 | pr_err("PPL cannot be enabled when reshape is in progress\n"); | |
526 | ret = 1; | |
527 | goto free_info; | |
528 | } | |
529 | } | |
530 | ||
531 | if (subarray) { | |
532 | enum update_opt update; | |
533 | ||
534 | if (s->consistency_policy == CONSISTENCY_POLICY_PPL) | |
535 | update = UOPT_PPL; | |
536 | else | |
537 | update = UOPT_NO_PPL; | |
538 | ||
539 | sprintf(container_dev, "/dev/%s", st->container_devnm); | |
540 | ||
541 | ret = Update_subarray(container_dev, subarray, update, NULL, | |
542 | c->verbose); | |
543 | if (ret) | |
544 | goto free_info; | |
545 | } | |
546 | ||
547 | if (s->consistency_policy == CONSISTENCY_POLICY_PPL) { | |
548 | struct mdinfo info; | |
549 | ||
550 | if (subarray) { | |
551 | struct mdinfo *mdi; | |
552 | int cfd; | |
553 | ||
554 | cfd = open(container_dev, O_RDWR|O_EXCL); | |
555 | if (cfd < 0) { | |
556 | pr_err("Failed to open %s\n", container_dev); | |
557 | ret = 1; | |
558 | goto free_info; | |
559 | } | |
560 | ||
561 | ret = st->ss->load_container(st, cfd, st->container_devnm); | |
562 | close(cfd); | |
563 | ||
564 | if (ret) { | |
565 | pr_err("Cannot read superblock for %s\n", | |
566 | container_dev); | |
567 | goto free_info; | |
568 | } | |
569 | ||
570 | mdi = st->ss->container_content(st, subarray); | |
571 | info = *mdi; | |
572 | free(mdi); | |
573 | } | |
574 | ||
575 | for (sd = sra->devs; sd; sd = sd->next) { | |
576 | int dfd; | |
577 | char *devpath; | |
578 | ||
579 | devpath = map_dev(sd->disk.major, sd->disk.minor, 0); | |
580 | dfd = dev_open(devpath, O_RDWR); | |
581 | if (dfd < 0) { | |
582 | pr_err("Failed to open %s\n", devpath); | |
583 | ret = 1; | |
584 | goto free_info; | |
585 | } | |
586 | ||
587 | if (!subarray) { | |
588 | ret = st->ss->load_super(st, dfd, NULL); | |
589 | if (ret) { | |
590 | pr_err("Failed to load super-block.\n"); | |
591 | close(dfd); | |
592 | goto free_info; | |
593 | } | |
594 | ||
595 | ret = st->ss->update_super(st, sra, UOPT_PPL, | |
596 | devname, | |
597 | c->verbose, 0, NULL); | |
598 | if (ret) { | |
599 | close(dfd); | |
600 | st->ss->free_super(st); | |
601 | goto free_info; | |
602 | } | |
603 | st->ss->getinfo_super(st, &info, NULL); | |
604 | } | |
605 | ||
606 | ret |= sysfs_set_num(sra, sd, "ppl_sector", | |
607 | info.ppl_sector); | |
608 | ret |= sysfs_set_num(sra, sd, "ppl_size", | |
609 | info.ppl_size); | |
610 | ||
611 | if (ret) { | |
612 | pr_err("Failed to set PPL attributes for %s\n", | |
613 | sd->sys_name); | |
614 | close(dfd); | |
615 | st->ss->free_super(st); | |
616 | goto free_info; | |
617 | } | |
618 | ||
619 | ret = st->ss->write_init_ppl(st, &info, dfd); | |
620 | if (ret) | |
621 | pr_err("Failed to write PPL\n"); | |
622 | ||
623 | close(dfd); | |
624 | ||
625 | if (!subarray) | |
626 | st->ss->free_super(st); | |
627 | ||
628 | if (ret) | |
629 | goto free_info; | |
630 | } | |
631 | } | |
632 | ||
633 | ret = sysfs_set_str(sra, NULL, "consistency_policy", | |
634 | map_num_s(consistency_policies, | |
635 | s->consistency_policy)); | |
636 | if (ret) | |
637 | pr_err("Failed to change array consistency policy\n"); | |
638 | ||
639 | free_info: | |
640 | sysfs_free(sra); | |
641 | free_st: | |
642 | free(st); | |
643 | free(subarray); | |
644 | ||
645 | return ret; | |
646 | } | |
647 | ||
648 | /* | |
649 | * When reshaping an array we might need to backup some data. | |
650 | * This is written to all spares with a 'super_block' describing it. | |
651 | * The superblock goes 4K from the end of the used space on the | |
652 | * device. | |
653 | * It is written after the backup is complete. | |
654 | * It has the following structure. | |
655 | */ | |
656 | ||
657 | static struct mdp_backup_super { | |
658 | char magic[16]; /* md_backup_data-1 or -2 */ | |
659 | __u8 set_uuid[16]; | |
660 | __u64 mtime; | |
661 | /* start/sizes in 512byte sectors */ | |
662 | __u64 devstart; /* address on backup device/file of data */ | |
663 | __u64 arraystart; | |
664 | __u64 length; | |
665 | __u32 sb_csum; /* csum of preceding bytes. */ | |
666 | __u32 pad1; | |
667 | __u64 devstart2; /* offset into data of second section */ | |
668 | __u64 arraystart2; | |
669 | __u64 length2; | |
670 | __u32 sb_csum2; /* csum of preceding bytes. */ | |
671 | __u8 pad[512-68-32]; | |
672 | } __attribute__((aligned(512))) bsb, bsb2; | |
673 | ||
674 | static __u32 bsb_csum(char *buf, int len) | |
675 | { | |
676 | int i; | |
677 | int csum = 0; | |
678 | for (i = 0; i < len; i++) | |
679 | csum = (csum<<3) + buf[0]; | |
680 | return __cpu_to_le32(csum); | |
681 | } | |
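| /* Illustrative use of bsb_csum() (a sketch, not a call site in this | |
| * excerpt): the checksum covers every byte of the superblock before the | |
| * csum field itself, e.g. | |
| * bsb.sb_csum = bsb_csum((char *)&bsb, | |
| * (char *)&bsb.sb_csum - (char *)&bsb); | |
| */ | |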
682 | ||
683 | static int check_idle(struct supertype *st) | |
684 | { | |
685 | /* Check that all member arrays for this container, or the | |
686 | * container of this array, are idle | |
687 | */ | |
688 | char *container = (st->container_devnm[0] | |
689 | ? st->container_devnm : st->devnm); | |
690 | struct mdstat_ent *ent, *e; | |
691 | int is_idle = 1; | |
692 | ||
693 | ent = mdstat_read(0, 0); | |
694 | for (e = ent ; e; e = e->next) { | |
695 | if (!is_container_member(e, container)) | |
696 | continue; | |
697 | /* frozen array is not idle */ | |
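| /* (member arrays show metadata_version as "external:/<container>/<n>"; | |
| * byte 9, the character right after the colon, appears to be flipped | |
| * to '-' while the array is frozen for mdmon) | |
| */ | |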
698 | if (e->percent >= 0 || e->metadata_version[9] == '-') { | |
699 | is_idle = 0; | |
700 | break; | |
701 | } | |
702 | } | |
703 | free_mdstat(ent); | |
704 | return is_idle; | |
705 | } | |
706 | ||
707 | static int freeze_container(struct supertype *st) | |
708 | { | |
709 | char *container = (st->container_devnm[0] | |
710 | ? st->container_devnm : st->devnm); | |
711 | ||
712 | if (!check_idle(st)) | |
713 | return -1; | |
714 | ||
715 | if (block_monitor(container, 1)) { | |
716 | pr_err("failed to freeze container\n"); | |
717 | return -2; | |
718 | } | |
719 | ||
720 | return 1; | |
721 | } | |
722 | ||
723 | static void unfreeze_container(struct supertype *st) | |
724 | { | |
725 | char *container = (st->container_devnm[0] | |
726 | ? st->container_devnm : st->devnm); | |
727 | ||
728 | unblock_monitor(container, 1); | |
729 | } | |
730 | ||
731 | static int freeze(struct supertype *st) | |
732 | { | |
733 | /* Try to freeze resync/rebuild on this array/container. | |
734 | * Return -1 if the array is busy, | |
735 | * return -2 container cannot be frozen, | |
736 | * return 0 if this kernel doesn't support 'frozen' | |
737 | * return 1 if it worked. | |
738 | */ | |
739 | if (st->ss->external) | |
740 | return freeze_container(st); | |
741 | else { | |
742 | struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION); | |
743 | int err; | |
744 | char buf[SYSFS_MAX_BUF_SIZE]; | |
745 | ||
746 | if (!sra) | |
747 | return -1; | |
748 | /* Need to clear any 'read-auto' status */ | |
749 | if (sysfs_get_str(sra, NULL, "array_state", buf, sizeof(buf)) > 0 && | |
750 | strncmp(buf, "read-auto", 9) == 0) | |
751 | sysfs_set_str(sra, NULL, "array_state", "clean"); | |
752 | ||
753 | err = sysfs_freeze_array(sra); | |
754 | sysfs_free(sra); | |
755 | return err; | |
756 | } | |
757 | } | |
758 | ||
759 | static void unfreeze(struct supertype *st) | |
760 | { | |
761 | if (st->ss->external) | |
762 | return unfreeze_container(st); | |
763 | else { | |
764 | struct mdinfo *sra = sysfs_read(-1, st->devnm, GET_VERSION); | |
765 | char buf[SYSFS_MAX_BUF_SIZE]; | |
766 | ||
767 | if (sra && | |
768 | sysfs_get_str(sra, NULL, "sync_action", buf, sizeof(buf)) > 0 && | |
769 | strcmp(buf, "frozen\n") == 0) | |
770 | sysfs_set_str(sra, NULL, "sync_action", "idle"); | |
771 | sysfs_free(sra); | |
772 | } | |
773 | } | |
774 | ||
775 | static void wait_reshape(struct mdinfo *sra) | |
776 | { | |
777 | int fd = sysfs_get_fd(sra, NULL, "sync_action"); | |
778 | char action[SYSFS_MAX_BUF_SIZE]; | |
779 | ||
780 | if (fd < 0) | |
781 | return; | |
782 | ||
783 | while (sysfs_fd_get_str(fd, action, sizeof(action)) > 0 && | |
784 | strncmp(action, "reshape", 7) == 0) | |
785 | sysfs_wait(fd, NULL); | |
786 | close(fd); | |
787 | } | |
788 | ||
789 | static int reshape_super(struct supertype *st, struct shape *shape, struct context *c) | |
790 | { | |
791 | /* nothing extra to check in the native case */ | |
792 | if (!st->ss->external) | |
793 | return 0; | |
794 | if (!st->ss->reshape_super || !st->ss->manage_reshape) { | |
795 | pr_err("%s metadata does not support reshape\n", | |
796 | st->ss->name); | |
797 | return 1; | |
798 | } | |
799 | ||
800 | return st->ss->reshape_super(st, shape, c); | |
801 | } | |
802 | ||
803 | /** | |
804 | * reshape_super_size() - Reshape array, size only. | |
805 | * | |
806 | * @st: supertype. | |
807 | * @devname: device name. | |
808 | * @size: component size. | |
809 | * @direction: metadata changes direction. | |
810 | * Returns: 0 on success, 1 otherwise. | |
811 | * | |
812 | * This function is solely used to change the size of the volume. | |
813 | * Setting size is not valid for a container. | |
814 | * Size is the only change that can be rolled back, thus the @direction param. | |
815 | */ | |
816 | static int reshape_super_size(struct supertype *st, char *devname, | |
817 | unsigned long long size, change_dir_t direction, | |
818 | struct context *c) | |
819 | { | |
820 | struct shape shape = {0}; | |
821 | ||
822 | shape.level = UnSet; | |
823 | shape.layout = UnSet; | |
824 | shape.delta_disks = UnSet; | |
825 | shape.dev = devname; | |
826 | shape.size = size; | |
827 | shape.direction = direction; | |
828 | ||
829 | return reshape_super(st, &shape, c); | |
830 | } | |
831 | ||
832 | /** | |
833 | * reshape_super_non_size() - Reshape array, non size changes. | |
834 | * | |
835 | * @st: supertype. | |
836 | * @devname: device name. | |
837 | * @info: superblock info. | |
838 | * Returns: 0 on success, 1 otherwise. | |
839 | * | |
840 | * This function is used for any external array change except size. | |
841 | * It handles both volumes and containers. | |
842 | * For changes other than size, rollback is not possible. | |
843 | */ | |
844 | static int reshape_super_non_size(struct supertype *st, char *devname, | |
845 | struct mdinfo *info, struct context *c) | |
846 | { | |
847 | struct shape shape = {0}; | |
848 | /* Size already set to zero, not updating size */ | |
849 | shape.level = info->new_level; | |
850 | shape.layout = info->new_layout; | |
851 | shape.chunk = info->new_chunk; | |
852 | shape.raiddisks = info->array.raid_disks; | |
853 | shape.delta_disks = info->delta_disks; | |
854 | shape.dev = devname; | |
855 | /* Rollback not possible for non size changes */ | |
856 | shape.direction = APPLY_METADATA_CHANGES; | |
857 | ||
858 | return reshape_super(st, &shape, c); | |
859 | } | |
860 | ||
861 | static void sync_metadata(struct supertype *st) | |
862 | { | |
863 | if (st->ss->external) { | |
864 | if (st->update_tail) { | |
865 | flush_metadata_updates(st); | |
866 | st->update_tail = &st->updates; | |
867 | } else | |
868 | st->ss->sync_metadata(st); | |
869 | } | |
870 | } | |
871 | ||
872 | static int subarray_set_num(char *container, struct mdinfo *sra, char *name, int n) | |
873 | { | |
874 | /* when dealing with external metadata subarrays we need to be | |
875 | * prepared to handle EAGAIN. The kernel may need to wait for | |
876 | * mdmon to mark the array active so the kernel can handle | |
877 | * allocations/writeback when preparing the reshape action | |
878 | * (md_allow_write()). We temporarily disable safe_mode_delay | |
879 | * to close a race with the array_state going clean before the | |
880 | * next write to raid_disks / stripe_cache_size | |
881 | */ | |
882 | char safe[SYSFS_MAX_BUF_SIZE]; | |
883 | int rc; | |
884 | ||
885 | /* only 'raid_disks' and 'stripe_cache_size' trigger md_allow_write */ | |
886 | if (!container || | |
887 | (strcmp(name, "raid_disks") != 0 && | |
888 | strcmp(name, "stripe_cache_size") != 0)) | |
889 | return sysfs_set_num(sra, NULL, name, n); | |
890 | ||
891 | rc = sysfs_get_str(sra, NULL, "safe_mode_delay", safe, sizeof(safe)); | |
892 | if (rc <= 0) | |
893 | return -1; | |
894 | sysfs_set_num(sra, NULL, "safe_mode_delay", 0); | |
895 | rc = sysfs_set_num(sra, NULL, name, n); | |
896 | if (rc < 0 && errno == EAGAIN) { | |
897 | ping_monitor(container); | |
898 | /* if we get EAGAIN here then the monitor is not active | |
899 | * so stop trying | |
900 | */ | |
901 | rc = sysfs_set_num(sra, NULL, name, n); | |
902 | } | |
903 | sysfs_set_str(sra, NULL, "safe_mode_delay", safe); | |
904 | return rc; | |
905 | } | |
906 | ||
907 | int start_reshape(struct mdinfo *sra, int already_running, | |
908 | int before_data_disks, int data_disks, struct supertype *st) | |
909 | { | |
910 | int err; | |
911 | unsigned long long sync_max_to_set; | |
912 | ||
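| /* Push suspend_lo up past suspend_hi before moving both, for the same | |
| * reason described in abort_reshape(): older kernels only allowed the | |
| * suspend range to move while it was empty. | |
| */ | |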
913 | sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL); | |
914 | err = sysfs_set_num(sra, NULL, "suspend_hi", sra->reshape_progress); | |
915 | err = err ?: sysfs_set_num(sra, NULL, "suspend_lo", | |
916 | sra->reshape_progress); | |
917 | if (before_data_disks <= data_disks) | |
918 | sync_max_to_set = sra->reshape_progress / data_disks; | |
919 | else | |
920 | sync_max_to_set = (sra->component_size * data_disks | |
921 | - sra->reshape_progress) / data_disks; | |
922 | ||
923 | if (!already_running) | |
924 | sysfs_set_num(sra, NULL, "sync_min", sync_max_to_set); | |
925 | ||
926 | if (st->ss->external) | |
927 | err = err ?: sysfs_set_num(sra, NULL, "sync_max", sync_max_to_set); | |
928 | else | |
929 | err = err ?: sysfs_set_str(sra, NULL, "sync_max", "max"); | |
930 | ||
931 | if (!already_running && err == 0) { | |
932 | int cnt = 5; | |
933 | do { | |
934 | err = sysfs_set_str(sra, NULL, "sync_action", | |
935 | "reshape"); | |
936 | if (err) | |
937 | sleep_for(1, 0, true); | |
938 | } while (err && errno == EBUSY && cnt-- > 0); | |
939 | } | |
940 | return err; | |
941 | } | |
942 | ||
943 | void abort_reshape(struct mdinfo *sra) | |
944 | { | |
945 | sysfs_set_str(sra, NULL, "sync_action", "idle"); | |
946 | /* | |
947 | * Prior to kernel commit: 23ddff3792f6 ("md: allow suspend_lo and | |
948 | * suspend_hi to decrease as well as increase.") | |
949 | * you could only increase suspend_{lo,hi} unless the region they | |
950 | * covered was empty. So to reset to 0, you need to push suspend_lo | |
951 | * up past suspend_hi first. So to maximize the chance of mdadm | |
952 | * working on all kernels, we want to keep doing that. | |
953 | */ | |
954 | sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL); | |
955 | sysfs_set_num(sra, NULL, "suspend_hi", 0); | |
956 | sysfs_set_num(sra, NULL, "suspend_lo", 0); | |
957 | sysfs_set_num(sra, NULL, "sync_min", 0); | |
958 | // It isn't safe to reset sync_max as we aren't monitoring. | |
959 | // Array really should be stopped at this point. | |
960 | } | |
961 | ||
962 | int remove_disks_for_takeover(struct supertype *st, | |
963 | struct mdinfo *sra, | |
964 | int layout) | |
965 | { | |
966 | int nr_of_copies; | |
967 | struct mdinfo *remaining; | |
968 | int slot; | |
969 | ||
970 | if (st->ss->external) { | |
971 | int rv = 0; | |
972 | struct mdinfo *arrays = st->ss->container_content(st, NULL); | |
973 | /* | |
974 | * container_content returns the list of arrays in the container. | |
975 | * If arrays->next is not NULL there is more than one array in the | |
976 | * container, and the operation should be blocked. | |
977 | */ | |
978 | if (arrays) { | |
979 | if (arrays->next) | |
980 | rv = 1; | |
981 | sysfs_free(arrays); | |
982 | if (rv) { | |
983 | pr_err("Error. Cannot perform operation on %s- for this operation " | |
984 | "it MUST be single array in container\n", st->devnm); | |
985 | return rv; | |
986 | } | |
987 | } | |
988 | } | |
989 | ||
990 | if (sra->array.level == 10) | |
991 | nr_of_copies = layout & 0xff; | |
992 | else if (sra->array.level == 1) | |
993 | nr_of_copies = sra->array.raid_disks; | |
994 | else | |
995 | return 1; | |
996 | ||
997 | remaining = sra->devs; | |
998 | sra->devs = NULL; | |
999 | /* for each 'copy', select one device and remove from the list. */ | |
1000 | for (slot = 0; slot < sra->array.raid_disks; slot += nr_of_copies) { | |
1001 | struct mdinfo **diskp; | |
1002 | int found = 0; | |
1003 | ||
1004 | /* Find a working device to keep */ | |
1005 | for (diskp = &remaining; *diskp ; diskp = &(*diskp)->next) { | |
1006 | struct mdinfo *disk = *diskp; | |
1007 | ||
1008 | if (disk->disk.raid_disk < slot) | |
1009 | continue; | |
1010 | if (disk->disk.raid_disk >= slot + nr_of_copies) | |
1011 | continue; | |
1012 | if (disk->disk.state & (1<<MD_DISK_REMOVED)) | |
1013 | continue; | |
1014 | if (disk->disk.state & (1<<MD_DISK_FAULTY)) | |
1015 | continue; | |
1016 | if (!(disk->disk.state & (1<<MD_DISK_SYNC))) | |
1017 | continue; | |
1018 | ||
1019 | /* We have found a good disk to use! */ | |
1020 | *diskp = disk->next; | |
1021 | disk->next = sra->devs; | |
1022 | sra->devs = disk; | |
1023 | found = 1; | |
1024 | break; | |
1025 | } | |
1026 | if (!found) | |
1027 | break; | |
1028 | } | |
1029 | ||
1030 | if (slot < sra->array.raid_disks) { | |
1031 | /* didn't find all slots */ | |
1032 | struct mdinfo **e; | |
1033 | e = &remaining; | |
1034 | while (*e) | |
1035 | e = &(*e)->next; | |
1036 | *e = sra->devs; | |
1037 | sra->devs = remaining; | |
1038 | return 1; | |
1039 | } | |
1040 | ||
1041 | /* Remove all 'remaining' devices from the array */ | |
1042 | while (remaining) { | |
1043 | struct mdinfo *sd = remaining; | |
1044 | remaining = sd->next; | |
1045 | ||
1046 | sysfs_set_str(sra, sd, "state", "faulty"); | |
1047 | sysfs_set_str(sra, sd, "slot", STR_COMMON_NONE); | |
1048 | /* for external metadata, disks should be removed in mdmon */ | |
1049 | if (!st->ss->external) | |
1050 | sysfs_set_str(sra, sd, "state", "remove"); | |
1051 | sd->disk.state |= (1<<MD_DISK_REMOVED); | |
1052 | sd->disk.state &= ~(1<<MD_DISK_SYNC); | |
1053 | sd->next = sra->devs; | |
1054 | sra->devs = sd; | |
1055 | } | |
1056 | return 0; | |
1057 | } | |
1058 | ||
1059 | void reshape_free_fdlist(int *fdlist, | |
1060 | unsigned long long *offsets, | |
1061 | int size) | |
1062 | { | |
1063 | int i; | |
1064 | ||
1065 | for (i = 0; i < size; i++) | |
1066 | if (fdlist[i] >= 0) | |
1067 | close(fdlist[i]); | |
1068 | ||
1069 | free(fdlist); | |
1070 | free(offsets); | |
1071 | } | |
1072 | ||
1073 | int reshape_prepare_fdlist(char *devname, | |
1074 | struct mdinfo *sra, | |
1075 | int raid_disks, | |
1076 | int nrdisks, | |
1077 | unsigned long blocks, | |
1078 | char *backup_file, | |
1079 | int *fdlist, | |
1080 | unsigned long long *offsets) | |
1081 | { | |
1082 | int d = 0; | |
1083 | struct mdinfo *sd; | |
1084 | ||
1085 | enable_fds(nrdisks); | |
1086 | for (d = 0; d <= nrdisks; d++) | |
1087 | fdlist[d] = -1; | |
1088 | d = raid_disks; | |
1089 | for (sd = sra->devs; sd; sd = sd->next) { | |
1090 | if (sd->disk.state & (1<<MD_DISK_FAULTY)) | |
1091 | continue; | |
1092 | if (sd->disk.state & (1<<MD_DISK_SYNC) && | |
1093 | sd->disk.raid_disk < raid_disks) { | |
1094 | char *dn = map_dev(sd->disk.major, sd->disk.minor, 1); | |
1095 | fdlist[sd->disk.raid_disk] = dev_open(dn, O_RDONLY); | |
1096 | offsets[sd->disk.raid_disk] = sd->data_offset*512; | |
1097 | if (fdlist[sd->disk.raid_disk] < 0) { | |
1098 | pr_err("%s: cannot open component %s\n", | |
1099 | devname, dn ? dn : "-unknown-"); | |
1100 | d = -1; | |
1101 | goto release; | |
1102 | } | |
1103 | } else if (backup_file == NULL) { | |
1104 | /* spare */ | |
1105 | char *dn = map_dev(sd->disk.major, sd->disk.minor, 1); | |
1106 | fdlist[d] = dev_open(dn, O_RDWR); | |
1107 | offsets[d] = (sd->data_offset + sra->component_size - blocks - 8)*512; | |
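| /* This places the backup data just below the last 8 sectors (4K) of | |
| * the spare's space, where the backup superblock (mdp_backup_super | |
| * above) is written. | |
| */ | |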
1108 | if (fdlist[d] < 0) { | |
1109 | pr_err("%s: cannot open component %s\n", | |
1110 | devname, dn ? dn : "-unknown-"); | |
1111 | d = -1; | |
1112 | goto release; | |
1113 | } | |
1114 | d++; | |
1115 | } | |
1116 | } | |
1117 | release: | |
1118 | return d; | |
1119 | } | |
1120 | ||
1121 | int reshape_open_backup_file(char *backup_file, | |
1122 | int fd, | |
1123 | char *devname, | |
1124 | long blocks, | |
1125 | int *fdlist, | |
1126 | unsigned long long *offsets, | |
1127 | char *sys_name, | |
1128 | int restart) | |
1129 | { | |
1130 | /* Return 1 on success, 0 on any form of failure */ | |
1131 | /* need to check backup file is large enough */ | |
1132 | char buf[512]; | |
1133 | struct stat stb; | |
1134 | unsigned int dev; | |
1135 | int i; | |
1136 | ||
1137 | *fdlist = open(backup_file, O_RDWR|O_CREAT|(restart ? O_TRUNC : O_EXCL), | |
1138 | S_IRUSR | S_IWUSR); | |
1139 | *offsets = 8 * 512; | |
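| /* Backup data starts 8 sectors (4 KiB) into the file; the loop below | |
| * sizes the file to blocks + 8 sectors, presumably leaving room for | |
| * the backup superblock. | |
| */ | |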
1140 | if (*fdlist < 0) { | |
1141 | pr_err("%s: cannot create backup file %s: %s\n", | |
1142 | devname, backup_file, strerror(errno)); | |
1143 | return 0; | |
1144 | } | |
1145 | /* Guard against backup file being on array device. | |
1146 | * If array is partitioned or if LVM etc is in the | |
1147 | * way this will not notice, but it is better than | |
1148 | * nothing. | |
1149 | */ | |
1150 | if (fstat(*fdlist, &stb) != 0) | |
1151 | goto error; | |
1152 | dev = stb.st_dev; | |
1153 | if (fstat(fd, &stb) != 0) | |
1154 | goto error; | |
1155 | if (stb.st_rdev == dev) { | |
1156 | pr_err("backup file must NOT be on the array being reshaped.\n"); | |
1157 | goto error; | |
1158 | } | |
1159 | ||
1160 | memset(buf, 0, 512); | |
1161 | for (i=0; i < blocks + 8 ; i++) { | |
1162 | if (write(*fdlist, buf, 512) != 512) { | |
1163 | pr_err("%s: cannot create backup file %s: %s\n", | |
1164 | devname, backup_file, strerror(errno)); | |
1165 | return 0; | |
1166 | } | |
1167 | } | |
1168 | if (fsync(*fdlist) != 0) { | |
1169 | pr_err("%s: cannot create backup file %s: %s\n", | |
1170 | devname, backup_file, strerror(errno)); | |
1171 | return 0; | |
1172 | } | |
1173 | ||
1174 | if (!restart && strncmp(backup_file, MAP_DIR, strlen(MAP_DIR)) != 0) { | |
1175 | char *bu = make_backup(sys_name); | |
1176 | if (symlink(backup_file, bu)) | |
1177 | pr_err("Recording backup file in " MAP_DIR " failed: %s\n", | |
1178 | strerror(errno)); | |
1179 | free(bu); | |
1180 | } | |
1181 | ||
1182 | return 1; | |
1183 | error: | |
1184 | close(*fdlist); | |
1185 | return 0; | |
1186 | } | |
1187 | ||
1188 | unsigned long compute_backup_blocks(int nchunk, int ochunk, | |
1189 | unsigned int ndata, unsigned int odata) | |
1190 | { | |
1191 | unsigned long a, b, blocks; | |
1192 | /* So how much do we need to back up? | |
1193 | * We need an amount of data which is both a whole number of | |
1194 | * old stripes and a whole number of new stripes. | |
1195 | * So LCM for (chunksize*datadisks). | |
1196 | */ | |
1197 | a = (ochunk/512) * odata; | |
1198 | b = (nchunk/512) * ndata; | |
1199 | /* Find GCD */ | |
1200 | a = GCD(a, b); | |
1201 | /* LCM == product / GCD */ | |
1202 | blocks = (unsigned long)(ochunk/512) * (unsigned long)(nchunk/512) * | |
1203 | odata * ndata / a; | |
1204 | ||
1205 | return blocks; | |
1206 | } | |
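| /* Worked example for compute_backup_blocks() (illustrative numbers): | |
| * growing a RAID5 from 3 to 4 devices with a 512K chunk gives odata=2, | |
| * ndata=3, ochunk=nchunk=512K: | |
| * a = 1024*2 = 2048, b = 1024*3 = 3072, GCD = 1024, so | |
| * blocks = 1024*1024*2*3/1024 = 6144 sectors (3 MiB), | |
| * i.e. 3 old stripes and 2 new stripes. | |
| */ | |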
1207 | ||
1208 | char *analyse_change(char *devname, struct mdinfo *info, struct reshape *re) | |
1209 | { | |
1210 | /* Based on the current array state in info->array and | |
1211 | * the changes in info->new_* etc, determine: | |
1212 | * - whether the change is possible | |
1213 | * - Intermediate level/raid_disks/layout | |
1214 | * - whether a restriping reshape is needed | |
1215 | * - number of sectors in minimum change unit. This | |
1216 | * will cover a whole number of stripes in 'before' and | |
1217 | * 'after'. | |
1218 | * | |
1219 | * Return message if the change should be rejected | |
1220 | * NULL if the change can be achieved | |
1221 | * | |
1222 | * This can be called as part of starting a reshape, or | |
1223 | * when assembling an array that is undergoing reshape. | |
1224 | */ | |
1225 | int near, far, offset, copies; | |
1226 | int new_disks; | |
1227 | int old_chunk, new_chunk; | |
1228 | /* delta_parity records change in number of devices | |
1229 | * caused by level change | |
1230 | */ | |
1231 | int delta_parity = 0; | |
1232 | ||
1233 | memset(re, 0, sizeof(*re)); | |
1234 | ||
1235 | /* If a new level not explicitly given, we assume no-change */ | |
1236 | if (info->new_level == UnSet) | |
1237 | info->new_level = info->array.level; | |
1238 | ||
1239 | if (info->new_chunk) | |
1240 | switch (info->new_level) { | |
1241 | case 0: | |
1242 | case 4: | |
1243 | case 5: | |
1244 | case 6: | |
1245 | case 10: | |
1246 | /* chunk size is meaningful, must divide component_size | |
1247 | * evenly | |
1248 | */ | |
1249 | if (info->component_size % (info->new_chunk/512)) { | |
1250 | unsigned long long shrink = info->component_size; | |
1251 | shrink &= ~(unsigned long long)(info->new_chunk/512-1); | |
1252 | pr_err("New chunk size (%dK) does not evenly divide device size (%lluk)\n", | |
1253 | info->new_chunk/1024, info->component_size/2); | |
1254 | pr_err("After shrinking any filesystem, \"mdadm --grow %s --size %llu\"\n", | |
1255 | devname, shrink/2); | |
1256 | pr_err("will shrink the array so the given chunk size would work.\n"); | |
1257 | return ""; | |
1258 | } | |
1259 | break; | |
1260 | default: | |
1261 | return "chunk size not meaningful for this level"; | |
1262 | } | |
1263 | else | |
1264 | info->new_chunk = info->array.chunk_size; | |
1265 | ||
1266 | switch (info->array.level) { | |
1267 | default: | |
1268 | return "No reshape is possibly for this RAID level"; | |
1269 | case LEVEL_LINEAR: | |
1270 | if (info->delta_disks != UnSet) | |
1271 | return "Only --add is supported for LINEAR, setting --raid-disks is not needed"; | |
1272 | else | |
1273 | return "Only --add is supported for LINEAR, other --grow options are not meaningful"; | |
1274 | case 1: | |
1275 | /* RAID1 can convert to RAID1 with different disks, or | |
1276 | * raid5 with 2 disks, or | |
1277 | * raid0 with 1 disk | |
1278 | */ | |
1279 | if (info->new_level > 1 && (info->component_size & 7)) | |
1280 | return "Cannot convert RAID1 of this size - reduce size to multiple of 4K first."; | |
1281 | if (info->new_level == 0) { | |
1282 | if (info->delta_disks != UnSet && | |
1283 | info->delta_disks != 0) | |
1284 | return "Cannot change number of disks with RAID1->RAID0 conversion"; | |
1285 | re->level = 0; | |
1286 | re->before.data_disks = 1; | |
1287 | re->after.data_disks = 1; | |
1288 | return NULL; | |
1289 | } | |
1290 | if (info->new_level == 1) { | |
1291 | if (info->delta_disks == UnSet) | |
1292 | /* Don't know what to do */ | |
1293 | return "no change requested for Growing RAID1"; | |
1294 | re->level = 1; | |
1295 | return NULL; | |
1296 | } | |
1297 | if (info->array.raid_disks != 2 && info->new_level == 5) | |
1298 | return "Can only convert a 2-device array to RAID5"; | |
1299 | if (info->array.raid_disks == 2 && info->new_level == 5) { | |
1300 | re->level = 5; | |
1301 | re->before.data_disks = 1; | |
1302 | if (info->delta_disks != UnSet && | |
1303 | info->delta_disks != 0) | |
1304 | re->after.data_disks = 1 + info->delta_disks; | |
1305 | else | |
1306 | re->after.data_disks = 1; | |
1307 | if (re->after.data_disks < 1) | |
1308 | return "Number of disks too small for RAID5"; | |
1309 | ||
1310 | re->before.layout = ALGORITHM_LEFT_SYMMETRIC; | |
1311 | info->array.chunk_size = 65536; | |
1312 | break; | |
1313 | } | |
1314 | /* Could do some multi-stage conversions, but leave that to | |
1315 | * later. | |
1316 | */ | |
1317 | return "Impossibly level change request for RAID1"; | |
1318 | ||
1319 | case 10: | |
1320 | /* RAID10 can be converted from near mode to | |
1321 | * RAID0 by removing some devices. | |
1322 | * It can also be reshaped if the kernel supports | |
1323 | * new_data_offset. | |
1324 | */ | |
1325 | switch (info->new_level) { | |
1326 | case 0: | |
1327 | if ((info->array.layout & ~0xff) != 0x100) | |
1328 | return "Cannot Grow RAID10 with far/offset layout"; | |
1329 | /* | |
1330 | * number of devices must be multiple of | |
1331 | * number of copies | |
1332 | */ | |
1333 | if (info->array.raid_disks % | |
1334 | (info->array.layout & 0xff)) | |
1335 | return "RAID10 layout too complex for Grow operation"; | |
1336 | ||
1337 | new_disks = (info->array.raid_disks / | |
1338 | (info->array.layout & 0xff)); | |
1339 | if (info->delta_disks == UnSet) | |
1340 | info->delta_disks = (new_disks | |
1341 | - info->array.raid_disks); | |
1342 | ||
1343 | if (info->delta_disks != | |
1344 | new_disks - info->array.raid_disks) | |
1345 | return "New number of raid-devices impossible for RAID10"; | |
1346 | if (info->new_chunk && | |
1347 | info->new_chunk != info->array.chunk_size) | |
1348 | return "Cannot change chunk-size with RAID10 Grow"; | |
1349 | ||
1350 | /* looks good */ | |
1351 | re->level = 0; | |
1352 | re->before.data_disks = new_disks; | |
1353 | re->after.data_disks = re->before.data_disks; | |
1354 | return NULL; | |
1355 | ||
1356 | case 10: | |
1357 | near = info->array.layout & 0xff; | |
1358 | far = (info->array.layout >> 8) & 0xff; | |
1359 | offset = info->array.layout & 0x10000; | |
1360 | if (far > 1 && !offset) | |
1361 | return "Cannot reshape RAID10 in far-mode"; | |
1362 | copies = near * far; | |
1363 | ||
1364 | old_chunk = info->array.chunk_size * far; | |
1365 | ||
1366 | if (info->new_layout == UnSet) | |
1367 | info->new_layout = info->array.layout; | |
1368 | else { | |
1369 | near = info->new_layout & 0xff; | |
1370 | far = (info->new_layout >> 8) & 0xff; | |
1371 | offset = info->new_layout & 0x10000; | |
1372 | if (far > 1 && !offset) | |
1373 | return "Cannot reshape RAID10 to far-mode"; | |
1374 | if (near * far != copies) | |
1375 | return "Cannot change number of copies when reshaping RAID10"; | |
1376 | } | |
1377 | if (info->delta_disks == UnSet) | |
1378 | info->delta_disks = 0; | |
1379 | new_disks = (info->array.raid_disks + | |
1380 | info->delta_disks); | |
1381 | ||
1382 | new_chunk = info->new_chunk * far; | |
1383 | ||
1384 | re->level = 10; | |
1385 | re->before.layout = info->array.layout; | |
1386 | re->before.data_disks = info->array.raid_disks; | |
1387 | re->after.layout = info->new_layout; | |
1388 | re->after.data_disks = new_disks; | |
1389 | /* For RAID10 we don't do backup but do allow reshape, | |
1390 | * so set backup_blocks to INVALID_SECTORS rather than | |
1391 | * zero. | |
1392 | * And there is no need to synchronise stripes on both | |
1393 | * 'old' and 'new'. So the important | |
1394 | * number is the minimum data_offset difference | |
1395 | * which is the larger of (offset copies * chunk). | |
1396 | */ | |
1397 | re->backup_blocks = INVALID_SECTORS; | |
1398 | re->min_offset_change = max(old_chunk, new_chunk) / 512; | |
1399 | if (new_disks < re->before.data_disks && | |
1400 | info->space_after < re->min_offset_change) | |
1401 | /* Reduce component size by one chunk */ | |
1402 | re->new_size = (info->component_size - | |
1403 | re->min_offset_change); | |
1404 | else | |
1405 | re->new_size = info->component_size; | |
1406 | re->new_size = re->new_size * new_disks / copies; | |
1407 | return NULL; | |
1408 | ||
1409 | default: | |
1410 | return "RAID10 can only be changed to RAID0"; | |
1411 | } | |
1412 | case 0: | |
1413 | /* RAID0 can be converted to RAID10, or to RAID456 */ | |
1414 | if (info->new_level == 10) { | |
1415 | if (info->new_layout == UnSet && | |
1416 | info->delta_disks == UnSet) { | |
1417 | /* Assume near=2 layout */ | |
1418 | info->new_layout = 0x102; | |
1419 | info->delta_disks = info->array.raid_disks; | |
1420 | } | |
1421 | if (info->new_layout == UnSet) { | |
1422 | int copies = 1 + (info->delta_disks | |
1423 | / info->array.raid_disks); | |
1424 | if (info->array.raid_disks * (copies-1) != | |
1425 | info->delta_disks) | |
1426 | return "Impossible number of devices for RAID0->RAID10"; | |
1427 | info->new_layout = 0x100 + copies; | |
1428 | } | |
1429 | if (info->delta_disks == UnSet) { | |
1430 | int copies = info->new_layout & 0xff; | |
1431 | if (info->new_layout != 0x100 + copies) | |
1432 | return "New layout impossible for RAID0->RAID10";; | |
1433 | info->delta_disks = (copies - 1) * | |
1434 | info->array.raid_disks; | |
1435 | } | |
1436 | if (info->new_chunk && | |
1437 | info->new_chunk != info->array.chunk_size) | |
1438 | return "Cannot change chunk-size with RAID0->RAID10"; | |
1439 | /* looks good */ | |
1440 | re->level = 10; | |
1441 | re->before.data_disks = (info->array.raid_disks + | |
1442 | info->delta_disks); | |
1443 | re->after.data_disks = re->before.data_disks; | |
1444 | re->before.layout = info->new_layout; | |
1445 | return NULL; | |
1446 | } | |
1447 | ||
1448 | /* RAID0 can also convert to RAID0/4/5/6 by first converting to | |
1449 | * a raid4 style layout of the final level. | |
1450 | */ | |
1451 | switch (info->new_level) { | |
1452 | case 4: | |
1453 | delta_parity = 1; | |
1454 | case 0: | |
1455 | re->level = 4; | |
1456 | re->before.layout = 0; | |
1457 | break; | |
1458 | case 5: | |
1459 | delta_parity = 1; | |
1460 | re->level = 5; | |
1461 | re->before.layout = ALGORITHM_PARITY_N; | |
1462 | if (info->new_layout == UnSet) | |
1463 | info->new_layout = map_name(r5layout, "default"); | |
1464 | break; | |
1465 | case 6: | |
1466 | delta_parity = 2; | |
1467 | re->level = 6; | |
1468 | re->before.layout = ALGORITHM_PARITY_N; | |
1469 | if (info->new_layout == UnSet) | |
1470 | info->new_layout = map_name(r6layout, "default"); | |
1471 | break; | |
1472 | default: | |
1473 | return "Impossible level change requested"; | |
1474 | } | |
1475 | re->before.data_disks = info->array.raid_disks; | |
1476 | /* determining 'after' layout happens outside this 'switch' */ | |
1477 | break; | |
1478 | ||
1479 | case 4: | |
1480 | info->array.layout = ALGORITHM_PARITY_N; | |
1481 | case 5: | |
1482 | switch (info->new_level) { | |
1483 | case 0: | |
1484 | delta_parity = -1; | |
1485 | case 4: | |
1486 | re->level = info->array.level; | |
1487 | re->before.data_disks = info->array.raid_disks - 1; | |
1488 | re->before.layout = info->array.layout; | |
1489 | break; | |
1490 | case 5: | |
1491 | re->level = 5; | |
1492 | re->before.data_disks = info->array.raid_disks - 1; | |
1493 | re->before.layout = info->array.layout; | |
1494 | break; | |
1495 | case 6: | |
1496 | delta_parity = 1; | |
1497 | re->level = 6; | |
1498 | re->before.data_disks = info->array.raid_disks - 1; | |
1499 | switch (info->array.layout) { | |
1500 | case ALGORITHM_LEFT_ASYMMETRIC: | |
1501 | re->before.layout = ALGORITHM_LEFT_ASYMMETRIC_6; | |
1502 | break; | |
1503 | case ALGORITHM_RIGHT_ASYMMETRIC: | |
1504 | re->before.layout = ALGORITHM_RIGHT_ASYMMETRIC_6; | |
1505 | break; | |
1506 | case ALGORITHM_LEFT_SYMMETRIC: | |
1507 | re->before.layout = ALGORITHM_LEFT_SYMMETRIC_6; | |
1508 | break; | |
1509 | case ALGORITHM_RIGHT_SYMMETRIC: | |
1510 | re->before.layout = ALGORITHM_RIGHT_SYMMETRIC_6; | |
1511 | break; | |
1512 | case ALGORITHM_PARITY_0: | |
1513 | re->before.layout = ALGORITHM_PARITY_0_6; | |
1514 | break; | |
1515 | case ALGORITHM_PARITY_N: | |
1516 | re->before.layout = ALGORITHM_PARITY_N_6; | |
1517 | break; | |
1518 | default: | |
1519 | return "Cannot convert an array with this layout"; | |
1520 | } | |
1521 | break; | |
1522 | case 1: | |
1523 | if (info->array.raid_disks != 2) | |
1524 | return "Can only convert a 2-device array to RAID1"; | |
1525 | if (info->delta_disks != UnSet && | |
1526 | info->delta_disks != 0) | |
1527 | return "Cannot set raid_disk when converting RAID5->RAID1"; | |
1528 | re->level = 1; | |
1529 | info->new_chunk = 0; | |
1530 | return NULL; | |
1531 | default: | |
1532 | return "Impossible level change requested"; | |
1533 | } | |
1534 | break; | |
1535 | case 6: | |
1536 | switch (info->new_level) { | |
1537 | case 4: | |
1538 | case 5: | |
1539 | delta_parity = -1; | |
1540 | case 6: | |
1541 | re->level = 6; | |
1542 | re->before.data_disks = info->array.raid_disks - 2; | |
1543 | re->before.layout = info->array.layout; | |
1544 | break; | |
1545 | default: | |
1546 | return "Impossible level change requested"; | |
1547 | } | |
1548 | break; | |
1549 | } | |
1550 | ||
1551 | /* If we reached here then it looks like a re-stripe is | |
1552 | * happening. We have determined the intermediate level | |
1553 | * and initial raid_disks/layout and stored these in 're'. | |
1554 | * | |
1555 | * We need to deduce the final layout that can be atomically | |
1556 | * converted to the end state. | |
1557 | */ | |
1558 | switch (info->new_level) { | |
1559 | case 0: | |
1560 | /* We can only get to RAID0 from RAID4 or RAID5 | |
1561 | * with appropriate layout and one extra device | |
1562 | */ | |
1563 | if (re->level != 4 && re->level != 5) | |
1564 | return "Cannot covert to RAID0 from this level"; | |
1565 | ||
1566 | switch (re->level) { | |
1567 | case 4: | |
1568 | re->before.layout = 0; | |
1569 | re->after.layout = 0; | |
1570 | break; | |
1571 | case 5: | |
1572 | re->after.layout = ALGORITHM_PARITY_N; | |
1573 | break; | |
1574 | } | |
1575 | break; | |
1576 | ||
1577 | case 4: | |
1578 | /* We can only get to RAID4 from RAID5 */ | |
1579 | if (re->level != 4 && re->level != 5) | |
1580 | return "Cannot convert to RAID4 from this level"; | |
1581 | ||
1582 | switch (re->level) { | |
1583 | case 4: | |
1584 | re->after.layout = 0; | |
1585 | break; | |
1586 | case 5: | |
1587 | re->after.layout = ALGORITHM_PARITY_N; | |
1588 | break; | |
1589 | } | |
1590 | break; | |
1591 | ||
1592 | case 5: | |
1593 | /* We get to RAID5 from RAID5 or RAID6 */ | |
1594 | if (re->level != 5 && re->level != 6) | |
1595 | return "Cannot convert to RAID5 from this level"; | |
1596 | ||
1597 | switch (re->level) { | |
1598 | case 5: | |
1599 | if (info->new_layout == UnSet) | |
1600 | re->after.layout = re->before.layout; | |
1601 | else | |
1602 | re->after.layout = info->new_layout; | |
1603 | break; | |
1604 | case 6: | |
1605 | if (info->new_layout == UnSet) | |
1606 | info->new_layout = re->before.layout; | |
1607 | ||
1608 | /* after.layout needs to be raid6 version of new_layout */ | |
1609 | if (info->new_layout == ALGORITHM_PARITY_N) | |
1610 | re->after.layout = ALGORITHM_PARITY_N; | |
1611 | else { | |
1612 | char layout[40]; | |
1613 | char *ls = map_num(r5layout, info->new_layout); | |
1614 | int l; | |
1615 | if (ls) { | |
1616 | /* Current RAID6 layout has a RAID5 | |
1617 | * equivalent - good | |
1618 | */ | |
1619 | snprintf(layout, 40, "%s-6", ls); | |
1620 | l = map_name(r6layout, layout); | |
1621 | if (l == UnSet) | |
1622 | return "Cannot find RAID6 layout to convert to"; | |
1623 | } else { | |
1624 | /* Current RAID6 has no equivalent. | |
1625 | * If it is already a '-6' layout we | |
1626 | * can leave it unchanged, else we must | |
1627 | * fail | |
1628 | */ | |
1629 | ls = map_num(r6layout, | |
1630 | info->new_layout); | |
1631 | if (!ls || | |
1632 | strcmp(ls+strlen(ls)-2, "-6") != 0) | |
1633 | return "Please specify new layout"; | |
1634 | l = info->new_layout; | |
1635 | } | |
1636 | re->after.layout = l; | |
1637 | } | |
1638 | } | |
1639 | break; | |
1640 | ||
1641 | case 6: | |
1642 | /* We must already be at level 6 */ | |
1643 | if (re->level != 6) | |
1644 | return "Impossible level change"; | |
1645 | if (info->new_layout == UnSet) | |
1646 | re->after.layout = info->array.layout; | |
1647 | else | |
1648 | re->after.layout = info->new_layout; | |
1649 | break; | |
1650 | default: | |
1651 | return "Impossible level change requested"; | |
1652 | } | |
1653 | if (info->delta_disks == UnSet) | |
1654 | info->delta_disks = delta_parity; | |
1655 | ||
1656 | re->after.data_disks = | |
1657 | (re->before.data_disks + info->delta_disks - delta_parity); | |
1658 | ||
1659 | switch (re->level) { | |
1660 | case 6: | |
1661 | re->parity = 2; | |
1662 | break; | |
1663 | case 4: | |
1664 | case 5: | |
1665 | re->parity = 1; | |
1666 | break; | |
1667 | default: | |
1668 | re->parity = 0; | |
1669 | break; | |
1670 | } | |
1671 | /* So we have a restripe operation, we need to calculate the number | |
1672 | * of blocks per reshape operation. | |
1673 | */ | |
1674 | re->new_size = info->component_size * re->before.data_disks; | |
1675 | if (info->new_chunk == 0) | |
1676 | info->new_chunk = info->array.chunk_size; | |
1677 | if (re->after.data_disks == re->before.data_disks && | |
1678 | re->after.layout == re->before.layout && | |
1679 | info->new_chunk == info->array.chunk_size) { | |
1680 | /* Nothing to change, can change level immediately. */ | |
1681 | re->level = info->new_level; | |
1682 | re->backup_blocks = 0; | |
1683 | return NULL; | |
1684 | } | |
1685 | if (re->after.data_disks == 1 && re->before.data_disks == 1) { | |
1686 | /* chunk and layout changes make no difference */ | |
1687 | re->level = info->new_level; | |
1688 | re->backup_blocks = 0; | |
1689 | return NULL; | |
1690 | } | |
1691 | ||
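| /* The backup unit must cover a whole number of both old and new | |
| * stripes, i.e. (in sectors) the least common multiple of | |
| * old_chunk/512 * before.data_disks and new_chunk/512 * after.data_disks; | |
| * see compute_backup_blocks(). | |
| */ | |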
1692 | re->backup_blocks = compute_backup_blocks( | |
1693 | info->new_chunk, info->array.chunk_size, | |
1694 | re->after.data_disks, re->before.data_disks); | |
1695 | re->min_offset_change = re->backup_blocks / re->before.data_disks; | |
1696 | ||
1697 | re->new_size = info->component_size * re->after.data_disks; | |
1698 | return NULL; | |
1699 | } | |
1700 | ||
1701 | static int set_array_size(struct supertype *st, struct mdinfo *sra, | |
1702 | char *text_version) | |
1703 | { | |
1704 | struct mdinfo *info; | |
1705 | char *subarray; | |
1706 | int ret_val = -1; | |
1707 | ||
1708 | if ((st == NULL) || (sra == NULL)) | |
1709 | return ret_val; | |
1710 | ||
1711 | if (text_version == NULL) | |
1712 | text_version = sra->text_version; | |
1713 | subarray = strchr(text_version + 1, '/')+1; | |
1714 | info = st->ss->container_content(st, subarray); | |
1715 | if (info) { | |
1716 | unsigned long long current_size = 0; | |
1717 | unsigned long long new_size = info->custom_array_size/2; | |
1718 | ||
1719 | if (sysfs_get_ll(sra, NULL, "array_size", &current_size) == 0 && | |
1720 | new_size > current_size) { | |
1721 | if (sysfs_set_num(sra, NULL, "array_size", new_size) | |
1722 | < 0) | |
1723 | dprintf("Error: Cannot set array size"); | |
1724 | else { | |
1725 | ret_val = 0; | |
1726 | dprintf("Array size changed"); | |
1727 | } | |
1728 | dprintf_cont(" from %llu to %llu.\n", | |
1729 | current_size, new_size); | |
1730 | } | |
1731 | sysfs_free(info); | |
1732 | } else | |
1733 | dprintf("Error: set_array_size(): info pointer in NULL\n"); | |
1734 | ||
1735 | return ret_val; | |
1736 | } | |
1737 | ||
1738 | static int reshape_array(char *container, int fd, char *devname, | |
1739 | struct supertype *st, struct mdinfo *info, | |
1740 | int force, struct mddev_dev *devlist, | |
1741 | unsigned long long data_offset, | |
1742 | char *backup_file, int verbose, int forked, | |
1743 | int restart); | |
1744 | static int reshape_container(char *container, char *devname, | |
1745 | int mdfd, | |
1746 | struct supertype *st, | |
1747 | struct mdinfo *info, | |
1748 | struct context *c, | |
1749 | int forked, int restart); | |
1750 | ||
1751 | /** | |
1752 | * prepare_external_reshape() - prepares update on external metadata if supported. | |
1753 | * @devname: Device name. | |
1754 | * @subarray: Subarray. | |
1755 | * @st: Supertype. | |
1756 | * @container: Container. | |
1757 | * @cfd: Container file descriptor. | |
1758 | * | |
1759 | * Function checks that the requested reshape is supported on external metadata, | |
1760 | * and performs an initial check that the container holds the pre-requisite | |
1761 | * spare devices (mdmon owns final validation). | |
1762 | * | |
1763 | * Return: 0 on success, else 1 | |
1764 | */ | |
1765 | static int prepare_external_reshape(char *devname, char *subarray, | |
1766 | struct supertype *st, char *container, | |
1767 | const int cfd) | |
1768 | { | |
1769 | struct mdinfo *cc = NULL; | |
1770 | struct mdinfo *content = NULL; | |
1771 | ||
1772 | if (st->ss->load_container(st, cfd, NULL)) { | |
1773 | pr_err("Cannot read superblock for %s\n", devname); | |
1774 | return 1; | |
1775 | } | |
1776 | ||
1777 | if (!st->ss->container_content) | |
1778 | return 1; | |
1779 | ||
1780 | cc = st->ss->container_content(st, subarray); | |
1781 | for (content = cc; content ; content = content->next) { | |
1782 | /* | |
1783 | * check if reshape is allowed based on metadata | |
1784 | * indications stored in content->array.state | |
1785 | */ | |
1786 | if (is_bit_set(&content->array.state, MD_SB_BLOCK_VOLUME) || | |
1787 | is_bit_set(&content->array.state, MD_SB_BLOCK_CONTAINER_RESHAPE)) { | |
1788 | pr_err("Cannot reshape arrays in container with unsupported metadata: %s(%s)\n", | |
1789 | devname, container); | |
1790 | goto error; | |
1791 | } | |
1792 | if (content->consistency_policy == CONSISTENCY_POLICY_PPL) { | |
1793 | pr_err("Operation not supported when ppl consistency policy is enabled\n"); | |
1794 | goto error; | |
1795 | } | |
1796 | if (content->consistency_policy == CONSISTENCY_POLICY_BITMAP) { | |
1797 | pr_err("Operation not supported when write-intent bitmap consistency policy is enabled\n"); | |
1798 | goto error; | |
1799 | } | |
1800 | } | |
1801 | sysfs_free(cc); | |
1802 | if (mdmon_running(container)) | |
1803 | st->update_tail = &st->updates; | |
1804 | return 0; | |
1805 | error: | |
1806 | sysfs_free(cc); | |
1807 | return 1; | |
1808 | } | |
1809 | ||
1810 | int Grow_reshape(char *devname, int fd, | |
1811 | struct mddev_dev *devlist, | |
1812 | struct context *c, struct shape *s) | |
1813 | { | |
1814 | /* Make some changes in the shape of an array. | |
1815 | * The kernel must support the change. | |
1816 | * | |
1817 | * There are three different changes. Each can trigger | |
1818 | * a resync or recovery so we freeze that until we have | |
1819 | * requested everything (if kernel supports freezing - 2.6.30). | |
1820 | * The steps are: | |
1821 | * - change size (i.e. component_size) | |
1822 | * - change level | |
1823 | * - change layout/chunksize/ndisks | |
1824 | * | |
1825 | * The last can require a reshape. It is different on different | |
1826 | * levels so we need to check the level before actioning it. | |
1827 | * Sometimes the level change needs to be requested after the | |
1828 | * reshape (e.g. raid6->raid5, raid5->raid0) | |
1829 | * | |
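| * Illustrative examples (hypothetical device names): | |
| *   "mdadm --grow /dev/md0 --raid-devices=5 --backup-file=/root/md0.bak" | |
| * adds a data disk and so needs a reshape, while | |
| *   "mdadm --grow /dev/md0 --size=max" only changes the component size. | |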
1830 | */ | |
1831 | struct mdu_array_info_s array; | |
1832 | int rv = 0; | |
1833 | struct supertype *st; | |
1834 | char *subarray = NULL; | |
1835 | ||
1836 | int frozen = 0; | |
1837 | int changed = 0; | |
1838 | char *container = NULL; | |
1839 | int cfd = -1; | |
1840 | ||
1841 | struct mddev_dev *dv; | |
1842 | int added_disks; | |
1843 | ||
1844 | struct mdinfo info; | |
1845 | struct mdinfo *sra = NULL; | |
1846 | ||
1847 | if (md_get_array_info(fd, &array) < 0) { | |
1848 | pr_err("%s is not an active md array - aborting\n", | |
1849 | devname); | |
1850 | return 1; | |
1851 | } | |
1852 | if (s->level != UnSet && s->chunk) { | |
1853 | pr_err("Cannot change array level in the same operation as changing chunk size.\n"); | |
1854 | return 1; | |
1855 | } | |
1856 | ||
1857 | if (s->data_offset != INVALID_SECTORS && array.level != 10 && | |
1858 | (array.level < 4 || array.level > 6)) { | |
1859 | pr_err("--grow --data-offset not yet supported\n"); | |
1860 | return 1; | |
1861 | } | |
1862 | ||
1863 | if (s->size > 0 && | |
1864 | (s->chunk || s->level != UnSet || s->layout_str || s->raiddisks)) { | |
1865 | pr_err("cannot change component size at the same time as other changes.\n" | |
1866 | " Change size first, then check data is intact before making other changes.\n"); | |
1867 | return 1; | |
1868 | } | |
1869 | ||
1870 | if (array.level > 1 && s->size > 1 && | |
1871 | (unsigned long long) (array.chunk_size / 1024) > s->size) { | |
1872 | pr_err("component size must be larger than chunk size.\n"); | |
1873 | return 1; | |
1874 | } | |
1875 | ||
1876 | st = super_by_fd(fd, &subarray); | |
1877 | if (!st) { | |
1878 | pr_err("Unable to determine metadata format for %s\n", devname); | |
1879 | return 1; | |
1880 | } | |
1881 | if (s->raiddisks > st->max_devs) { | |
1882 | pr_err("Cannot increase raid-disks on this array beyond %d\n", st->max_devs); | |
1883 | return 1; | |
1884 | } | |
1885 | if (s->level == 0 && (array.state & (1 << MD_SB_BITMAP_PRESENT)) && | |
1886 | !(array.state & (1 << MD_SB_CLUSTERED)) && !st->ss->external) { | |
1887 | array.state &= ~(1 << MD_SB_BITMAP_PRESENT); | |
1888 | if (md_set_array_info(fd, &array) != 0) { | |
1889 | pr_err("failed to remove internal bitmap.\n"); | |
1890 | return 1; | |
1891 | } | |
1892 | } | |
1893 | ||
1894 | if (st->ss->external) { | |
1895 | if (subarray) { | |
1896 | container = st->container_devnm; | |
1897 | cfd = open_dev_excl(st->container_devnm); | |
1898 | } else { | |
1899 | container = st->devnm; | |
1900 | close(fd); | |
1901 | cfd = open_dev_excl(st->devnm); | |
1902 | fd = cfd; | |
1903 | } | |
1904 | if (cfd < 0) { | |
1905 | pr_err("Unable to open container for %s\n", devname); | |
1906 | free(subarray); | |
1907 | return 1; | |
1908 | } | |
1909 | ||
1910 | rv = prepare_external_reshape(devname, subarray, st, | |
1911 | container, cfd); | |
1912 | if (rv > 0) { | |
1913 | free(subarray); | |
1914 | close(cfd); | |
1915 | goto release; | |
1916 | } | |
1917 | ||
1918 | if (s->raiddisks && subarray) { | |
1919 | pr_err("--raid-devices operation can be performed on a container only\n"); | |
1920 | close(cfd); | |
1921 | free(subarray); | |
1922 | return 1; | |
1923 | } | |
1924 | } | |
1925 | ||
1926 | added_disks = 0; | |
1927 | for (dv = devlist; dv; dv = dv->next) | |
1928 | added_disks++; | |
1929 | if (s->raiddisks > array.raid_disks && | |
1930 | array.spare_disks + added_disks < | |
1931 | (s->raiddisks - array.raid_disks) && | |
1932 | !c->force) { | |
1933 | pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n" | |
1934 | " Use --force to over-ride this check.\n", | |
1935 | s->raiddisks - array.raid_disks, | |
1936 | s->raiddisks - array.raid_disks == 1 ? "" : "s", | |
1937 | array.spare_disks + added_disks); | |
1938 | return 1; | |
1939 | } | |
1940 | ||
1941 | sra = sysfs_read(fd, NULL, GET_LEVEL | GET_DISKS | GET_DEVS | | |
1942 | GET_STATE | GET_VERSION); | |
1943 | if (sra) { | |
1944 | if (st->ss->external && subarray == NULL) { | |
1945 | array.level = LEVEL_CONTAINER; | |
1946 | sra->array.level = LEVEL_CONTAINER; | |
1947 | } | |
1948 | } else { | |
1949 | pr_err("failed to read sysfs parameters for %s\n", | |
1950 | devname); | |
1951 | return 1; | |
1952 | } | |
1953 | frozen = freeze(st); | |
1954 | if (frozen < -1) { | |
1955 | /* freeze() already spewed the reason */ | |
1956 | sysfs_free(sra); | |
1957 | return 1; | |
1958 | } else if (frozen < 0) { | |
1959 | pr_err("%s is performing resync/recovery and cannot be %s\n", devname, | |
1960 | (s->level != UnSet && s->level != array.level) ? "taken over" : "reshaped"); | |
1961 | sysfs_free(sra); | |
1962 | return 1; | |
1963 | } | |
1964 | ||
1965 | /* ========= set size =============== */ | |
1966 | if (s->size > 0 && (s->size == MAX_SIZE || s->size != (unsigned)array.size)) { | |
1967 | unsigned long long orig_size = get_component_size(fd) / 2; | |
1968 | unsigned long long min_csize; | |
1969 | struct mdinfo *mdi; | |
1970 | int raid0_takeover = 0; | |
1971 | ||
1972 | if (orig_size == 0) | |
1973 | orig_size = (unsigned) array.size; | |
1974 | ||
1975 | if (orig_size == 0) { | |
1976 | pr_err("Cannot set device size in this type of array.\n"); | |
1977 | rv = 1; | |
1978 | goto release; | |
1979 | } | |
1980 | ||
1981 | if (array.level == 0) { | |
1982 | pr_err("Component size change is not supported for RAID0\n"); | |
1983 | rv = 1; | |
1984 | goto release; | |
1985 | } | |
1986 | ||
1987 | if (reshape_super_size(st, devname, s->size, APPLY_METADATA_CHANGES, c)) { | |
1988 | rv = 1; | |
1989 | goto release; | |
1990 | } | |
1991 | sync_metadata(st); | |
1992 | if (st->ss->external) { | |
1993 | /* metadata can have a size limitation; | |
1994 | * update the size value according to the metadata information | |
1995 | */ | |
1996 | struct mdinfo *sizeinfo = | |
1997 | st->ss->container_content(st, subarray); | |
1998 | if (sizeinfo) { | |
1999 | unsigned long long new_size = | |
2000 | sizeinfo->custom_array_size/2; | |
2001 | int data_disks = get_data_disks( | |
2002 | sizeinfo->array.level, | |
2003 | sizeinfo->array.layout, | |
2004 | sizeinfo->array.raid_disks); | |
2005 | new_size /= data_disks; | |
2006 | dprintf("Metadata size correction from %llu to %llu (%llu)\n", | |
2007 | orig_size, new_size, | |
2008 | new_size * data_disks); | |
2009 | s->size = new_size; | |
2010 | sysfs_free(sizeinfo); | |
2011 | } | |
2012 | } | |
2013 | ||
2014 | /* Update the size of each member device in case | |
2015 | * they have been resized. This will never reduce | |
2016 | * below the current used-size. The "size" attribute | |
2017 | * understands '0' to mean 'max'. | |
2018 | */ | |
2019 | min_csize = 0; | |
2020 | for (mdi = sra->devs; mdi; mdi = mdi->next) { | |
2021 | sysfs_set_num(sra, mdi, "size", | |
2022 | s->size == MAX_SIZE ? 0 : s->size); | |
2023 | if (array.not_persistent == 0 && | |
2024 | array.major_version == 0 && | |
2025 | get_linux_version() < 3001000) { | |
2026 | /* Dangerous to allow size to exceed 2TB */ | |
2027 | unsigned long long csize; | |
2028 | if (sysfs_get_ll(sra, mdi, "size", | |
2029 | &csize) == 0) { | |
2030 | if (csize >= 2ULL*1024*1024*1024) | |
2031 | csize = 2ULL*1024*1024*1024; | |
2032 | if ((min_csize == 0 || | |
2033 | (min_csize > csize))) | |
2034 | min_csize = csize; | |
2035 | } | |
2036 | } | |
2037 | } | |
2038 | if (min_csize && s->size > min_csize) { | |
2039 | pr_err("Cannot safely make this array use more than 2TB per device on this kernel.\n"); | |
2040 | rv = 1; | |
2041 | goto size_change_error; | |
2042 | } | |
2043 | if (min_csize && s->size == MAX_SIZE) { | |
2044 | /* Don't let the kernel choose a size - it will get | |
2045 | * it wrong | |
2046 | */ | |
2047 | pr_err("Limited v0.90 array to 2TB per device\n"); | |
2048 | s->size = min_csize; | |
2049 | } | |
2050 | if (st->ss->external) { | |
2051 | if (sra->array.level == 0) { | |
2052 | rv = sysfs_set_str(sra, NULL, "level", "raid5"); | |
2053 | if (!rv) { | |
2054 | raid0_takeover = 1; | |
2055 | /* get array parameters after takeover | |
2056 | * to change one parameter at a time only | |
2057 | */ | |
2058 | rv = md_get_array_info(fd, &array); | |
2059 | } | |
2060 | } | |
2061 | /* make sure mdmon is | |
2062 | * aware of the new level */ | |
2063 | if (!mdmon_running(st->container_devnm)) | |
2064 | start_mdmon(st->container_devnm); | |
2065 | ping_monitor(container); | |
2066 | if (wait_for_mdmon(st->container_devnm) != MDADM_STATUS_SUCCESS) { | |
2067 | pr_err("No mdmon found. Grow cannot continue.\n"); | |
2068 | goto release; | |
2069 | } | |
2070 | } | |
2071 | ||
2072 | if (s->size == MAX_SIZE) | |
2073 | s->size = 0; | |
2074 | array.size = s->size; | |
2075 | rv = sysfs_set_num(sra, NULL, "component_size", s->size); | |
2076 | ||
2077 | /* | |
2078 | * For native metadata, md/array_size is updated by kernel, | |
2079 | * for external management update it here. | |
2080 | */ | |
2081 | if (st->ss->external && rv == MDADM_STATUS_SUCCESS) | |
2082 | rv = set_array_size(st, sra, sra->text_version); | |
2083 | ||
2084 | if (raid0_takeover) { | |
2085 | /* do not resync non-existent parity, | |
2086 | * we will drop it anyway | |
2087 | */ | |
2088 | sysfs_set_str(sra, NULL, "sync_action", "frozen"); | |
2089 | /* go back to raid0, drop parity disk | |
2090 | */ | |
2091 | sysfs_set_str(sra, NULL, "level", "raid0"); | |
2092 | md_get_array_info(fd, &array); | |
2093 | } | |
2094 | ||
2095 | size_change_error: | |
2096 | if (rv != 0) { | |
2097 | int err = errno; | |
2098 | ||
2099 | /* restore metadata */ | |
2100 | if (reshape_super_size(st, devname, orig_size, | |
2101 | ROLLBACK_METADATA_CHANGES, c) == 0) | |
2102 | sync_metadata(st); | |
2103 | pr_err("Cannot set device size for %s: %s\n", | |
2104 | devname, strerror(err)); | |
2105 | if (err == EBUSY && | |
2106 | (array.state & (1<<MD_SB_BITMAP_PRESENT))) | |
2107 | cont_err("Bitmap must be removed before size can be changed\n"); | |
2108 | rv = 1; | |
2109 | goto release; | |
2110 | } | |
2111 | if (s->assume_clean) { | |
2112 | /* This will fail on kernels older than 3.0 unless | |
2113 | * a backport has been arranged. | |
2114 | */ | |
2115 | if (sra == NULL || | |
2116 | sysfs_set_str(sra, NULL, "resync_start", STR_COMMON_NONE) < 0) | |
2117 | pr_err("--assume-clean not supported with --grow on this kernel\n"); | |
2118 | } | |
2119 | md_get_array_info(fd, &array); | |
2120 | s->size = get_component_size(fd)/2; | |
2121 | if (s->size == 0) | |
2122 | s->size = array.size; | |
2123 | if (c->verbose >= 0) { | |
2124 | if (s->size == orig_size) | |
2125 | pr_err("component size of %s unchanged at %lluK\n", | |
2126 | devname, s->size); | |
2127 | else | |
2128 | pr_err("component size of %s has been set to %lluK\n", | |
2129 | devname, s->size); | |
2130 | } | |
2131 | changed = 1; | |
2132 | } else if (!is_container(array.level)) { | |
2133 | s->size = get_component_size(fd)/2; | |
2134 | if (s->size == 0) | |
2135 | s->size = array.size; | |
2136 | } | |
2137 | ||
2138 | /* See if there is anything else to do */ | |
2139 | if ((s->level == UnSet || s->level == array.level) && | |
2140 | (s->layout_str == NULL) && | |
2141 | (s->chunk == 0 || s->chunk == array.chunk_size) && | |
2142 | s->data_offset == INVALID_SECTORS && | |
2143 | (s->raiddisks == 0 || s->raiddisks == array.raid_disks)) { | |
2144 | /* Nothing more to do */ | |
2145 | if (!changed && c->verbose >= 0) | |
2146 | pr_err("%s: no change requested\n", devname); | |
2147 | goto release; | |
2148 | } | |
2149 | ||
2150 | /* ========= check for Raid10/Raid1 -> Raid0 conversion =============== | |
2151 | * current implementation assumes that the following conditions must be met: | |
2152 | * - RAID10: | |
2153 | * - far_copies == 1 | |
2154 | * - near_copies == 2 | |
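| * (in the RAID10 layout word the low byte holds near_copies and the | |
| * next byte holds far_copies, so the (1 << 8) + 2 test below encodes | |
| * exactly this combination) | |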
2155 | */ | |
2156 | if ((s->level == 0 && array.level == 10 && sra && | |
2157 | array.layout == ((1 << 8) + 2) && !(array.raid_disks & 1)) || | |
2158 | (s->level == 0 && array.level == 1 && sra)) { | |
2159 | int err; | |
2160 | ||
2161 | err = remove_disks_for_takeover(st, sra, array.layout); | |
2162 | if (err) { | |
2163 | dprintf("Array cannot be reshaped\n"); | |
2164 | if (cfd > -1) | |
2165 | close(cfd); | |
2166 | rv = 1; | |
2167 | goto release; | |
2168 | } | |
2169 | /* Make sure mdmon has seen the device removal | |
2170 | * and updated metadata before we continue with | |
2171 | * level change | |
2172 | */ | |
2173 | if (container) | |
2174 | ping_monitor(container); | |
2175 | } | |
2176 | ||
2177 | memset(&info, 0, sizeof(info)); | |
2178 | info.array = array; | |
2179 | if (sysfs_init(&info, fd, NULL)) { | |
2180 | pr_err("failed to initialize sysfs.\n"); | |
2181 | rv = 1; | |
2182 | goto release; | |
2183 | } | |
2184 | strcpy(info.text_version, sra->text_version); | |
2185 | info.component_size = s->size*2; | |
2186 | info.new_level = s->level; | |
2187 | info.new_chunk = s->chunk * 1024; | |
2188 | if (is_container(info.array.level)) { | |
2189 | info.delta_disks = UnSet; | |
2190 | info.array.raid_disks = s->raiddisks; | |
2191 | } else if (s->raiddisks) | |
2192 | info.delta_disks = s->raiddisks - info.array.raid_disks; | |
2193 | else | |
2194 | info.delta_disks = UnSet; | |
2195 | if (s->layout_str == NULL) { | |
2196 | info.new_layout = UnSet; | |
2197 | if (info.array.level == 6 && | |
2198 | (info.new_level == 6 || info.new_level == UnSet) && | |
2199 | info.array.layout >= 16) { | |
2200 | pr_err("%s has a non-standard layout. If you wish to preserve this\n", devname); | |
2201 | cont_err("during the reshape, please specify --layout=preserve\n"); | |
2202 | cont_err("If you want to change it, specify a layout or use --layout=normalise\n"); | |
2203 | rv = 1; | |
2204 | goto release; | |
2205 | } | |
2206 | } else if (strcmp(s->layout_str, "normalise") == 0 || | |
2207 | strcmp(s->layout_str, "normalize") == 0) { | |
2208 | /* If we have a -6 RAID6 layout, remove the '-6'. */ | |
2209 | info.new_layout = UnSet; | |
2210 | if (info.array.level == 6 && info.new_level == UnSet) { | |
2211 | char l[40], *h; | |
2212 | strcpy(l, map_num_s(r6layout, info.array.layout)); | |
2213 | h = strrchr(l, '-'); | |
2214 | if (h && strcmp(h, "-6") == 0) { | |
2215 | *h = 0; | |
2216 | info.new_layout = map_name(r6layout, l); | |
2217 | } | |
2218 | } else { | |
2219 | pr_err("%s is only meaningful when reshaping a RAID6 array.\n", s->layout_str); | |
2220 | rv = 1; | |
2221 | goto release; | |
2222 | } | |
2223 | } else if (strcmp(s->layout_str, "preserve") == 0) { | |
2224 | /* This means that a non-standard RAID6 layout | |
2225 | * is OK. | |
2226 | * In particular: | |
2227 | * - When reshaping a RAID6 (e.g. adding a device) | |
2228 | * which is in a non-standard layout, it is OK | |
2229 | * to preserve that layout. | |
2230 | * - When converting a RAID5 to RAID6, leave it in | |
2231 | * the XXX-6 layout, don't re-layout. | |
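| * (e.g. a left-symmetric RAID5 becomes left-symmetric-6 and keeps | |
| * that layout) | |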
2232 | */ | |
2233 | if (info.array.level == 6 && info.new_level == UnSet) | |
2234 | info.new_layout = info.array.layout; | |
2235 | else if (info.array.level == 5 && info.new_level == 6) { | |
2236 | char l[40]; | |
2237 | strcpy(l, map_num_s(r5layout, info.array.layout)); | |
2238 | strcat(l, "-6"); | |
2239 | info.new_layout = map_name(r6layout, l); | |
2240 | } else { | |
2241 | pr_err("%s in only meaningful when reshaping to RAID6\n", s->layout_str); | |
2242 | rv = 1; | |
2243 | goto release; | |
2244 | } | |
2245 | } else { | |
2246 | int l = info.new_level; | |
2247 | if (l == UnSet) | |
2248 | l = info.array.level; | |
2249 | switch (l) { | |
2250 | case 5: | |
2251 | info.new_layout = map_name(r5layout, s->layout_str); | |
2252 | break; | |
2253 | case 6: | |
2254 | info.new_layout = map_name(r6layout, s->layout_str); | |
2255 | break; | |
2256 | case 10: | |
2257 | info.new_layout = parse_layout_10(s->layout_str); | |
2258 | break; | |
2259 | case LEVEL_FAULTY: | |
2260 | info.new_layout = parse_layout_faulty(s->layout_str); | |
2261 | break; | |
2262 | default: | |
2263 | pr_err("layout not meaningful with this level\n"); | |
2264 | rv = 1; | |
2265 | goto release; | |
2266 | } | |
2267 | if (info.new_layout == UnSet) { | |
2268 | pr_err("layout %s not understood for this level\n", | |
2269 | s->layout_str); | |
2270 | rv = 1; | |
2271 | goto release; | |
2272 | } | |
2273 | } | |
2274 | ||
2275 | if (array.level == LEVEL_FAULTY) { | |
2276 | if (s->level != UnSet && s->level != array.level) { | |
2277 | pr_err("cannot change level of Faulty device\n"); | |
2278 | rv = 1; | |
2279 | } | |
2280 | if (s->chunk) { | |
2281 | pr_err("cannot set chunksize of Faulty device\n"); | |
2282 | rv = 1; | |
2283 | } | |
2284 | if (s->raiddisks && s->raiddisks != 1) { | |
2285 | pr_err("cannot set raid_disks of Faulty device\n"); | |
2286 | rv = 1; | |
2287 | } | |
2288 | if (s->layout_str) { | |
2289 | if (md_get_array_info(fd, &array) != 0) { | |
2290 | dprintf("Cannot get array information.\n"); | |
2291 | goto release; | |
2292 | } | |
2293 | array.layout = info.new_layout; | |
2294 | if (md_set_array_info(fd, &array) != 0) { | |
2295 | pr_err("failed to set new layout\n"); | |
2296 | rv = 1; | |
2297 | } else if (c->verbose >= 0) | |
2298 | printf("layout for %s set to %d\n", | |
2299 | devname, array.layout); | |
2300 | } | |
2301 | } else if (is_container(array.level)) { | |
2302 | /* This change is to be applied to every array in the | |
2303 | * container. This is only needed when the metadata imposes | |
2304 | * constraints on the various arrays in the container. | |
2305 | * Currently we only know that IMSM requires all arrays | |
2306 | * to have the same number of devices so changing the | |
2307 | * number of devices (On-Line Capacity Expansion) must be | |
2308 | * performed at the level of the container | |
2309 | */ | |
2310 | close_fd(&fd); | |
2311 | rv = reshape_container(container, devname, -1, st, &info, | |
2312 | c, 0, 0); | |
2313 | frozen = 0; | |
2314 | } else { | |
2315 | /* get spare devices from external metadata | |
2316 | */ | |
2317 | if (st->ss->external) { | |
2318 | struct mdinfo *info2; | |
2319 | ||
2320 | info2 = st->ss->container_content(st, subarray); | |
2321 | if (info2) { | |
2322 | info.array.spare_disks = | |
2323 | info2->array.spare_disks; | |
2324 | sysfs_free(info2); | |
2325 | } | |
2326 | } | |
2327 | ||
2328 | /* Impose these changes on a single array. First | |
2329 | * check that the metadata is OK with the change. | |
2330 | */ | |
2331 | if (reshape_super_non_size(st, devname, &info, c)) { | |
2332 | rv = 1; | |
2333 | goto release; | |
2334 | } | |
2335 | sync_metadata(st); | |
2336 | rv = reshape_array(container, fd, devname, st, &info, c->force, | |
2337 | devlist, s->data_offset, c->backup_file, | |
2338 | c->verbose, 0, 0); | |
2339 | frozen = 0; | |
2340 | } | |
2341 | release: | |
2342 | sysfs_free(sra); | |
2343 | if (frozen > 0) | |
2344 | unfreeze(st); | |
2345 | return rv; | |
2346 | } | |
2347 | ||
2348 | /* verify_reshape_position() | |
2349 | * Checks that the reshape position recorded in the metadata is not | |
2350 | * farther along than the position reported by md. | |
2351 | * Return value: | |
2352 | * 0 : not valid sysfs entry | |
2353 | * this can happen when the reshape has not started yet (it should be | |
2354 | * started by reshape_array()) or when a raid0 array awaits takeover | |
2355 | * -1 : error, reshape position is obviously wrong | |
2356 | * 1 : success, reshape progress correct or updated | |
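| * Note: sync_max is in per-device sectors, so it is scaled by the | |
| * number of data disks before being compared with reshape_progress, | |
| * which counts sectors across the whole array. | |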
2357 | */ | |
2358 | static int verify_reshape_position(struct mdinfo *info, int level) | |
2359 | { | |
2360 | int ret_val = 0; | |
2361 | char buf[SYSFS_MAX_BUF_SIZE]; | |
2362 | int rv; | |
2363 | ||
2364 | /* read sync_max, failure can mean raid0 array */ | |
2365 | rv = sysfs_get_str(info, NULL, "sync_max", buf, sizeof(buf)); | |
2366 | ||
2367 | if (rv > 0) { | |
2368 | char *ep; | |
2369 | unsigned long long position = strtoull(buf, &ep, 0); | |
2370 | ||
2371 | dprintf("Read sync_max sysfs entry is: %s\n", buf); | |
2372 | if (!(ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))) { | |
2373 | position *= get_data_disks(level, | |
2374 | info->new_layout, | |
2375 | info->array.raid_disks); | |
2376 | if (info->reshape_progress < position) { | |
2377 | dprintf("Corrected reshape progress (%llu) to md position (%llu)\n", | |
2378 | info->reshape_progress, position); | |
2379 | info->reshape_progress = position; | |
2380 | ret_val = 1; | |
2381 | } else if (info->reshape_progress > position) { | |
2382 | pr_err("Fatal error: array reshape was not properly frozen (expected reshape position is %llu, but reshape progress is %llu.\n", | |
2383 | position, info->reshape_progress); | |
2384 | pr_err("Reassemble array to try to restore critical sector.\n"); | |
2385 | ret_val = -1; | |
2386 | } else { | |
2387 | dprintf("Reshape position in md and metadata are the same;"); | |
2388 | ret_val = 1; | |
2389 | } | |
2390 | } | |
2391 | } else if (rv == 0) { | |
2392 | /* for a valid sysfs entry, zero-length content | |
2393 | * should be treated as an error | |
2394 | */ | |
2395 | ret_val = -1; | |
2396 | } | |
2397 | ||
2398 | return ret_val; | |
2399 | } | |
2400 | ||
2401 | static unsigned long long choose_offset(unsigned long long lo, | |
2402 | unsigned long long hi, | |
2403 | unsigned long long min, | |
2404 | unsigned long long max) | |
2405 | { | |
2406 | /* Choose a new offset between hi and lo. | |
2407 | * It must be between min and max, but | |
2408 | * we would prefer something near the middle of hi/lo, and also | |
2409 | * prefer to be aligned to a big power of 2. | |
2410 | * | |
2411 | * So we start with the middle, then for each bit, | |
2412 | * starting at '1' and increasing, if it is set, we either | |
2413 | * add it or subtract it if possible, preferring the option | |
2414 | * which is furthest from the boundary. | |
2415 | * | |
2416 | * We stop once we get a 1MB alignment. As units are in sectors, | |
2417 | * 1MB = 2*1024 sectors. | |
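| * Illustrative walk-through (hypothetical numbers): with lo=10000, | |
| * hi=18192, min=10256 and max=18192 we start at choice=14096; | |
| * adjusting at bit 16 and bit 256 gives 14080 and then 14336 = 7*2048, | |
| * a 1MB-aligned offset within the bounds. | |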
2418 | */ | |
2419 | unsigned long long choice = (lo + hi) / 2; | |
2420 | unsigned long long bit = 1; | |
2421 | ||
2422 | for (bit = 1; bit < 2*1024; bit = bit << 1) { | |
2423 | unsigned long long bigger, smaller; | |
2424 | if (! (bit & choice)) | |
2425 | continue; | |
2426 | bigger = choice + bit; | |
2427 | smaller = choice - bit; | |
2428 | if (bigger > max && smaller < min) | |
2429 | break; | |
2430 | if (bigger > max) | |
2431 | choice = smaller; | |
2432 | else if (smaller < min) | |
2433 | choice = bigger; | |
2434 | else if (hi - bigger > smaller - lo) | |
2435 | choice = bigger; | |
2436 | else | |
2437 | choice = smaller; | |
2438 | } | |
2439 | return choice; | |
2440 | } | |
2441 | ||
2442 | static int set_new_data_offset(struct mdinfo *sra, struct supertype *st, | |
2443 | char *devname, int delta_disks, | |
2444 | unsigned long long data_offset, | |
2445 | unsigned long long min, | |
2446 | int can_fallback) | |
2447 | { | |
2448 | struct mdinfo *sd; | |
2449 | int dir = 0; | |
2450 | int err = 0; | |
2451 | unsigned long long before, after; | |
2452 | ||
2453 | /* Need to find min space before and after so same is used | |
2454 | * on all devices | |
2455 | */ | |
2456 | before = UINT64_MAX; | |
2457 | after = UINT64_MAX; | |
2458 | for (sd = sra->devs; sd; sd = sd->next) { | |
2459 | char *dn; | |
2460 | int dfd; | |
2461 | int rv; | |
2462 | struct supertype *st2; | |
2463 | struct mdinfo info2; | |
2464 | ||
2465 | if (sd->disk.state & (1<<MD_DISK_FAULTY)) | |
2466 | continue; | |
2467 | dn = map_dev(sd->disk.major, sd->disk.minor, 0); | |
2468 | dfd = dev_open(dn, O_RDONLY); | |
2469 | if (dfd < 0) { | |
2470 | pr_err("%s: cannot open component %s\n", | |
2471 | devname, dn ? dn : "-unknown-"); | |
2472 | goto release; | |
2473 | } | |
2474 | st2 = dup_super(st); | |
2475 | rv = st2->ss->load_super(st2, dfd, NULL); | |
2476 | close(dfd); | |
2477 | if (rv) { | |
2478 | free(st2); | |
2479 | pr_err("%s: cannot get superblock from %s\n", | |
2480 | devname, dn); | |
2481 | goto release; | |
2482 | } | |
2483 | st2->ss->getinfo_super(st2, &info2, NULL); | |
2484 | st2->ss->free_super(st2); | |
2485 | free(st2); | |
2486 | if (info2.space_before == 0 && | |
2487 | info2.space_after == 0) { | |
2488 | /* Metadata doesn't support data_offset changes */ | |
2489 | if (!can_fallback) | |
2490 | pr_err("%s: Metadata version doesn't support data_offset changes\n", | |
2491 | devname); | |
2492 | goto fallback; | |
2493 | } | |
2494 | if (before > info2.space_before) | |
2495 | before = info2.space_before; | |
2496 | if (after > info2.space_after) | |
2497 | after = info2.space_after; | |
2498 | ||
2499 | if (data_offset != INVALID_SECTORS) { | |
2500 | if (dir == 0) { | |
2501 | if (info2.data_offset == data_offset) { | |
2502 | pr_err("%s: already has that data_offset\n", | |
2503 | dn); | |
2504 | goto release; | |
2505 | } | |
2506 | if (data_offset < info2.data_offset) | |
2507 | dir = -1; | |
2508 | else | |
2509 | dir = 1; | |
2510 | } else if ((data_offset <= info2.data_offset && | |
2511 | dir == 1) || | |
2512 | (data_offset >= info2.data_offset && | |
2513 | dir == -1)) { | |
2514 | pr_err("%s: differing data offsets on devices make this --data-offset setting impossible\n", | |
2515 | dn); | |
2516 | goto release; | |
2517 | } | |
2518 | } | |
2519 | } | |
2520 | if (before == UINT64_MAX) | |
2521 | /* impossible really, there must be no devices */ | |
2522 | return 1; | |
2523 | ||
2524 | for (sd = sra->devs; sd; sd = sd->next) { | |
2525 | char *dn = map_dev(sd->disk.major, sd->disk.minor, 0); | |
2526 | unsigned long long new_data_offset; | |
2527 | ||
2528 | if (sd->disk.state & (1<<MD_DISK_FAULTY)) | |
2529 | continue; | |
2530 | if (delta_disks < 0) { | |
2531 | /* Don't need any space as array is shrinking | |
2532 | * just move data_offset up by min | |
2533 | */ | |
2534 | if (data_offset == INVALID_SECTORS) | |
2535 | new_data_offset = sd->data_offset + min; | |
2536 | else { | |
2537 | if (data_offset < sd->data_offset + min) { | |
2538 | pr_err("--data-offset too small for %s\n", | |
2539 | dn); | |
2540 | goto release; | |
2541 | } | |
2542 | new_data_offset = data_offset; | |
2543 | } | |
2544 | } else if (delta_disks > 0) { | |
2545 | /* need space before */ | |
2546 | if (before < min) { | |
2547 | if (can_fallback) | |
2548 | goto fallback; | |
2549 | pr_err("Insufficient head-space for reshape on %s\n", | |
2550 | dn); | |
2551 | goto release; | |
2552 | } | |
2553 | if (data_offset == INVALID_SECTORS) | |
2554 | new_data_offset = sd->data_offset - min; | |
2555 | else { | |
2556 | if (data_offset > sd->data_offset - min) { | |
2557 | pr_err("--data-offset too large for %s\n", | |
2558 | dn); | |
2559 | goto release; | |
2560 | } | |
2561 | new_data_offset = data_offset; | |
2562 | } | |
2563 | } else { | |
2564 | if (dir == 0) { | |
2565 | /* can move up or down. If 'data_offset' | |
2566 | * was set we would have already decided, | |
2567 | * so just choose direction with most space. | |
2568 | */ | |
2569 | if (before > after) | |
2570 | dir = -1; | |
2571 | else | |
2572 | dir = 1; | |
2573 | } | |
2574 | sysfs_set_str(sra, NULL, "reshape_direction", | |
2575 | dir == 1 ? "backwards" : "forwards"); | |
2576 | if (dir > 0) { | |
2577 | /* Increase data offset */ | |
2578 | if (after < min) { | |
2579 | if (can_fallback) | |
2580 | goto fallback; | |
2581 | pr_err("Insufficient tail-space for reshape on %s\n", | |
2582 | dn); | |
2583 | goto release; | |
2584 | } | |
2585 | if (data_offset != INVALID_SECTORS && | |
2586 | data_offset < sd->data_offset + min) { | |
2587 | pr_err("--data-offset too small on %s\n", | |
2588 | dn); | |
2589 | goto release; | |
2590 | } | |
2591 | if (data_offset != INVALID_SECTORS) | |
2592 | new_data_offset = data_offset; | |
2593 | else | |
2594 | new_data_offset = choose_offset(sd->data_offset, | |
2595 | sd->data_offset + after, | |
2596 | sd->data_offset + min, | |
2597 | sd->data_offset + after); | |
2598 | } else { | |
2599 | /* Decrease data offset */ | |
2600 | if (before < min) { | |
2601 | if (can_fallback) | |
2602 | goto fallback; | |
2603 | pr_err("insufficient head-room on %s\n", | |
2604 | dn); | |
2605 | goto release; | |
2606 | } | |
2607 | if (data_offset != INVALID_SECTORS && | |
2608 | data_offset > sd->data_offset - min) { | |
2609 | pr_err("--data-offset too large on %s\n", | |
2610 | dn); | |
2611 | goto release; | |
2612 | } | |
2613 | if (data_offset != INVALID_SECTORS) | |
2614 | new_data_offset = data_offset; | |
2615 | else | |
2616 | new_data_offset = choose_offset(sd->data_offset - before, | |
2617 | sd->data_offset, | |
2618 | sd->data_offset - before, | |
2619 | sd->data_offset - min); | |
2620 | } | |
2621 | } | |
2622 | err = sysfs_set_num(sra, sd, "new_offset", new_data_offset); | |
2623 | if (err < 0 && errno == E2BIG) { | |
2624 | /* try again after increasing data size to max */ | |
2625 | err = sysfs_set_num(sra, sd, "size", 0); | |
2626 | if (err < 0 && errno == EINVAL && | |
2627 | !(sd->disk.state & (1<<MD_DISK_SYNC))) { | |
2628 | /* some kernels have a bug where you cannot | |
2629 | * use '0' on spare devices. */ | |
2630 | sysfs_set_num(sra, sd, "size", | |
2631 | (sra->component_size + after)/2); | |
2632 | } | |
2633 | err = sysfs_set_num(sra, sd, "new_offset", | |
2634 | new_data_offset); | |
2635 | } | |
2636 | if (err < 0) { | |
2637 | if (errno == E2BIG && data_offset != INVALID_SECTORS) { | |
2638 | pr_err("data-offset is too big for %s\n", dn); | |
2639 | goto release; | |
2640 | } | |
2641 | if (sd == sra->devs && | |
2642 | (errno == ENOENT || errno == E2BIG)) | |
2643 | /* Early kernel, no 'new_offset' file, | |
2644 | * or kernel doesn't like us. | |
2645 | * For RAID5/6 this is not fatal | |
2646 | */ | |
2647 | return 1; | |
2648 | pr_err("Cannot set new_offset for %s\n", dn); | |
2649 | break; | |
2650 | } | |
2651 | } | |
2652 | return err; | |
2653 | release: | |
2654 | return -1; | |
2655 | fallback: | |
2656 | /* Just use a backup file */ | |
2657 | return 1; | |
2658 | } | |
2659 | ||
2660 | static int raid10_reshape(char *container, int fd, char *devname, | |
2661 | struct supertype *st, struct mdinfo *info, | |
2662 | struct reshape *reshape, | |
2663 | unsigned long long data_offset, | |
2664 | int force, int verbose) | |
2665 | { | |
2666 | /* Changing raid_disks, layout, chunksize or possibly | |
2667 | * just data_offset for a RAID10. | |
2668 | * We must always change data_offset. We change by at least | |
2669 | * ->min_offset_change which is the largest of the old and new | |
2670 | * chunk sizes. | |
2671 | * If raid_disks is increasing, then data_offset must decrease | |
2672 | * by at least this copy size. | |
2673 | * If raid_disks is unchanged, data_offset must increase or | |
2674 | * decrease by at least min_offset_change but preferably by much more. | |
2675 | * We choose half of the available space. | |
2676 | * If raid_disks is decreasing, data_offset must increase by | |
2677 | * at least min_offset_change. To allow of this, component_size | |
2678 | * must be decreased by the same amount. | |
2679 | * | |
2680 | * So we calculate the required minimum and direction, possibly | |
2681 | * reduce the component_size, then iterate through the devices | |
2682 | * and set the new_data_offset. | |
2683 | * If that all works, we set chunk_size, layout, raid_disks, and start | |
2684 | * 'reshape' | |
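| * Illustrative example: with 512KiB old and new chunks, | |
| * min_offset_change is 1024 sectors, so every member's data_offset | |
| * must move by at least 1024 sectors in the direction worked out | |
| * below. | |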
2685 | */ | |
2686 | struct mdinfo *sra; | |
2687 | unsigned long long min; | |
2688 | int err = 0; | |
2689 | ||
2690 | sra = sysfs_read(fd, NULL, | |
2691 | GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK | |
2692 | ); | |
2693 | if (!sra) { | |
2694 | pr_err("%s: Cannot get array details from sysfs\n", devname); | |
2695 | goto release; | |
2696 | } | |
2697 | min = reshape->min_offset_change; | |
2698 | ||
2699 | if (info->delta_disks) | |
2700 | sysfs_set_str(sra, NULL, "reshape_direction", | |
2701 | info->delta_disks < 0 ? "backwards" : "forwards"); | |
2702 | if (info->delta_disks < 0 && info->space_after < min) { | |
2703 | int rv = sysfs_set_num(sra, NULL, "component_size", | |
2704 | (sra->component_size - min)/2); | |
2705 | if (rv) { | |
2706 | pr_err("cannot reduce component size\n"); | |
2707 | goto release; | |
2708 | } | |
2709 | } | |
2710 | err = set_new_data_offset(sra, st, devname, info->delta_disks, | |
2711 | data_offset, min, 0); | |
2712 | if (err == 1) { | |
2713 | pr_err("Cannot set new_data_offset: RAID10 reshape not\n"); | |
2714 | cont_err("supported on this kernel\n"); | |
2715 | err = -1; | |
2716 | } | |
2717 | if (err < 0) | |
2718 | goto release; | |
2719 | ||
2720 | if (!err && sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0) | |
2721 | err = errno; | |
2722 | if (!err && sysfs_set_num(sra, NULL, "layout", | |
2723 | reshape->after.layout) < 0) | |
2724 | err = errno; | |
2725 | if (!err && | |
2726 | sysfs_set_num(sra, NULL, "raid_disks", | |
2727 | info->array.raid_disks + info->delta_disks) < 0) | |
2728 | err = errno; | |
2729 | if (!err && sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) | |
2730 | err = errno; | |
2731 | if (err) { | |
2732 | pr_err("Cannot set array shape for %s\n", | |
2733 | devname); | |
2734 | if (err == EBUSY && | |
2735 | (info->array.state & (1<<MD_SB_BITMAP_PRESENT))) | |
2736 | cont_err(" Bitmap must be removed before shape can be changed\n"); | |
2737 | goto release; | |
2738 | } | |
2739 | sysfs_free(sra); | |
2740 | return 0; | |
2741 | release: | |
2742 | sysfs_free(sra); | |
2743 | return 1; | |
2744 | } | |
2745 | ||
2746 | static void get_space_after(int fd, struct supertype *st, struct mdinfo *info) | |
2747 | { | |
2748 | struct mdinfo *sra, *sd; | |
2749 | /* Initialisation to silence compiler warning */ | |
2750 | unsigned long long min_space_before = 0, min_space_after = 0; | |
2751 | int first = 1; | |
2752 | ||
2753 | sra = sysfs_read(fd, NULL, GET_DEVS); | |
2754 | if (!sra) | |
2755 | return; | |
2756 | for (sd = sra->devs; sd; sd = sd->next) { | |
2757 | char *dn; | |
2758 | int dfd; | |
2759 | struct supertype *st2; | |
2760 | struct mdinfo info2; | |
2761 | ||
2762 | if (sd->disk.state & (1<<MD_DISK_FAULTY)) | |
2763 | continue; | |
2764 | dn = map_dev(sd->disk.major, sd->disk.minor, 0); | |
2765 | dfd = dev_open(dn, O_RDONLY); | |
2766 | if (dfd < 0) | |
2767 | break; | |
2768 | st2 = dup_super(st); | |
2769 | if (st2->ss->load_super(st2, dfd, NULL)) { | |
2770 | close(dfd); | |
2771 | free(st2); | |
2772 | break; | |
2773 | } | |
2774 | close(dfd); | |
2775 | st2->ss->getinfo_super(st2, &info2, NULL); | |
2776 | st2->ss->free_super(st2); | |
2777 | free(st2); | |
2778 | if (first || | |
2779 | min_space_before > info2.space_before) | |
2780 | min_space_before = info2.space_before; | |
2781 | if (first || | |
2782 | min_space_after > info2.space_after) | |
2783 | min_space_after = info2.space_after; | |
2784 | first = 0; | |
2785 | } | |
2786 | if (sd == NULL && !first) { | |
2787 | info->space_after = min_space_after; | |
2788 | info->space_before = min_space_before; | |
2789 | } | |
2790 | sysfs_free(sra); | |
2791 | } | |
2792 | ||
2793 | static void update_cache_size(char *container, struct mdinfo *sra, | |
2794 | struct mdinfo *info, | |
2795 | int disks, unsigned long long blocks) | |
2796 | { | |
2797 | /* Check that the internal stripe cache is | |
2798 | * large enough, or it won't work. | |
2799 | * It must hold at least 4 stripes of the larger | |
2800 | * chunk size | |
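| * (illustrative numbers: with a 512KiB chunk that is 4 * 512KiB = | |
| * 2MiB = 4096 sectors, which, if the 'blocks' check below does not | |
| * raise it, becomes 4096 / 8 = 512 pages of stripe cache on 4KiB | |
| * pages) | |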
2801 | */ | |
2802 | unsigned long cache; | |
2803 | cache = max(info->array.chunk_size, info->new_chunk); | |
2804 | cache *= 4; /* 4 stripes minimum */ | |
2805 | cache /= 512; /* convert to sectors */ | |
2806 | /* make sure there is room for 'blocks' with a bit to spare */ | |
2807 | if (cache < 16 + blocks / disks) | |
2808 | cache = 16 + blocks / disks; | |
2809 | cache /= (4096/512); /* Convert from sectors to pages */ | |
2810 | ||
2811 | if (sra->cache_size < cache) | |
2812 | subarray_set_num(container, sra, "stripe_cache_size", | |
2813 | cache+1); | |
2814 | } | |
2815 | ||
2816 | static int impose_reshape(struct mdinfo *sra, | |
2817 | struct mdinfo *info, | |
2818 | struct supertype *st, | |
2819 | int fd, | |
2820 | int restart, | |
2821 | char *devname, char *container, | |
2822 | struct reshape *reshape) | |
2823 | { | |
2824 | struct mdu_array_info_s array; | |
2825 | ||
2826 | sra->new_chunk = info->new_chunk; | |
2827 | ||
2828 | if (restart) { | |
2829 | /* for external metadata a checkpoint saved by mdmon can be lost | |
2830 | * or missed (e.g. due to a crash). Check whether md, on restart, | |
2831 | * has progressed farther than the metadata points to. | |
2832 | * If so, the metadata information is obsolete. | |
2833 | */ | |
2834 | if (st->ss->external) | |
2835 | verify_reshape_position(info, reshape->level); | |
2836 | sra->reshape_progress = info->reshape_progress; | |
2837 | } else { | |
2838 | sra->reshape_progress = 0; | |
2839 | if (reshape->after.data_disks < reshape->before.data_disks) | |
2840 | /* start from the end of the new array */ | |
2841 | sra->reshape_progress = (sra->component_size | |
2842 | * reshape->after.data_disks); | |
2843 | } | |
2844 | ||
2845 | md_get_array_info(fd, &array); | |
2846 | if (info->array.chunk_size == info->new_chunk && | |
2847 | reshape->before.layout == reshape->after.layout && | |
2848 | st->ss->external == 0) { | |
2849 | /* use SET_ARRAY_INFO but only if reshape hasn't started */ | |
2850 | array.raid_disks = reshape->after.data_disks + reshape->parity; | |
2851 | if (!restart && md_set_array_info(fd, &array) != 0) { | |
2852 | int err = errno; | |
2853 | ||
2854 | pr_err("Cannot set device shape for %s: %s\n", | |
2855 | devname, strerror(errno)); | |
2856 | ||
2857 | if (err == EBUSY && | |
2858 | (array.state & (1<<MD_SB_BITMAP_PRESENT))) | |
2859 | cont_err("Bitmap must be removed before shape can be changed\n"); | |
2860 | ||
2861 | goto release; | |
2862 | } | |
2863 | } else if (!restart) { | |
2864 | /* set them all just in case some old 'new_*' value | |
2865 | * persists from some earlier problem. | |
2866 | */ | |
2867 | int err = 0; | |
2868 | ||
2869 | if (sysfs_set_num(sra, NULL, "chunk_size", info->new_chunk) < 0) | |
2870 | err = errno; | |
2871 | ||
2872 | if (!err && sysfs_set_num(sra, NULL, "layout", | |
2873 | reshape->after.layout) < 0) | |
2874 | err = errno; | |
2875 | ||
2876 | /* new_level is introduced in kernel 6.12 */ | |
2877 | if (!err && sysfs_attribute_available(sra, NULL, "new_level") && | |
2878 | sysfs_set_num(sra, NULL, "new_level", info->new_level) < 0) | |
2879 | err = errno; | |
2880 | ||
2881 | if (!err && subarray_set_num(container, sra, "raid_disks", | |
2882 | reshape->after.data_disks + | |
2883 | reshape->parity) < 0) | |
2884 | err = errno; | |
2885 | ||
2886 | if (err) { | |
2887 | pr_err("Cannot set device shape for %s\n", devname); | |
2888 | ||
2889 | if (err == EBUSY && | |
2890 | (array.state & (1<<MD_SB_BITMAP_PRESENT))) | |
2891 | cont_err("Bitmap must be removed before shape can be changed\n"); | |
2892 | goto release; | |
2893 | } | |
2894 | } | |
2895 | return 0; | |
2896 | release: | |
2897 | return -1; | |
2898 | } | |
2899 | ||
2900 | static int impose_level(int fd, int level, char *devname, int verbose) | |
2901 | { | |
2902 | char *c; | |
2903 | struct mdu_array_info_s array; | |
2904 | struct mdinfo info; | |
2905 | ||
2906 | if (sysfs_init(&info, fd, NULL)) { | |
2907 | pr_err("failed to initialize sysfs.\n"); | |
2908 | return 1; | |
2909 | } | |
2910 | ||
2911 | md_get_array_info(fd, &array); | |
2912 | if (level == 0 && is_level456(array.level)) { | |
2913 | /* To convert to RAID0 we need to fail and | |
2914 | * remove any non-data devices. */ | |
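| /* (with the required PARITY_N layout, parity lives on the last | |
| * device(s), so anything with raid_disk >= data_disks, plus any | |
| * spares, is removed below) | |
| */ | |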
2915 | int found = 0; | |
2916 | int d; | |
2917 | int data_disks = array.raid_disks - 1; | |
2918 | if (array.level == 6) | |
2919 | data_disks -= 1; | |
2920 | if (array.level == 5 && array.layout != ALGORITHM_PARITY_N) | |
2921 | return -1; | |
2922 | if (array.level == 6 && array.layout != ALGORITHM_PARITY_N_6) | |
2923 | return -1; | |
2924 | sysfs_set_str(&info, NULL,"sync_action", "idle"); | |
2925 | /* First remove any spares so no recovery starts */ | |
2926 | for (d = 0, found = 0; | |
2927 | d < MAX_DISKS && found < array.nr_disks; d++) { | |
2928 | mdu_disk_info_t disk; | |
2929 | disk.number = d; | |
2930 | if (md_get_disk_info(fd, &disk) < 0) | |
2931 | continue; | |
2932 | if (disk.major == 0 && disk.minor == 0) | |
2933 | continue; | |
2934 | found++; | |
2935 | if ((disk.state & (1 << MD_DISK_ACTIVE)) && | |
2936 | disk.raid_disk < data_disks) | |
2937 | /* keep this */ | |
2938 | continue; | |
2939 | ioctl(fd, HOT_REMOVE_DISK, | |
2940 | makedev(disk.major, disk.minor)); | |
2941 | } | |
2942 | /* Now fail anything left */ | |
2943 | md_get_array_info(fd, &array); | |
2944 | for (d = 0, found = 0; | |
2945 | d < MAX_DISKS && found < array.nr_disks; d++) { | |
2946 | mdu_disk_info_t disk; | |
2947 | disk.number = d; | |
2948 | if (md_get_disk_info(fd, &disk) < 0) | |
2949 | continue; | |
2950 | if (disk.major == 0 && disk.minor == 0) | |
2951 | continue; | |
2952 | found++; | |
2953 | if ((disk.state & (1 << MD_DISK_ACTIVE)) && | |
2954 | disk.raid_disk < data_disks) | |
2955 | /* keep this */ | |
2956 | continue; | |
2957 | ioctl(fd, SET_DISK_FAULTY, | |
2958 | makedev(disk.major, disk.minor)); | |
2959 | hot_remove_disk(fd, makedev(disk.major, disk.minor), 1); | |
2960 | } | |
2961 | /* | |
2962 | * hot_remove_disk lets kernel set MD_RECOVERY_RUNNING | |
2963 | * and while it is set the level can't be changed. Wait some | |
2964 | * time to let the md thread clear the flag. | |
2965 | */ | |
2966 | pr_info("wait 5 seconds to give kernel space to finish job\n"); | |
2967 | sleep_for(5, 0, true); | |
2968 | } | |
2969 | c = map_num(pers, level); | |
2970 | if (c) { | |
2971 | int err = sysfs_set_str(&info, NULL, "level", c); | |
2972 | if (err) { | |
2973 | err = errno; | |
2974 | pr_err("%s: could not set level to %s\n", | |
2975 | devname, c); | |
2976 | if (err == EBUSY && | |
2977 | (array.state & (1<<MD_SB_BITMAP_PRESENT))) | |
2978 | cont_err("Bitmap must be removed before level can be changed\n"); | |
2979 | return err; | |
2980 | } | |
2981 | if (verbose >= 0) | |
2982 | pr_err("level of %s changed to %s\n", devname, c); | |
2983 | } | |
2984 | return 0; | |
2985 | } | |
2986 | ||
2987 | int sigterm = 0; | |
2988 | static void catch_term(int sig) | |
2989 | { | |
2990 | sigterm = 1; | |
2991 | } | |
2992 | ||
2993 | ||
2994 | /** | |
2995 | * handle_forking() - Handle reshape forking. | |
2996 | * | |
2997 | * @forked: if already forked. | |
2998 | * @devname: device name. | |
2999 | * Returns: MDADM_STATUS_ERROR if fork() failed, | |
3000 | * MDADM_STATUS_FORKED if child process or already forked, | |
3001 | * MDADM_STATUS_SUCCESS if job delegated to forked process or systemd. | |
3002 | * | |
3003 | * This function is a helper for fork handling during reshapes. | |
3004 | */ | |
3005 | static mdadm_status_t handle_forking(bool forked, char *devname) | |
3006 | { | |
3007 | if (forked) | |
3008 | return MDADM_STATUS_FORKED; | |
3009 | ||
3010 | if (devname && continue_via_systemd(devname, GROW_SERVICE, NULL) == MDADM_STATUS_SUCCESS) | |
3011 | return MDADM_STATUS_SUCCESS; | |
3012 | ||
3013 | switch (fork()) { | |
3014 | case -1: return MDADM_STATUS_ERROR; /* error */ | |
3015 | case 0: return MDADM_STATUS_FORKED; /* child */ | |
3016 | default: return MDADM_STATUS_SUCCESS; /* parent */ | |
3017 | } | |
3018 | ||
3019 | } | |
3020 | ||
3021 | static int reshape_array(char *container, int fd, char *devname, | |
3022 | struct supertype *st, struct mdinfo *info, | |
3023 | int force, struct mddev_dev *devlist, | |
3024 | unsigned long long data_offset, | |
3025 | char *backup_file, int verbose, int forked, | |
3026 | int restart) | |
3027 | { | |
3028 | struct reshape reshape; | |
3029 | int spares_needed; | |
3030 | char *msg; | |
3031 | int orig_level = UnSet; | |
3032 | int odisks; | |
3033 | int delayed; | |
3034 | ||
3035 | struct mdu_array_info_s array; | |
3036 | char *c; | |
3037 | ||
3038 | struct mddev_dev *dv; | |
3039 | int added_disks; | |
3040 | ||
3041 | int *fdlist = NULL; | |
3042 | unsigned long long *offsets = NULL; | |
3043 | int d; | |
3044 | int nrdisks; | |
3045 | int err; | |
3046 | unsigned long blocks; | |
3047 | unsigned long long array_size; | |
3048 | int done; | |
3049 | struct mdinfo *sra = NULL; | |
3050 | char buf[SYSFS_MAX_BUF_SIZE]; | |
3051 | bool located_backup = false; | |
3052 | ||
3053 | /* when reshaping a RAID0, the component_size might be zero. | |
3054 | * So try to fix that up. | |
3055 | */ | |
3056 | if (md_get_array_info(fd, &array) != 0) { | |
3057 | dprintf("Cannot get array information.\n"); | |
3058 | goto release; | |
3059 | } | |
3060 | if (st->update_tail == NULL) | |
3061 | st->update_tail = &st->updates; | |
3062 | if (array.level == 0 && info->component_size == 0) { | |
3063 | get_dev_size(fd, NULL, &array_size); | |
3064 | info->component_size = array_size / array.raid_disks; | |
3065 | } | |
3066 | ||
3067 | if (array.level == 10) | |
3068 | /* Need space_after info */ | |
3069 | get_space_after(fd, st, info); | |
3070 | ||
3071 | if (info->reshape_active) { | |
3072 | int new_level = info->new_level; | |
3073 | info->new_level = UnSet; | |
3074 | if (info->delta_disks > 0) | |
3075 | info->array.raid_disks -= info->delta_disks; | |
3076 | msg = analyse_change(devname, info, &reshape); | |
3077 | info->new_level = new_level; | |
3078 | if (info->delta_disks > 0) | |
3079 | info->array.raid_disks += info->delta_disks; | |
3080 | if (!restart) | |
3081 | /* Make sure the array isn't read-only */ | |
3082 | ioctl(fd, RESTART_ARRAY_RW, 0); | |
3083 | } else | |
3084 | msg = analyse_change(devname, info, &reshape); | |
3085 | if (msg) { | |
3086 | /* if msg == "", error has already been printed */ | |
3087 | if (msg[0]) | |
3088 | pr_err("%s\n", msg); | |
3089 | goto release; | |
3090 | } | |
3091 | if (restart && (reshape.level != info->array.level || | |
3092 | reshape.before.layout != info->array.layout || | |
3093 | reshape.before.data_disks + reshape.parity != | |
3094 | info->array.raid_disks - max(0, info->delta_disks))) { | |
3095 | pr_err("reshape info is not in native format - cannot continue.\n"); | |
3096 | goto release; | |
3097 | } | |
3098 | ||
3099 | if (st->ss->external && restart && (info->reshape_progress == 0) && | |
3100 | !((sysfs_get_str(info, NULL, "sync_action", | |
3101 | buf, sizeof(buf)) > 0) && | |
3102 | (strncmp(buf, "reshape", 7) == 0))) { | |
3103 | /* When the reshape is restarted from '0', the very beginning of | |
3104 | * the array, it is possible with external metadata that the reshape | |
3105 | * and array configuration did not happen yet. | |
3106 | * Check whether md has the same opinion and the reshape really is | |
3107 | * restarted from 0. If so, this is a regular reshape start after the | |
3108 | * metadata switched the reshape to the next array only. | |
3109 | */ | |
3110 | if ((verify_reshape_position(info, reshape.level) >= 0) && | |
3111 | (info->reshape_progress == 0)) | |
3112 | restart = 0; | |
3113 | } | |
3114 | if (restart) { | |
3115 | /* | |
3116 | * reshape already started. just skip to monitoring | |
3117 | * the reshape | |
3118 | */ | |
3119 | if (reshape.backup_blocks == 0) | |
3120 | return 0; | |
3121 | if (restart & RESHAPE_NO_BACKUP) | |
3122 | return 0; | |
3123 | ||
3124 | /* Need 'sra' down at 'started:' */ | |
3125 | sra = sysfs_read(fd, NULL, | |
3126 | GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE| | |
3127 | GET_CHUNK|GET_CACHE); | |
3128 | if (!sra) { | |
3129 | pr_err("%s: Cannot get array details from sysfs\n", | |
3130 | devname); | |
3131 | goto release; | |
3132 | } | |
3133 | ||
3134 | if (!backup_file) { | |
3135 | backup_file = locate_backup(sra->sys_name); | |
3136 | located_backup = true; | |
3137 | } | |
3138 | ||
3139 | goto started; | |
3140 | } | |
3141 | /* The container is frozen but the array may not be. | |
3142 | * So freeze the array so spares don't get put to the wrong use | |
3143 | * FIXME there should probably be a cleaner separation between | |
3144 | * freeze_array and freeze_container. | |
3145 | */ | |
3146 | sysfs_freeze_array(info); | |
3147 | /* Check we have enough spares to not be degraded */ | |
3148 | added_disks = 0; | |
3149 | for (dv = devlist; dv ; dv=dv->next) | |
3150 | added_disks++; | |
3151 | spares_needed = max(reshape.before.data_disks, | |
3152 | reshape.after.data_disks) + | |
3153 | reshape.parity - array.raid_disks; | |
3154 | ||
3155 | if (!force && info->new_level > 1 && info->array.level > 1 && | |
3156 | spares_needed > info->array.spare_disks + added_disks) { | |
3157 | pr_err("Need %d spare%s to avoid degraded array, and only have %d.\n" | |
3158 | " Use --force to over-ride this check.\n", | |
3159 | spares_needed, | |
3160 | spares_needed == 1 ? "" : "s", | |
3161 | info->array.spare_disks + added_disks); | |
3162 | goto release; | |
3163 | } | |
3164 | /* Check we have enough spares to not fail */ | |
3165 | spares_needed = max(reshape.before.data_disks, | |
3166 | reshape.after.data_disks) | |
3167 | - array.raid_disks; | |
3168 | if ((info->new_level > 1 || info->new_level == 0) && | |
3169 | spares_needed > info->array.spare_disks + added_disks) { | |
3170 | pr_err("Need %d spare%s to create working array, and only have %d.\n", | |
3171 | spares_needed, spares_needed == 1 ? "" : "s", | |
3172 | info->array.spare_disks + added_disks); | |
3173 | goto release; | |
3174 | } | |
3175 | ||
3176 | if (reshape.level != array.level) { | |
3177 | int err = impose_level(fd, reshape.level, devname, verbose); | |
3178 | if (err) | |
3179 | goto release; | |
3180 | info->new_layout = UnSet; /* after level change, | |
3181 | * layout is meaningless */ | |
3182 | orig_level = array.level; | |
3183 | sysfs_freeze_array(info); | |
3184 | ||
3185 | if (reshape.level > 0 && st->ss->external) { | |
3186 | /* make sure mdmon is aware of the new level */ | |
3187 | if (mdmon_running(container)) | |
3188 | flush_mdmon(container); | |
3189 | ||
3190 | if (!mdmon_running(container)) | |
3191 | start_mdmon(container); | |
3192 | ping_monitor(container); | |
3193 | if (wait_for_mdmon(container) == MDADM_STATUS_SUCCESS && | |
3194 | !st->update_tail) | |
3195 | st->update_tail = &st->updates; | |
3196 | } | |
3197 | } | |
3198 | /* ->reshape_super might have chosen some spares from the | |
3199 | * container that it wants to be part of the new array. | |
3200 | * We can collect them with ->container_content and give | |
3201 | * them to the kernel. | |
3202 | */ | |
3203 | if (st->ss->reshape_super && st->ss->container_content) { | |
3204 | char *subarray = strchr(info->text_version+1, '/')+1; | |
3205 | struct mdinfo *info2 = | |
3206 | st->ss->container_content(st, subarray); | |
3207 | struct mdinfo *d; | |
3208 | ||
3209 | if (info2) { | |
3210 | if (sysfs_init(info2, fd, st->devnm)) { | |
3211 | pr_err("unable to initialize sysfs for %s\n", | |
3212 | st->devnm); | |
3213 | free(info2); | |
3214 | goto release; | |
3215 | } | |
3216 | /* When increasing the number of devices, we need to set | 
3217 | * new raid_disks before adding these, or they might | |
3218 | * be rejected. | |
3219 | */ | |
3220 | if (reshape.backup_blocks && | |
3221 | reshape.after.data_disks > | |
3222 | reshape.before.data_disks) | |
3223 | subarray_set_num(container, info2, "raid_disks", | |
3224 | reshape.after.data_disks + | |
3225 | reshape.parity); | |
3226 | for (d = info2->devs; d; d = d->next) { | |
3227 | if (d->disk.state == 0 && | |
3228 | d->disk.raid_disk >= 0) { | |
3229 | /* This is a spare that wants to | |
3230 | * be part of the array. | |
3231 | */ | |
3232 | if (add_disk(fd, st, info2, d) < 0) { | |
3233 | pr_err("Can not add disk %s\n", | |
3234 | d->sys_name); | |
3235 | free(info2); | |
3236 | goto release; | |
3237 | } | |
3238 | } | |
3239 | } | |
3240 | sysfs_free(info2); | |
3241 | } | |
3242 | } | |
3243 | /* We might have been given some devices to add to the | |
3244 | * array. Now that the array has been changed to the right | |
3245 | * level and frozen, we can safely add them. | |
3246 | */ | |
3247 | if (devlist) { | |
3248 | if (Manage_subdevs(devname, fd, devlist, verbose, 0, UOPT_UNDEFINED, 0)) | |
3249 | goto release; | |
3250 | } | |
3251 | ||
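| /* An explicit data_offset change must take the full reshape path below, so give it a one-stripe reshape unit (chunk_size is in bytes, hence /512 for sectors) */ | 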
3252 | if (reshape.backup_blocks == 0 && data_offset != INVALID_SECTORS) | |
3253 | reshape.backup_blocks = reshape.before.data_disks * info->array.chunk_size/512; | |
3254 | if (reshape.backup_blocks == 0) { | |
3255 | /* No restriping needed, but we might need to impose | |
3256 | * some more changes: layout, raid_disks, chunk_size | |
3257 | */ | |
3258 | /* read current array info */ | |
3259 | if (md_get_array_info(fd, &array) != 0) { | |
3260 | dprintf("Cannot get array information.\n"); | |
3261 | goto release; | |
3262 | } | |
3263 | /* compare the current array info with the new values and, | 
3264 | * where they differ, update to the new values */ | 
3265 | if (info->new_layout != UnSet && | |
3266 | info->new_layout != array.layout) { | |
3267 | array.layout = info->new_layout; | |
3268 | if (md_set_array_info(fd, &array) != 0) { | |
3269 | pr_err("failed to set new layout\n"); | |
3270 | goto release; | |
3271 | } else if (verbose >= 0) | |
3272 | printf("layout for %s set to %d\n", | |
3273 | devname, array.layout); | |
3274 | } | |
3275 | if (info->delta_disks != UnSet && info->delta_disks != 0 && | |
3276 | array.raid_disks != | |
3277 | (info->array.raid_disks + info->delta_disks)) { | |
3278 | array.raid_disks += info->delta_disks; | |
3279 | if (md_set_array_info(fd, &array) != 0) { | |
3280 | pr_err("failed to set raid disks\n"); | |
3281 | goto release; | |
3282 | } else if (verbose >= 0) { | |
3283 | printf("raid_disks for %s set to %d\n", | |
3284 | devname, array.raid_disks); | |
3285 | } | |
3286 | } | |
3287 | if (info->new_chunk != 0 && | |
3288 | info->new_chunk != array.chunk_size) { | |
3289 | if (sysfs_set_num(info, NULL, | |
3290 | "chunk_size", info->new_chunk) != 0) { | |
3291 | pr_err("failed to set chunk size\n"); | |
3292 | goto release; | |
3293 | } else if (verbose >= 0) | |
3294 | printf("chunk size for %s set to %d\n", | |
3295 | devname, info->new_chunk); | |
3296 | } | |
3297 | unfreeze(st); | |
3298 | return 0; | |
3299 | } | |
3300 | ||
3301 | /* | |
3302 | * There are three possibilities. | |
3303 | * 1/ The array will shrink. | |
3304 | * We need to ensure the reshape will pause before reaching | |
3305 | * the 'critical section'. We also need to fork and wait for | |
3306 | * that to happen. When it does we | |
3307 | * suspend/backup/complete/unfreeze | |
3308 | * | |
3309 | * 2/ The array will not change size. | |
3310 | * This requires that we keep a backup of a sliding window | |
3311 | * so that we can restore data after a crash. So we need | |
3312 | * to fork and monitor progress. | |
3313 | * In future we will allow the data_offset to change, so | |
3314 | * a sliding backup becomes unnecessary. | |
3315 | * | |
3316 | * 3/ The array will grow. This is relatively easy. | |
3317 | * However the kernel's restripe routines will cheerfully | |
3318 | * overwrite some early data before it is safe. So we | |
3319 | * need to make a backup of the early parts of the array | |
3320 | * and be ready to restore it if rebuild aborts very early. | |
3321 | * For externally managed metadata, we still need a forked | |
3322 | * child to monitor the reshape and suspend IO over the region | |
3323 | * that is being reshaped. | |
3324 | * | |
3325 | * We backup data by writing it to one spare, or to a | |
3326 | * file which was given on command line. | |
3327 | * | |
3328 | * In each case, we first make sure that storage is available | |
3329 | * for the required backup. | |
3330 | * Then we: | |
3331 | * - request the shape change. | |
3332 | * - fork to handle backup etc. | |
3333 | */ | |
3334 | /* Check that we can hold all the data */ | |
3335 | get_dev_size(fd, NULL, &array_size); | |
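| /* get_dev_size() reports bytes while new_size is in sectors, hence the /512; the --array-size hint below is in KiB, hence new_size/2 */ | 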
3336 | if (reshape.new_size < (array_size/512)) { | |
3337 | pr_err("this change will reduce the size of the array.\n" | |
3338 | " use --grow --array-size first to truncate array.\n" | |
3339 | " e.g. mdadm --grow %s --array-size %llu\n", | |
3340 | devname, reshape.new_size/2); | |
3341 | goto release; | |
3342 | } | |
3343 | ||
3344 | if (array.level == 10) { | |
3345 | /* Reshaping RAID10 does not require any data backup by | |
3346 | * user-space. Instead it requires that the data_offset | |
3347 | * is changed to avoid the need for backup. | |
3348 | * So this is handled very separately | |
3349 | */ | |
3350 | if (restart) | |
3351 | /* Nothing to do. */ | |
3352 | return 0; | |
3353 | return raid10_reshape(container, fd, devname, st, info, | |
3354 | &reshape, data_offset, force, verbose); | |
3355 | } | |
3356 | sra = sysfs_read(fd, NULL, | |
3357 | GET_COMPONENT|GET_DEVS|GET_OFFSET|GET_STATE|GET_CHUNK| | |
3358 | GET_CACHE); | |
3359 | if (!sra) { | |
3360 | pr_err("%s: Cannot get array details from sysfs\n", | |
3361 | devname); | |
3362 | goto release; | |
3363 | } | |
3364 | ||
3365 | if (!backup_file) | |
3366 | switch(set_new_data_offset(sra, st, devname, | |
3367 | reshape.after.data_disks - reshape.before.data_disks, | |
3368 | data_offset, | |
3369 | reshape.min_offset_change, 1)) { | |
3370 | case -1: | |
3371 | goto release; | |
3372 | case 0: | |
3373 | /* Updated data_offset, so it's easy now */ | |
3374 | update_cache_size(container, sra, info, | |
3375 | min(reshape.before.data_disks, | |
3376 | reshape.after.data_disks), | |
3377 | reshape.backup_blocks); | |
3378 | ||
3379 | /* Right, everything seems fine. Let's kick things off. | |
3380 | */ | |
3381 | sync_metadata(st); | |
3382 | ||
3383 | if (impose_reshape(sra, info, st, fd, restart, | |
3384 | devname, container, &reshape) < 0) | |
3385 | goto release; | |
3386 | if (sysfs_set_str(sra, NULL, "sync_action", "reshape") < 0) { | |
3387 | struct mdinfo *sd; | |
3388 | if (errno != EINVAL) { | |
3389 | pr_err("Failed to initiate reshape!\n"); | |
3390 | goto release; | |
3391 | } | |
3392 | /* revert data_offset and try the old way */ | |
3393 | for (sd = sra->devs; sd; sd = sd->next) { | |
3394 | sysfs_set_num(sra, sd, "new_offset", | |
3395 | sd->data_offset); | |
3396 | sysfs_set_str(sra, NULL, "reshape_direction", | |
3397 | "forwards"); | |
3398 | } | |
3399 | break; | |
3400 | } | |
3401 | if (info->new_level == reshape.level) | |
3402 | return 0; | |
3403 | /* need to adjust level when reshape completes */ | |
3404 | switch(fork()) { | |
3405 | case -1: /* ignore error, but don't wait */ | |
3406 | return 0; | |
3407 | default: /* parent */ | |
3408 | return 0; | |
3409 | case 0: | |
3410 | manage_fork_fds(0); | |
3411 | map_fork(); | |
3412 | break; | |
3413 | } | |
3414 | close(fd); | |
3415 | wait_reshape(sra); | |
3416 | fd = open_dev(sra->sys_name); | |
3417 | if (fd >= 0) | |
3418 | impose_level(fd, info->new_level, devname, verbose); | |
3419 | return 0; | |
3420 | case 1: /* Couldn't set data_offset, try the old way */ | |
3421 | if (data_offset != INVALID_SECTORS) { | |
3422 | pr_err("Cannot update data_offset on this array\n"); | |
3423 | goto release; | |
3424 | } | |
3425 | break; | |
3426 | } | |
3427 | ||
3428 | started: | |
3429 | /* Decide how many blocks (sectors) to use for a reshape | 
3430 | * unit. The number we have so far is just a minimum. | 
3431 | */ | |
3432 | blocks = reshape.backup_blocks; | |
3433 | if (reshape.before.data_disks == | |
3434 | reshape.after.data_disks) { | |
3435 | /* Make 'blocks' bigger for better throughput, but | |
3436 | * not so big that we reject it below. | |
3437 | * Try for 16 megabytes | |
3438 | */ | |
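| /* 'blocks' counts 512-byte sectors, so the 16*1024*2 cap is the 16 megabytes mentioned above */ | 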
3439 | while (blocks * 32 < sra->component_size && blocks < 16*1024*2) | |
3440 | blocks *= 2; | |
3441 | } else | |
3442 | pr_err("Need to backup %luK of critical section..\n", blocks/2); | |
3443 | ||
3444 | if (blocks >= sra->component_size/2) { | |
3445 | pr_err("%s: Something wrong - reshape aborted\n", devname); | |
3446 | goto release; | |
3447 | } | |
3448 | ||
3449 | /* Now we need to open all these devices so we can read/write. | |
3450 | */ | |
3451 | nrdisks = max(reshape.before.data_disks, | |
3452 | reshape.after.data_disks) + reshape.parity | |
3453 | + sra->array.spare_disks; | |
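| /* the extra slot leaves room for a backup-file descriptor at the end of the list */ | 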
3454 | fdlist = xcalloc((1+nrdisks), sizeof(int)); | |
3455 | offsets = xcalloc((1+nrdisks), sizeof(offsets[0])); | |
3456 | ||
3457 | odisks = reshape.before.data_disks + reshape.parity; | |
3458 | d = reshape_prepare_fdlist(devname, sra, odisks, nrdisks, blocks, | |
3459 | backup_file, fdlist, offsets); | |
3460 | if (d < odisks) { | |
3461 | goto release; | |
3462 | } | |
3463 | if ((st->ss->manage_reshape == NULL) || | |
3464 | (st->ss->recover_backup == NULL)) { | |
3465 | if (backup_file == NULL) { | |
3466 | if (reshape.after.data_disks <= | |
3467 | reshape.before.data_disks) { | |
3468 | pr_err("%s: Cannot grow - need backup-file\n", | |
3469 | devname); | |
3470 | pr_err(" Please provide one with \"--backup=...\"\n"); | |
3471 | goto release; | |
3472 | } else if (d == odisks) { | |
3473 | pr_err("%s: Cannot grow - need a spare or backup-file to backup critical section\n", devname); | |
3474 | goto release; | |
3475 | } | |
3476 | } else { | |
3477 | if (!reshape_open_backup_file(backup_file, fd, devname, | |
3478 | (signed)blocks, | |
3479 | fdlist+d, offsets+d, | |
3480 | sra->sys_name, restart)) { | |
3481 | goto release; | |
3482 | } | |
3483 | d++; | |
3484 | } | |
3485 | } | |
3486 | ||
3487 | update_cache_size(container, sra, info, | |
3488 | min(reshape.before.data_disks, | |
3489 | reshape.after.data_disks), blocks); | |
3490 | ||
3491 | /* Right, everything seems fine. Let's kick things off. | |
3492 | * If only changing raid_disks, use ioctl, else use | |
3493 | * sysfs. | |
3494 | */ | |
3495 | sync_metadata(st); | |
3496 | ||
3497 | if (impose_reshape(sra, info, st, fd, restart, | |
3498 | devname, container, &reshape) < 0) | |
3499 | goto release; | |
3500 | ||
3501 | err = start_reshape(sra, restart, reshape.before.data_disks, | |
3502 | reshape.after.data_disks, st); | |
3503 | if (err) { | |
3504 | pr_err("Cannot %s reshape for %s\n", | |
3505 | restart ? "continue" : "start", devname); | |
3506 | goto release; | |
3507 | } | |
3508 | if (restart) | |
3509 | sysfs_set_str(sra, NULL, "array_state", "active"); | |
3510 | ||
3511 | /* Do not run in initrd */ | |
3512 | if (in_initrd()) { | |
3513 | pr_info("Reshape has to be continued from location %llu when root filesystem has been mounted.\n", | 
3514 | sra->reshape_progress); | 
3515 | free(fdlist); | 
3516 | free(offsets); | 
3517 | sysfs_free(sra); | 
3518 | return 1; | |
3519 | } | |
3520 | ||
3521 | /* Now we just need to kick off the reshape and watch, while | |
3522 | * handling backups of the data... | |
3523 | * This is all done by a forked background process. | |
3524 | */ | |
3525 | switch (handle_forking(forked, container ? container : sra->sys_name)) { | |
3526 | default: /* Unused, only to satisfy compiler. */ | |
3527 | case MDADM_STATUS_ERROR: /* error */ | |
3528 | pr_err("Cannot run child to monitor reshape: %s\n", | |
3529 | strerror(errno)); | |
3530 | abort_reshape(sra); | |
3531 | goto release; | |
3532 | case MDADM_STATUS_FORKED: /* child */ | |
3533 | map_fork(); | |
3534 | break; | |
3535 | case MDADM_STATUS_SUCCESS: /* parent */ | |
3536 | free(fdlist); | |
3537 | free(offsets); | |
3538 | sysfs_free(sra); | |
3539 | return 0; | |
3540 | } | |
3541 | ||
3542 | /* Close unused file descriptor in the forked process */ | |
3543 | close_fd(&fd); | |
3544 | ||
3545 | /* If another array on the same devices is busy, the | |
3546 | * reshape will wait for it. This would mean that | 
3547 | * the first section that we suspend will stay suspended | |
3548 | * for a long time. So check on that possibility | |
3549 | * by looking for "DELAYED" in /proc/mdstat, and if found, | |
3550 | * wait a while | |
3551 | */ | |
3552 | do { | |
3553 | struct mdstat_ent *mds, *m; | |
3554 | delayed = 0; | |
3555 | mds = mdstat_read(1, 0); | |
3556 | for (m = mds; m; m = m->next) | |
3557 | if (strcmp(m->devnm, sra->sys_name) == 0) { | |
3558 | if (m->resync && m->percent == RESYNC_DELAYED) | |
3559 | delayed = 1; | |
3560 | if (m->resync == 0) | |
3561 | /* Haven't started the reshape thread | |
3562 | * yet, wait a bit | |
3563 | */ | |
3564 | delayed = 2; | |
3565 | break; | |
3566 | } | |
3567 | free_mdstat(mds); | |
3568 | if (delayed == 1 && get_linux_version() < 3007000) { | |
3569 | pr_err("Reshape is delayed, but cannot wait carefully with this kernel.\n" | |
3570 | " You might experience problems until other reshapes complete.\n"); | |
3571 | delayed = 0; | |
3572 | } | |
3573 | if (delayed) | |
3574 | mdstat_wait(30 - (delayed-1) * 25); | |
3575 | } while (delayed); | |
3576 | mdstat_close(); | |
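| /* lock everything in memory: getting paged out while IO on the array is suspended could deadlock against that same array */ | 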
3577 | mlockall(MCL_FUTURE); | |
3578 | ||
3579 | if (signal_s(SIGTERM, catch_term) == SIG_ERR) | |
3580 | goto release; | |
3581 | ||
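| /* MDADM_GROW_VERIFY is a regression-testing hook: reopen the array read-only so validate() can compare each backup against it */ | 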
3582 | if (check_env("MDADM_GROW_VERIFY")) | |
3583 | fd = open(devname, O_RDONLY | O_DIRECT); | |
3584 | if (st->ss->external) { | |
3585 | /* metadata handler takes it from here */ | |
3586 | done = st->ss->manage_reshape( | |
3587 | fd, sra, &reshape, st, blocks, | |
3588 | fdlist, offsets, d - odisks, fdlist + odisks, | |
3589 | offsets + odisks); | |
3590 | } else | |
3591 | done = child_monitor( | |
3592 | fd, sra, &reshape, st, blocks, fdlist, offsets, | |
3593 | d - odisks, fdlist + odisks, offsets + odisks); | |
3594 | ||
3595 | close_fd(&fd); | |
3596 | free(fdlist); | |
3597 | free(offsets); | |
3598 | ||
3599 | if (backup_file && done) { | |
3600 | char *bul; | |
3601 | bul = make_backup(sra->sys_name); | |
3602 | if (bul) { | |
3603 | char buf[1024]; | |
3604 | int l = readlink(bul, buf, sizeof(buf) - 1); | |
3605 | if (l > 0) { | |
3606 | buf[l]=0; | |
3607 | unlink(buf); | |
3608 | } | |
3609 | unlink(bul); | |
3610 | free(bul); | |
3611 | } | |
3612 | unlink(backup_file); | |
3613 | } | |
3614 | if (!done) { | |
3615 | abort_reshape(sra); | |
3616 | goto out; | |
3617 | } | |
3618 | ||
3619 | if (!st->ss->external && | |
3620 | !(reshape.before.data_disks != reshape.after.data_disks && | |
3621 | info->custom_array_size) && info->new_level == reshape.level && | |
3622 | !forked) { | |
3623 | /* no need to wait for the reshape to finish as | |
3624 | * there is nothing more to do. | |
3625 | */ | |
3626 | sysfs_free(sra); | |
3627 | exit(0); | |
3628 | } | |
3629 | wait_reshape(sra); | |
3630 | ||
3631 | if (st->ss->external) { | |
3632 | /* Re-load the metadata as much could have changed */ | |
3633 | int cfd = open_dev(st->container_devnm); | |
3634 | if (cfd >= 0) { | |
3635 | flush_mdmon(container); | |
3636 | st->ss->free_super(st); | |
3637 | st->ss->load_container(st, cfd, container); | |
3638 | close(cfd); | |
3639 | } | |
3640 | } | |
3641 | ||
3642 | /* set the new array size if required; custom_array_size is used | 
3643 | * by this metadata. | |
3644 | */ | |
3645 | if (reshape.before.data_disks != reshape.after.data_disks && | |
3646 | info->custom_array_size) | |
3647 | set_array_size(st, info, info->text_version); | |
3648 | ||
3649 | if (info->new_level != reshape.level) { | |
3650 | fd = open_dev(sra->sys_name); | |
3651 | if (fd < 0) { | |
3652 | pr_err("Can't open %s\n", sra->sys_name); | |
3653 | goto out; | |
3654 | } | |
3655 | impose_level(fd, info->new_level, sra->sys_name, verbose); | |
3656 | close(fd); | |
3657 | if (info->new_level == 0) | |
3658 | st->update_tail = NULL; | |
3659 | } | |
3660 | out: | |
3661 | sysfs_free(sra); | |
3662 | if (forked) | |
3663 | return 0; | |
3664 | unfreeze(st); | |
3665 | exit(0); | |
3666 | ||
3667 | release: | |
3668 | if (located_backup) | |
3669 | free(backup_file); | |
3670 | free(fdlist); | |
3671 | free(offsets); | |
3672 | if (orig_level != UnSet && sra) { | |
3673 | c = map_num(pers, orig_level); | |
3674 | if (c && sysfs_set_str(sra, NULL, "level", c) == 0) | |
3675 | pr_err("aborting level change\n"); | |
3676 | } | |
3677 | sysfs_free(sra); | |
3678 | if (!forked) | |
3679 | unfreeze(st); | |
3680 | return 1; | |
3681 | } | |
3682 | ||
3683 | /* The mdfd handle is passed in so it can be closed in the child process (after fork). | 
3684 | */ | |
3685 | int reshape_container(char *container, char *devname, | |
3686 | int mdfd, | |
3687 | struct supertype *st, | |
3688 | struct mdinfo *info, | |
3689 | struct context *c, | |
3690 | int forked, int restart) | |
3691 | { | |
3692 | struct mdinfo *cc = NULL; | |
3693 | int rv = restart; | |
3694 | char last_devnm[32] = ""; | |
3695 | ||
3696 | /* component_size is not meaningful for a container */ | |
3697 | if (!restart && reshape_super_non_size(st, devname, info, c)) { | |
3698 | unfreeze(st); | |
3699 | return 1; | |
3700 | } | |
3701 | ||
3702 | sync_metadata(st); | |
3703 | ||
3704 | /* ping monitor to be sure that update is on disk | |
3705 | */ | |
3706 | ping_monitor(container); | |
3707 | ||
3708 | switch (handle_forking(forked, container)) { | |
3709 | default: /* Unused, only to satisfy compiler. */ | |
3710 | case MDADM_STATUS_ERROR: /* error */ | |
3711 | perror("Cannot fork to complete reshape\n"); | |
3712 | unfreeze(st); | |
3713 | return 1; | |
3714 | case MDADM_STATUS_FORKED: /* child */ | |
3715 | manage_fork_fds(0); | |
3716 | map_fork(); | |
3717 | break; | |
3718 | case MDADM_STATUS_SUCCESS: /* parent */ | |
3719 | printf("%s: multi-array reshape continues in background\n", Name); | |
3720 | return 0; | |
3721 | } | |
3722 | ||
3723 | /* close unused handle in child process | |
3724 | */ | |
3725 | if (mdfd > -1) | |
3726 | close(mdfd); | |
3727 | ||
3728 | while(1) { | |
3729 | /* For each member array with reshape_active, | |
3730 | * we need to perform the reshape. | |
3731 | * We pick the first array that needs reshaping and | |
3732 | * reshape it. reshape_array() will re-read the metadata | |
3733 | * so the next time through a different array should be | |
3734 | * ready for reshape. | |
3735 | * It is possible that the 'different' array will not | |
3736 | * be assembled yet. In that case we simply exit. | 
3737 | * When it is assembled, the mdadm which assembles it | |
3738 | * will take over the reshape. | |
3739 | */ | |
3740 | struct mdinfo *content; | |
3741 | int fd; | |
3742 | struct mdstat_ent *mdstat; | |
3743 | char *adev; | |
3744 | dev_t devid; | |
3745 | ||
3746 | sysfs_free(cc); | |
3747 | ||
3748 | cc = st->ss->container_content(st, NULL); | |
3749 | ||
3750 | for (content = cc; content ; content = content->next) { | |
3751 | char *subarray; | |
3752 | if (!content->reshape_active) | |
3753 | continue; | |
3754 | ||
3755 | subarray = strchr(content->text_version+1, '/')+1; | |
3756 | mdstat = mdstat_by_subdev(subarray, container); | |
3757 | if (!mdstat) | |
3758 | continue; | |
3759 | if (mdstat->active == 0) { | |
3760 | pr_err("Skipping inactive array %s.\n", | |
3761 | mdstat->devnm); | |
3762 | free_mdstat(mdstat); | |
3763 | mdstat = NULL; | |
3764 | continue; | |
3765 | } | |
3766 | break; | |
3767 | } | |
3768 | if (!content) | |
3769 | break; | |
3770 | ||
3771 | devid = devnm2devid(mdstat->devnm); | |
3772 | adev = map_dev(major(devid), minor(devid), 0); | |
3773 | if (!adev) | |
3774 | adev = content->text_version; | |
3775 | ||
3776 | fd = open_dev(mdstat->devnm); | |
3777 | if (fd < 0) { | |
3778 | pr_err("Device %s cannot be opened for reshape.\n", | |
3779 | adev); | |
3780 | break; | |
3781 | } | |
3782 | ||
3783 | if (strcmp(last_devnm, mdstat->devnm) == 0) { | |
3784 | /* Do not allow for multiple reshape_array() calls for | |
3785 | * the same array. | |
3786 | * It can happen when reshape_array() returns without | |
3787 | * error, when reshape is not finished (wrong reshape | |
3788 | * starting/continuation conditions). Mdmon doesn't | |
3789 | * switch to next array in container and reentry | |
3790 | * conditions for the same array occur. | |
3791 | * This is possibly interim until the behaviour of | |
3792 | * reshape_array() is resolved. | 
3793 | */ | |
3794 | printf("%s: Multiple reshape execution detected for device %s.\n", Name, adev); | |
3795 | close(fd); | |
3796 | break; | |
3797 | } | |
3798 | strcpy(last_devnm, mdstat->devnm); | |
3799 | ||
3800 | if (sysfs_init(content, fd, mdstat->devnm)) { | |
3801 | pr_err("Unable to initialize sysfs for %s\n", | |
3802 | mdstat->devnm); | |
3803 | rv = 1; | |
3804 | close_fd(&fd); | |
3805 | break; | |
3806 | } | |
3807 | ||
3808 | if (mdmon_running(container)) | |
3809 | flush_mdmon(container); | |
3810 | ||
3811 | rv = reshape_array(container, fd, adev, st, | |
3812 | content, c->force, NULL, INVALID_SECTORS, | |
3813 | c->backup_file, c->verbose, 1, restart); | |
3814 | close(fd); | |
3815 | ||
3816 | /* Do not run reshape in initrd but let it initialize.*/ | |
3817 | if (in_initrd()) { | |
3818 | sysfs_free(cc); | |
3819 | exit(0); | |
3820 | } | |
3821 | ||
3822 | restart = 0; | |
3823 | if (rv) | |
3824 | break; | |
3825 | ||
3826 | if (mdmon_running(container)) | |
3827 | flush_mdmon(container); | |
3828 | } | |
3829 | if (!rv) | |
3830 | unfreeze(st); | |
3831 | sysfs_free(cc); | |
3832 | exit(0); | |
3833 | } | |
3834 | ||
3835 | /* | |
3836 | * We run a child process in the background which performs the following | |
3837 | * steps: | |
3838 | * - wait for resync to reach a certain point | |
3839 | * - suspend io to the following section | |
3840 | * - backup that section | |
3841 | * - allow resync to proceed further | |
3842 | * - resume io | |
3843 | * - discard the backup. | |
3844 | * | |
3845 | * These are combined in slightly different ways in the three cases. | 
3846 | * Grow: | |
3847 | * - suspend/backup/allow/wait/resume/discard | |
3848 | * Shrink: | |
3849 | * - allow/wait/suspend/backup/allow/wait/resume/discard | |
3850 | * same-size: | |
3851 | * - wait/resume/discard/suspend/backup/allow | |
3852 | * | |
3853 | * suspend/backup/allow always come together | |
3854 | * wait/resume/discard do too. | |
3855 | * For the same-size case we have two backups to improve flow. | |
3856 | * | |
3857 | */ | |
3858 | ||
3859 | int progress_reshape(struct mdinfo *info, struct reshape *reshape, | |
3860 | unsigned long long backup_point, | |
3861 | unsigned long long wait_point, | |
3862 | unsigned long long *suspend_point, | |
3863 | unsigned long long *reshape_completed, int *frozen) | |
3864 | { | |
3865 | /* This function is called repeatedly by the reshape manager. | |
3866 | * It determines how much progress can safely be made and allows | |
3867 | * that progress. | |
3868 | * - 'info' identifies the array and particularly records in | |
3869 | * ->reshape_progress the metadata's knowledge of progress. | 
3870 | * This is a sector offset from the start of the array | |
3871 | * of the next array block to be relocated. This number | |
3872 | * may increase from 0 or decrease from array_size, depending | |
3873 | * on the type of reshape that is happening. | |
3874 | * Note that in contrast, 'sync_completed' is a block count of the | |
3875 | * reshape so far. It gives the distance between the start point | |
3876 | * (head or tail of device) and the next place that data will be | |
3877 | * written. It always increases. | |
3878 | * - 'reshape' is the structure created by analyse_change | |
3879 | * - 'backup_point' shows how much the metadata manager has backed-up | |
3880 | * data. For reshapes with increasing progress, it is the next address | |
3881 | * to be backed up, previous addresses have been backed-up. For | |
3882 | * decreasing progress, it is the earliest address that has been | |
3883 | * backed up - later addresses are also backed up. | 
3884 | * So addresses between reshape_progress and backup_point are | |
3885 | * backed up providing those are in the 'correct' order. | |
3886 | * - 'wait_point' is an array address. When reshape_completed | |
3887 | * passes this point, progress_reshape should return. It might | |
3888 | * return earlier if it determines that ->reshape_progress needs | |
3889 | * to be updated or further backup is needed. | |
3890 | * - suspend_point is maintained by progress_reshape and the caller | |
3891 | * should not touch it except to initialise to zero. | |
3892 | * It is an array address and it only increases in 2.6.37 and earlier. | |
3893 | * This makes it difficult to handle reducing reshapes with | |
3894 | * external metadata. | |
3895 | * However: it is similar to backup_point in that it records the | |
3896 | * other end of a suspended region from reshape_progress. | |
3897 | * It is moved to extend the region that is safe to back up and/or | 
3898 | * reshape. | 
3899 | * - reshape_completed is read from sysfs and returned. The caller | |
3900 | * should copy this into ->reshape_progress when it has reason to | |
3901 | * believe that the metadata knows this, and any backup outside this | |
3902 | * has been erased. | |
3903 | * | |
3904 | * Return value is: | |
3905 | * 1 if more data from backup_point - but only as far as suspend_point, | |
3906 | * should be backed up | |
3907 | * 0 if things are progressing smoothly | |
3908 | * -1 if the reshape is finished because it is all done, | |
3909 | * -2 if the reshape is finished due to an error. | |
3910 | */ | |
3911 | ||
3912 | int advancing = (reshape->after.data_disks | |
3913 | >= reshape->before.data_disks); | |
3914 | unsigned long long need_backup; /* All data between start of array and | |
3915 | * here will at some point need to | |
3916 | * be backed up. | |
3917 | */ | |
3918 | unsigned long long read_offset, write_offset; | |
3919 | unsigned long long write_range; | |
3920 | unsigned long long max_progress, target, completed; | |
3921 | unsigned long long array_size = (info->component_size | |
3922 | * reshape->before.data_disks); | |
3923 | int fd; | |
3924 | char buf[SYSFS_MAX_BUF_SIZE]; | |
3925 | ||
3926 | /* First, we unsuspend any region that is now known to be safe. | |
3927 | * If suspend_point is on the 'wrong' side of reshape_progress, then | |
3928 | * we don't have or need suspension at the moment. This is true for | |
3929 | * native metadata when we don't need to back-up. | |
3930 | */ | |
3931 | if (advancing) { | |
3932 | if (info->reshape_progress <= *suspend_point) | |
3933 | sysfs_set_num(info, NULL, "suspend_lo", | |
3934 | info->reshape_progress); | |
3935 | } else { | |
3936 | /* Note: this won't work in 2.6.37 and before. | |
3937 | * Something somewhere should make sure we don't need it! | |
3938 | */ | |
3939 | if (info->reshape_progress >= *suspend_point) | |
3940 | sysfs_set_num(info, NULL, "suspend_hi", | |
3941 | info->reshape_progress); | |
3942 | } | |
3943 | ||
3944 | /* Now work out how far it is safe to progress. | |
3945 | * If the read_offset for ->reshape_progress is less than | |
3946 | * 'blocks' beyond the write_offset, we can only progress as far | |
3947 | * as a backup. | |
3948 | * Otherwise we can progress until the write_offset for the new location | |
3949 | * reaches (within 'blocks' of) the read_offset at the current location. | |
3950 | * However that region must be suspended unless we are using native | |
3951 | * metadata. | |
3952 | * If we need to suspend more, we limit it to 128M per device, which is | |
3953 | * rather arbitrary and should be some time-based calculation. | |
3954 | */ | |
3955 | read_offset = info->reshape_progress / reshape->before.data_disks; | |
3956 | write_offset = info->reshape_progress / reshape->after.data_disks; | |
3957 | write_range = info->new_chunk/512; | |
3958 | if (reshape->before.data_disks == reshape->after.data_disks) | |
3959 | need_backup = array_size; | |
3960 | else | |
3961 | need_backup = reshape->backup_blocks; | |
3962 | if (advancing) { | |
3963 | if (read_offset < write_offset + write_range) | |
3964 | max_progress = backup_point; | |
3965 | else | |
3966 | max_progress = | |
3967 | read_offset * reshape->after.data_disks; | |
3968 | } else { | |
3969 | if (read_offset > write_offset - write_range) | |
3970 | /* Can only progress as far as has been backed up, | |
3971 | * which must be suspended */ | |
3972 | max_progress = backup_point; | |
3973 | else if (info->reshape_progress <= need_backup) | |
3974 | max_progress = backup_point; | |
3975 | else { | |
3976 | if (info->array.major_version >= 0) | |
3977 | /* Can progress until backup is needed */ | |
3978 | max_progress = need_backup; | |
3979 | else { | |
3980 | /* Can progress until metadata update is required */ | |
3981 | max_progress = | |
3982 | read_offset * reshape->after.data_disks; | |
3983 | /* but data must be suspended */ | |
3984 | if (max_progress < *suspend_point) | |
3985 | max_progress = *suspend_point; | |
3986 | } | |
3987 | } | |
3988 | } | |
3989 | ||
3990 | /* We know it is safe to progress to 'max_progress' providing | |
3991 | * it is suspended or we are using native metadata. | |
3992 | * Consider extending suspend_point 128M per device if it | |
3993 | * is less than 64M per device beyond reshape_progress. | |
3994 | * But always do a multiple of 'blocks' | |
3995 | * FIXME this is too big - it takes too long to complete | 
3996 | * this much. | |
3997 | */ | |
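| /* i.e. 64MiB per data disk (in sectors), rounded down to a multiple of backup_blocks but never less than two backup units */ | 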
3998 | target = 64*1024*2 * min(reshape->before.data_disks, | |
3999 | reshape->after.data_disks); | |
4000 | target /= reshape->backup_blocks; | |
4001 | if (target < 2) | |
4002 | target = 2; | |
4003 | target *= reshape->backup_blocks; | |
4004 | ||
4005 | /* For externally managed metadata we always need to suspend IO to | |
4006 | * the area being reshaped so we regularly push suspend_point forward. | |
4007 | * For native metadata we only need the suspend if we are going to do | |
4008 | * a backup. | |
4009 | */ | |
4010 | if (advancing) { | |
4011 | if ((need_backup > info->reshape_progress || | |
4012 | info->array.major_version < 0) && | |
4013 | *suspend_point < info->reshape_progress + target) { | |
4014 | if (need_backup < *suspend_point + 2 * target) | |
4015 | *suspend_point = need_backup; | |
4016 | else if (*suspend_point + 2 * target < array_size) | |
4017 | *suspend_point += 2 * target; | |
4018 | else | |
4019 | *suspend_point = array_size; | |
4020 | sysfs_set_num(info, NULL, "suspend_hi", *suspend_point); | |
4021 | if (max_progress > *suspend_point) | |
4022 | max_progress = *suspend_point; | |
4023 | } | |
4024 | } else { | |
4025 | if (info->array.major_version >= 0) { | |
4026 | /* Only need to suspend when about to backup */ | |
4027 | if (info->reshape_progress < need_backup * 2 && | |
4028 | *suspend_point > 0) { | |
4029 | *suspend_point = 0; | |
4030 | sysfs_set_num(info, NULL, "suspend_lo", 0); | |
4031 | sysfs_set_num(info, NULL, "suspend_hi", | |
4032 | need_backup); | |
4033 | } | |
4034 | } else { | |
4035 | /* Need to suspend continually */ | |
4036 | if (info->reshape_progress < *suspend_point) | |
4037 | *suspend_point = info->reshape_progress; | |
4038 | if (*suspend_point + target < info->reshape_progress) | |
4039 | /* No need to move suspend region yet */; | |
4040 | else { | |
4041 | if (*suspend_point >= 2 * target) | |
4042 | *suspend_point -= 2 * target; | |
4043 | else | |
4044 | *suspend_point = 0; | |
4045 | sysfs_set_num(info, NULL, "suspend_lo", | |
4046 | *suspend_point); | |
4047 | } | |
4048 | if (max_progress < *suspend_point) | |
4049 | max_progress = *suspend_point; | |
4050 | } | |
4051 | } | |
4052 | ||
4053 | /* now set sync_max to allow that progress. sync_max, like | |
4054 | * sync_completed is a count of sectors written per device, so | |
4055 | * we find the difference between max_progress and the start point, | |
4056 | * and divide that by after.data_disks to get a sync_max | |
4057 | * number. | |
4058 | * At the same time we convert wait_point to a similar number | |
4059 | * for comparing against sync_completed. | |
4060 | */ | |
4061 | /* scale down max_progress to per_disk */ | |
4062 | max_progress /= reshape->after.data_disks; | |
4063 | /* | |
4064 | * Round to chunk size as some kernels give an erroneously | |
4065 | * high number | |
4066 | */ | |
4067 | max_progress /= info->new_chunk/512; | |
4068 | max_progress *= info->new_chunk/512; | |
4069 | /* And round to old chunk size as the kernel wants that */ | |
4070 | max_progress /= info->array.chunk_size/512; | |
4071 | max_progress *= info->array.chunk_size/512; | |
4072 | /* Limit progress to the whole device */ | |
4073 | if (max_progress > info->component_size) | |
4074 | max_progress = info->component_size; | |
4075 | wait_point /= reshape->after.data_disks; | |
4076 | if (!advancing) { | |
4077 | /* switch from 'device offset' to 'processed block count' */ | |
4078 | max_progress = info->component_size - max_progress; | |
4079 | wait_point = info->component_size - wait_point; | |
4080 | } | |
4081 | ||
4082 | if (!*frozen) | |
4083 | sysfs_set_num(info, NULL, "sync_max", max_progress); | |
4084 | ||
4085 | /* Now wait. If we have already reached the point that we were | |
4086 | * asked to wait to, don't wait at all, else wait for any change. | |
4087 | * We need to select on 'sync_completed' as that is the place that | |
4088 | * notifications happen, but we are really interested in | |
4089 | * 'reshape_position' | |
4090 | */ | |
4091 | fd = sysfs_get_fd(info, NULL, "sync_completed"); | |
4092 | if (fd < 0) | |
4093 | goto check_progress; | |
4094 | ||
4095 | if (sysfs_fd_get_ll(fd, &completed) < 0) | |
4096 | goto check_progress; | |
4097 | ||
4098 | while (completed < max_progress && completed < wait_point) { | |
4099 | /* Check that sync_action is still 'reshape' to avoid | |
4100 | * waiting forever on a dead array | |
4101 | */ | |
4102 | char action[SYSFS_MAX_BUF_SIZE]; | |
4103 | ||
4104 | if (sysfs_get_str(info, NULL, "sync_action", action, sizeof(action)) <= 0) | |
4105 | break; | |
4106 | /* Some kernels reset 'sync_completed' to zero | |
4107 | * before setting 'sync_action' to 'idle'. | |
4108 | * So we need these extra tests. | |
4109 | */ | |
4110 | if (completed == 0 && advancing && | |
4111 | strncmp(action, "idle", 4) == 0 && | |
4112 | info->reshape_progress > 0) { | |
4113 | info->reshape_progress = need_backup; | |
4114 | break; | |
4115 | } | |
4116 | if (completed == 0 && !advancing && | |
4117 | strncmp(action, "idle", 4) == 0 && | |
4118 | info->reshape_progress < | |
4119 | (info->component_size * reshape->after.data_disks)) { | |
4120 | info->reshape_progress = need_backup; | |
4121 | break; | |
4122 | } | |
4123 | if (strncmp(action, "reshape", 7) != 0) | |
4124 | break; | |
4125 | sysfs_wait(fd, NULL); | |
4126 | if (sysfs_fd_get_ll(fd, &completed) < 0) | |
4127 | goto check_progress; | |
4128 | } | |
4129 | /* Some kernels reset 'sync_completed' to zero, | |
4130 | * so we need the real point that md has reached. | 
4131 | * So in that case, read 'reshape_position' from sysfs. | |
4132 | */ | |
4133 | if (completed == 0) { | |
4134 | unsigned long long reshapep; | |
4135 | char action[SYSFS_MAX_BUF_SIZE]; | |
4136 | if (sysfs_get_str(info, NULL, "sync_action", action, sizeof(action)) > 0 && | |
4137 | strncmp(action, "idle", 4) == 0 && | |
4138 | sysfs_get_ll(info, NULL, | |
4139 | "reshape_position", &reshapep) == 0) | |
4140 | *reshape_completed = reshapep; | |
4141 | } else { | |
4142 | /* some kernels can give an incorrectly high | |
4143 | * 'completed' number, so round down */ | |
4144 | completed /= (info->new_chunk/512); | |
4145 | completed *= (info->new_chunk/512); | |
4146 | /* Convert 'completed' back in to a 'progress' number */ | |
4147 | completed *= reshape->after.data_disks; | |
4148 | if (!advancing) | |
4149 | completed = (info->component_size | |
4150 | * reshape->after.data_disks | |
4151 | - completed); | |
4152 | *reshape_completed = completed; | |
4153 | } | |
4154 | ||
4155 | close(fd); | |
4156 | ||
4157 | /* We return the need_backup flag. Caller will decide | |
4158 | * how much - a multiple of ->backup_blocks up to *suspend_point | |
4159 | */ | |
4160 | if (advancing) | |
4161 | return need_backup > info->reshape_progress; | |
4162 | else | |
4163 | return need_backup >= info->reshape_progress; | |
4164 | ||
4165 | check_progress: | |
4166 | /* if we couldn't read a number from sync_completed, then | |
4167 | * either the reshape did complete, or it aborted. | |
4168 | * We can tell which by checking for 'none' in reshape_position. | |
4169 | * If it did abort, then it might immediately restart if | 
4170 | * it was just a device failure that leaves us degraded but | |
4171 | * functioning. | |
4172 | */ | |
4173 | if (sysfs_get_str(info, NULL, "reshape_position", buf, sizeof(buf)) < 0 || | |
4174 | str_is_none(buf) == false) { | |
4175 | /* The abort might only be temporary. Wait up to 10 | |
4176 | * seconds for fd to contain a valid number again. | |
4177 | */ | |
4178 | int wait = 10000; | |
4179 | int rv = -2; | |
4180 | unsigned long long new_sync_max; | |
4181 | while (fd >= 0 && rv < 0 && wait > 0) { | |
4182 | if (sysfs_wait(fd, &wait) != 1) | |
4183 | break; | |
4184 | switch (sysfs_fd_get_ll(fd, &completed)) { | |
4185 | case 0: | |
4186 | /* all good again */ | |
4187 | rv = 1; | |
4188 | /* If "sync_max" is no longer max_progress | |
4189 | * we need to freeze things | |
4190 | */ | |
4191 | sysfs_get_ll(info, NULL, "sync_max", | |
4192 | &new_sync_max); | |
4193 | *frozen = (new_sync_max != max_progress); | |
4194 | break; | |
4195 | case -2: /* read error - abort */ | |
4196 | wait = 0; | |
4197 | break; | |
4198 | } | |
4199 | } | |
4200 | if (fd >= 0) | |
4201 | close(fd); | |
4202 | return rv; /* abort */ | |
4203 | } else { | |
4204 | /* Maybe racing with array shutdown - check state */ | |
4205 | if (fd >= 0) | |
4206 | close(fd); | |
4207 | if (sysfs_get_str(info, NULL, "array_state", buf, | |
4208 | sizeof(buf)) < 0 || | |
4209 | strncmp(buf, "inactive", 8) == 0 || | |
4210 | strncmp(buf, "clear",5) == 0) | |
4211 | return -2; /* abort */ | |
4212 | return -1; /* complete */ | |
4213 | } | |
4214 | } | |
4215 | ||
4216 | /* FIXME return status is never checked */ | |
4217 | static int grow_backup(struct mdinfo *sra, | |
4218 | unsigned long long offset, /* per device */ | |
4219 | unsigned long stripes, /* per device, in old chunks */ | |
4220 | int *sources, unsigned long long *offsets, | |
4221 | int disks, int chunk, int level, int layout, | |
4222 | int dests, int *destfd, unsigned long long *destoffsets, | |
4223 | int part, int *degraded, | |
4224 | char *buf) | |
4225 | { | |
4226 | /* Back up 'stripes' stripes starting at per-device offset 'offset', | 
4227 | * to storage 'destfd' (offset 'destoffsets'), after first | |
4228 | * suspending IO. Then allow resync to continue | |
4229 | * over the suspended section. | |
4230 | * Use part 'part' of the backup-super-block. | |
4231 | */ | |
4232 | int odata = disks; | |
4233 | int rv = 0; | |
4234 | int i; | |
4235 | unsigned long long ll; | |
4236 | int new_degraded; | |
4237 | //printf("offset %llu\n", offset); | |
4238 | if (level >= 4) | |
4239 | odata--; | |
4240 | if (level == 6) | |
4241 | odata--; | |
4242 | ||
4243 | /* Check that array hasn't become degraded, else we might backup the wrong data */ | |
4244 | if (sysfs_get_ll(sra, NULL, "degraded", &ll) < 0) | |
4245 | return -1; /* FIXME this error is ignored */ | |
4246 | new_degraded = (int)ll; | |
4247 | if (new_degraded != *degraded) { | |
4248 | /* check each device to ensure it is still working */ | |
4249 | struct mdinfo *sd; | |
4250 | for (sd = sra->devs ; sd ; sd = sd->next) { | |
4251 | if (sd->disk.state & (1<<MD_DISK_FAULTY)) | |
4252 | continue; | |
4253 | if (sd->disk.state & (1<<MD_DISK_SYNC)) { | |
4254 | char sbuf[SYSFS_MAX_BUF_SIZE]; | |
4255 | ||
4256 | if (sysfs_get_str(sra, sd, "state", | |
4257 | sbuf, sizeof(sbuf)) < 0 || | |
4258 | strstr(sbuf, "faulty") || | |
4259 | strstr(sbuf, "in_sync") == NULL) { | |
4260 | /* this device is dead */ | |
4261 | sd->disk.state = (1<<MD_DISK_FAULTY); | |
4262 | if (sd->disk.raid_disk >= 0 && | |
4263 | sources[sd->disk.raid_disk] >= 0) { | |
4264 | close(sources[sd->disk.raid_disk]); | |
4265 | sources[sd->disk.raid_disk] = -1; | |
4266 | } | |
4267 | } | |
4268 | } | |
4269 | } | |
4270 | *degraded = new_degraded; | |
4271 | } | |
4272 | if (part) { | |
4273 | bsb.arraystart2 = __cpu_to_le64(offset * odata); | |
4274 | bsb.length2 = __cpu_to_le64(stripes * (chunk/512) * odata); | |
4275 | } else { | |
4276 | bsb.arraystart = __cpu_to_le64(offset * odata); | |
4277 | bsb.length = __cpu_to_le64(stripes * (chunk/512) * odata); | |
4278 | } | |
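| /* using the second slot turns the magic into "md_backup_data-2" */ | 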
4279 | if (part) | |
4280 | bsb.magic[15] = '2'; | |
4281 | for (i = 0; i < dests; i++) | |
4282 | if (part) | |
4283 | lseek64(destfd[i], destoffsets[i] + | |
4284 | __le64_to_cpu(bsb.devstart2)*512, 0); | |
4285 | else | |
4286 | lseek64(destfd[i], destoffsets[i], 0); | |
4287 | ||
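| /* save_stripes() takes the array start and length in bytes, so scale the per-device sector offset by 512 and by the number of data disks */ | 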
4288 | rv = save_stripes(sources, offsets, disks, chunk, level, layout, | |
4289 | dests, destfd, offset * 512 * odata, | |
4290 | stripes * chunk * odata, buf); | |
4291 | ||
4292 | if (rv) | |
4293 | return rv; | |
4294 | bsb.mtime = __cpu_to_le64(time(0)); | |
4295 | for (i = 0; i < dests; i++) { | |
4296 | bsb.devstart = __cpu_to_le64(destoffsets[i]/512); | |
4297 | ||
4298 | bsb.sb_csum = bsb_csum((char*)&bsb, | |
4299 | ((char*)&bsb.sb_csum)-((char*)&bsb)); | |
4300 | if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0) | |
4301 | bsb.sb_csum2 = bsb_csum((char*)&bsb, | |
4302 | ((char*)&bsb.sb_csum2)-((char*)&bsb)); | |
4303 | ||
4304 | rv = -1; | |
4305 | if ((unsigned long long)lseek64(destfd[i], | |
4306 | destoffsets[i] - 4096, 0) != | |
4307 | destoffsets[i] - 4096) | |
4308 | break; | |
4309 | if (write(destfd[i], &bsb, 512) != 512) | |
4310 | break; | |
4311 | if (destoffsets[i] > 4096) { | |
4312 | if ((unsigned long long)lseek64(destfd[i], destoffsets[i]+stripes*chunk*odata, 0) != | |
4313 | destoffsets[i]+stripes*chunk*odata) | |
4314 | break; | |
4315 | if (write(destfd[i], &bsb, 512) != 512) | |
4316 | break; | |
4317 | } | |
4318 | fsync(destfd[i]); | |
4319 | rv = 0; | |
4320 | } | |
4321 | ||
4322 | return rv; | |
4323 | } | |
4324 | ||
4325 | /* in 2.6.30, the value reported by sync_completed can be | |
4326 | * less than it should be by one stripe. | 
4327 | * This only happens when reshape hits sync_max and pauses. | 
4328 | * So allow wait_backup to either extend sync_max further | 
4329 | * than strictly necessary, or return before the | 
4330 | * sync has got quite as far as we would really like. | 
4331 | * This is what 'blocks2' is for. | 
4332 | * The various callers give appropriate values so that | 
4333 | * everything works. | 
4334 | */ | |
4335 | /* FIXME return value is often ignored */ | |
4336 | static int forget_backup(int dests, int *destfd, | |
4337 | unsigned long long *destoffsets, | |
4338 | int part) | |
4339 | { | |
4340 | /* | |
4341 | * Erase backup 'part' (which is 0 or 1) | |
4342 | */ | |
4343 | int i; | |
4344 | int rv; | |
4345 | ||
4346 | if (part) { | |
4347 | bsb.arraystart2 = __cpu_to_le64(0); | |
4348 | bsb.length2 = __cpu_to_le64(0); | |
4349 | } else { | |
4350 | bsb.arraystart = __cpu_to_le64(0); | |
4351 | bsb.length = __cpu_to_le64(0); | |
4352 | } | |
4353 | bsb.mtime = __cpu_to_le64(time(0)); | |
4354 | rv = 0; | |
4355 | for (i = 0; i < dests; i++) { | |
4356 | bsb.devstart = __cpu_to_le64(destoffsets[i]/512); | |
4357 | bsb.sb_csum = bsb_csum((char*)&bsb, | |
4358 | ((char*)&bsb.sb_csum)-((char*)&bsb)); | |
4359 | if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0) | |
4360 | bsb.sb_csum2 = bsb_csum((char*)&bsb, | |
4361 | ((char*)&bsb.sb_csum2)-((char*)&bsb)); | |
4362 | if ((unsigned long long)lseek64(destfd[i], destoffsets[i]-4096, 0) != | |
4363 | destoffsets[i]-4096) | |
4364 | rv = -1; | |
4365 | if (rv == 0 && write(destfd[i], &bsb, 512) != 512) | |
4366 | rv = -1; | |
4367 | fsync(destfd[i]); | |
4368 | } | |
4369 | return rv; | |
4370 | } | |
4371 | ||
4372 | static void fail(char *msg) | |
4373 | { | |
4374 | int rv; | |
4375 | rv = (write(2, msg, strlen(msg)) != (int)strlen(msg)); | |
4376 | rv |= (write(2, "\n", 1) != 1); | |
4377 | exit(rv ? 1 : 2); | |
4378 | } | |
4379 | ||
4380 | static char *abuf, *bbuf; | |
4381 | static unsigned long long abuflen; | |
4382 | static void validate(int afd, int bfd, unsigned long long offset) | |
4383 | { | |
4384 | /* Check the data in the backup against the array. | 
4385 | * This is only used for regression testing and should not | |
4386 | * be used while the array is active | |
4387 | */ | |
4388 | if (afd < 0) | |
4389 | return; | |
4390 | if (lseek64(bfd, offset - 4096, 0) < 0) { | |
4391 | pr_err("lseek64 fails %d:%s\n", errno, strerror(errno)); | |
4392 | return; | |
4393 | } | |
4394 | if (read(bfd, &bsb2, 512) != 512) | |
4395 | fail("cannot read bsb"); | |
4396 | if (bsb2.sb_csum != bsb_csum((char*)&bsb2, | |
4397 | ((char*)&bsb2.sb_csum)-((char*)&bsb2))) | |
4398 | fail("first csum bad"); | |
4399 | if (memcmp(bsb2.magic, "md_backup_data", 14) != 0) | |
4400 | fail("magic is bad"); | |
4401 | if (memcmp(bsb2.magic, "md_backup_data-2", 16) == 0 && | |
4402 | bsb2.sb_csum2 != bsb_csum((char*)&bsb2, | |
4403 | ((char*)&bsb2.sb_csum2)-((char*)&bsb2))) | |
4404 | fail("second csum bad"); | |
4405 | ||
4406 | if (__le64_to_cpu(bsb2.devstart)*512 != offset) | |
4407 | fail("devstart is wrong"); | |
4408 | ||
4409 | if (bsb2.length) { | |
4410 | unsigned long long len = __le64_to_cpu(bsb2.length)*512; | |
4411 | ||
4412 | if (abuflen < len) { | |
4413 | free(abuf); | |
4414 | free(bbuf); | |
4415 | abuflen = len; | |
4416 | if (posix_memalign((void**)&abuf, 4096, abuflen) || | |
4417 | posix_memalign((void**)&bbuf, 4096, abuflen)) { | |
4418 | abuflen = 0; | |
4419 | /* just stop validating on mem-alloc failure */ | |
4420 | return; | |
4421 | } | |
4422 | } | |
4423 | ||
4424 | if (lseek64(bfd, offset, 0) < 0) { | |
4425 | pr_err("lseek64 fails %d:%s\n", errno, strerror(errno)); | |
4426 | goto out; | |
4427 | } | |
4428 | if ((unsigned long long)read(bfd, bbuf, len) != len) { | |
4429 | //printf("len %llu\n", len); | |
4430 | fail("read first backup failed"); | |
4431 | } | |
4432 | ||
4433 | if (lseek64(afd, __le64_to_cpu(bsb2.arraystart)*512, 0) < 0) { | |
4434 | pr_err("lseek64 fails %d:%s\n", errno, strerror(errno)); | |
4435 | goto out; | |
4436 | } | |
4437 | if ((unsigned long long)read(afd, abuf, len) != len) | |
4438 | fail("read first from array failed"); | |
4439 | if (memcmp(bbuf, abuf, len) != 0) | |
4440 | fail("data1 compare failed"); | |
4441 | } | |
4442 | if (bsb2.length2) { | |
4443 | unsigned long long len = __le64_to_cpu(bsb2.length2)*512; | |
4444 | ||
4445 | if (abuflen < len) { | |
4446 | free(abuf); | |
4447 | free(bbuf); | |
4448 | abuflen = len; | |
4449 | abuf = xmalloc(abuflen); | |
4450 | bbuf = xmalloc(abuflen); | |
4451 | } | |
4452 | ||
4453 | if (lseek64(bfd, offset+__le64_to_cpu(bsb2.devstart2)*512, 0) < 0) { | |
4454 | pr_err("lseek64 fails %d:%s\n", errno, strerror(errno)); | |
4455 | goto out; | |
4456 | } | |
4457 | if ((unsigned long long)read(bfd, bbuf, len) != len) | |
4458 | fail("read second backup failed"); | |
4459 | if (lseek64(afd, __le64_to_cpu(bsb2.arraystart2)*512, 0) < 0) { | |
4460 | pr_err("lseek64 fails %d:%s\n", errno, strerror(errno)); | |
4461 | goto out; | |
4462 | } | |
4463 | if ((unsigned long long)read(afd, abuf, len) != len) | |
4464 | fail("read second from array failed"); | |
4465 | if (memcmp(bbuf, abuf, len) != 0) | |
4466 | fail("data2 compare failed"); | |
4467 | } | |
4468 | out: | 
4469 | free(abuf); | 
4470 | free(bbuf); | 
| /* reset the static buffers so the next call re-allocates instead of reusing freed memory */ | 
| abuf = NULL; | 
| bbuf = NULL; | 
| abuflen = 0; | 
4471 | return; | 
4472 | } | |
4473 | ||
4474 | int child_monitor(int afd, struct mdinfo *sra, struct reshape *reshape, | |
4475 | struct supertype *st, unsigned long blocks, | |
4476 | int *fds, unsigned long long *offsets, | |
4477 | int dests, int *destfd, unsigned long long *destoffsets) | |
4478 | { | |
4479 | /* Monitor a reshape where backup is being performed using | |
4480 | * 'native' mechanism - either to a backup file, or | |
4481 | * to some space in a spare. | |
4482 | */ | |
4483 | char *buf; | |
4484 | int degraded = -1; | |
4485 | unsigned long long suspend_point, array_size; | |
4486 | unsigned long long backup_point, wait_point; | |
4487 | unsigned long long reshape_completed; | |
4488 | int done = 0; | |
4489 | int increasing = reshape->after.data_disks >= | |
4490 | reshape->before.data_disks; | |
4491 | int part = 0; /* The next part of the backup area to fill. It | |
4492 | * may already be full, so we need to check */ | |
4493 | int level = reshape->level; | |
4494 | int layout = reshape->before.layout; | |
4495 | int data = reshape->before.data_disks; | |
4496 | int disks = reshape->before.data_disks + reshape->parity; | |
4497 | int chunk = sra->array.chunk_size; | |
4498 | struct mdinfo *sd; | |
4499 | unsigned long stripes; | |
4500 | int uuid[4]; | |
4501 | int frozen = 0; | |
4502 | ||
4503 | /* set up the backup-super-block. This requires the | |
4504 | * uuid from the array. | |
4505 | */ | |
4506 | /* Find a superblock */ | |
4507 | for (sd = sra->devs; sd; sd = sd->next) { | |
4508 | char *dn; | |
4509 | int devfd; | |
4510 | int ok; | |
4511 | if (sd->disk.state & (1<<MD_DISK_FAULTY)) | |
4512 | continue; | |
4513 | dn = map_dev(sd->disk.major, sd->disk.minor, 1); | |
4514 | devfd = dev_open(dn, O_RDONLY); | |
4515 | if (devfd < 0) | |
4516 | continue; | |
4517 | ok = st->ss->load_super(st, devfd, NULL); | |
4518 | close(devfd); | |
4519 | if (ok == 0) | |
4520 | break; | |
4521 | } | |
4522 | if (!sd) { | |
4523 | pr_err("Cannot find a superblock\n"); | |
4524 | return 0; | |
4525 | } | |
4526 | ||
4527 | memset(&bsb, 0, 512); | |
4528 | memcpy(bsb.magic, "md_backup_data-1", 16); | |
4529 | st->ss->uuid_from_super(st, uuid); | |
4530 | memcpy(bsb.set_uuid, uuid, 16); | |
4531 | bsb.mtime = __cpu_to_le64(time(0)); | |
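| /* the second backup slot starts 'blocks' sectors into each destination's backup area */ | 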
4532 | bsb.devstart2 = blocks; | |
4533 | ||
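| /* convert one backup unit ('blocks' array sectors) into whole old-geometry stripes per device */ | 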
4534 | stripes = blocks / (sra->array.chunk_size/512) / | |
4535 | reshape->before.data_disks; | |
4536 | ||
4537 | if (posix_memalign((void**)&buf, 4096, disks * chunk)) | |
4538 | /* Don't start the 'reshape' */ | |
4539 | return 0; | |
4540 | ||
4541 | if (increasing) { | |
4542 | array_size = sra->component_size * reshape->after.data_disks; | |
4543 | backup_point = sra->reshape_progress; | |
4544 | suspend_point = 0; | |
4545 | } else { | |
4546 | array_size = sra->component_size * reshape->before.data_disks; | |
4547 | backup_point = reshape->backup_blocks; | |
4548 | suspend_point = array_size; | |
4549 | } | |
4550 | ||
4551 | while (!done) { | |
4552 | int rv; | |
4553 | ||
4554 | /* Want to return as soon as the oldest backup slot can | 
4555 | * be released as that allows us to start backing up | |
4556 | * some more, providing suspend_point has been | |
4557 | * advanced, which it should have. | |
4558 | */ | |
4559 | if (increasing) { | |
4560 | wait_point = array_size; | |
4561 | if (part == 0 && __le64_to_cpu(bsb.length) > 0) | |
4562 | wait_point = (__le64_to_cpu(bsb.arraystart) + | |
4563 | __le64_to_cpu(bsb.length)); | |
4564 | if (part == 1 && __le64_to_cpu(bsb.length2) > 0) | |
4565 | wait_point = (__le64_to_cpu(bsb.arraystart2) + | |
4566 | __le64_to_cpu(bsb.length2)); | |
4567 | } else { | |
4568 | wait_point = 0; | |
4569 | if (part == 0 && __le64_to_cpu(bsb.length) > 0) | |
4570 | wait_point = __le64_to_cpu(bsb.arraystart); | |
4571 | if (part == 1 && __le64_to_cpu(bsb.length2) > 0) | |
4572 | wait_point = __le64_to_cpu(bsb.arraystart2); | |
4573 | } | |
4574 | ||
4575 | reshape_completed = sra->reshape_progress; | |
4576 | rv = progress_reshape(sra, reshape, | |
4577 | backup_point, wait_point, | |
4578 | &suspend_point, &reshape_completed, | |
4579 | &frozen); | |
4580 | /* external metadata would need to ping_monitor here */ | |
4581 | sra->reshape_progress = reshape_completed; | |
4582 | ||
4583 | /* Clear any backup region that is before 'here' */ | |
4584 | if (increasing) { | |
4585 | if (__le64_to_cpu(bsb.length) > 0 && | |
4586 | reshape_completed >= (__le64_to_cpu(bsb.arraystart) + | |
4587 | __le64_to_cpu(bsb.length))) | |
4588 | forget_backup(dests, destfd, | |
4589 | destoffsets, 0); | |
4590 | if (__le64_to_cpu(bsb.length2) > 0 && | |
4591 | reshape_completed >= (__le64_to_cpu(bsb.arraystart2) + | |
4592 | __le64_to_cpu(bsb.length2))) | |
4593 | forget_backup(dests, destfd, | |
4594 | destoffsets, 1); | |
4595 | } else { | |
4596 | if (__le64_to_cpu(bsb.length) > 0 && | |
4597 | reshape_completed <= (__le64_to_cpu(bsb.arraystart))) | |
4598 | forget_backup(dests, destfd, | |
4599 | destoffsets, 0); | |
4600 | if (__le64_to_cpu(bsb.length2) > 0 && | |
4601 | reshape_completed <= (__le64_to_cpu(bsb.arraystart2))) | |
4602 | forget_backup(dests, destfd, | |
4603 | destoffsets, 1); | |
4604 | } | |
4605 | if (sigterm) | |
4606 | rv = -2; | |
4607 | if (rv < 0) { | |
4608 | if (rv == -1) | |
4609 | done = 1; | |
4610 | break; | |
4611 | } | |
4612 | if (rv == 0 && increasing && !st->ss->external) { | |
4613 | /* No longer need to monitor this reshape */ | |
4614 | sysfs_set_str(sra, NULL, "sync_max", "max"); | |
4615 | done = 1; | |
4616 | break; | |
4617 | } | |
4618 | ||
4619 | while (rv) { | |
4620 | unsigned long long offset; | |
4621 | unsigned long actual_stripes; | |
4622 | /* Need to backup some data. | |
4623 | * If 'part' is not used and the desired | |
4624 | * backup size is suspended, do a backup, | |
4625 | * then consider the next part. | |
4626 | */ | |
4627 | /* Check that 'part' is unused */ | |
4628 | if (part == 0 && __le64_to_cpu(bsb.length) != 0) | |
4629 | break; | |
4630 | if (part == 1 && __le64_to_cpu(bsb.length2) != 0) | |
4631 | break; | |
4632 | ||
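| /* backup_point is an array address in sectors; divide by the number of data disks to get a per-device offset */ | 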
4633 | offset = backup_point / data; | |
4634 | actual_stripes = stripes; | |
4635 | if (increasing) { | |
4636 | if (offset + actual_stripes * (chunk/512) > | |
4637 | sra->component_size) | |
4638 | actual_stripes = ((sra->component_size - offset) | |
4639 | / (chunk/512)); | |
4640 | if (offset + actual_stripes * (chunk/512) > | |
4641 | suspend_point/data) | |
4642 | break; | |
4643 | } else { | |
4644 | if (offset < actual_stripes * (chunk/512)) | |
4645 | actual_stripes = offset / (chunk/512); | |
4646 | offset -= actual_stripes * (chunk/512); | |
4647 | if (offset < suspend_point/data) | |
4648 | break; | |
4649 | } | |
4650 | if (actual_stripes == 0) | |
4651 | break; | |
4652 | grow_backup(sra, offset, actual_stripes, fds, offsets, | |
4653 | disks, chunk, level, layout, dests, destfd, | |
4654 | destoffsets, part, &degraded, buf); | 
4655 | validate(afd, destfd[0], destoffsets[0]); | |
4656 | /* record where 'part' is up to */ | |
4657 | part = !part; | |
4658 | if (increasing) | |
4659 | backup_point += actual_stripes * (chunk/512) * data; | |
4660 | else | |
4661 | backup_point -= actual_stripes * (chunk/512) * data; | |
4662 | } | |
4663 | } | |
4664 | ||
4665 | /* FIXME maybe call progress_reshape one more time instead */ | |
4666 | /* remove any remaining suspension */ | |
4667 | sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL); | |
4668 | sysfs_set_num(sra, NULL, "suspend_hi", 0); | |
4669 | sysfs_set_num(sra, NULL, "suspend_lo", 0); | |
4670 | sysfs_set_num(sra, NULL, "sync_min", 0); | |
4671 | ||
4672 | free(buf); | |
4673 | return done; | |
4674 | } | |
4675 | ||
4676 | /* | |
4677 | * If any spare contains md_backup_data-1 which is recent wrt mtime, | 
4678 | * write that data into the array and update the super blocks with | |
4679 | * the new reshape_progress | |
4680 | */ | |
4681 | int Grow_restart(struct supertype *st, struct mdinfo *info, int *fdlist, | |
4682 | int cnt, char *backup_file, int verbose) | |
4683 | { | |
4684 | int i, j; | |
4685 | int old_disks; | |
4686 | unsigned long long *offsets; | |
4687 | unsigned long long nstripe, ostripe; | |
4688 | int ndata, odata; | |
4689 | int fd, backup_fd = -1; | |
4690 | ||
4691 | odata = info->array.raid_disks - info->delta_disks - 1; | |
4692 | if (info->array.level == 6) | |
4693 | odata--; /* number of data disks */ | |
4694 | ndata = info->array.raid_disks - 1; | |
4695 | if (info->new_level == 6) | |
4696 | ndata--; | |
4697 | ||
4698 | old_disks = info->array.raid_disks - info->delta_disks; | |
4699 | ||
4700 | if (info->delta_disks <= 0) | |
4701 | /* Didn't grow, so the backup file must have | |
4702 | * been used | |
4703 | */ | |
4704 | old_disks = cnt; | |
4705 | ||
4706 | if (backup_file) { | |
4707 | backup_fd = open(backup_file, O_RDONLY); | |
4708 | if (!is_fd_valid(backup_fd)) { | |
4709 | pr_err("Can't open backup file %s : %s\n", | |
4710 | backup_file, strerror(errno)); | |
4711 | return -EINVAL; | |
4712 | } | |
4713 | } | |
4714 | ||
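/* If a backup file was supplied, the scan below starts one slot
 * earlier and treats index old_disks-1 as the backup file itself
 * rather than a device from fdlist[]. */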
4715 | for (i=old_disks-(backup_file?1:0); i<cnt; i++) { | |
4716 | struct mdinfo dinfo; | |
4717 | int bsbsize; | |
4718 | char *devname, namebuf[20]; | |
4719 | unsigned long long lo, hi; | |
4720 | ||
4721 | /* This was a spare and may have some saved data on it. | |
4722 | * Load the superblock, find and load the | |
4723 | * backup_super_block. | |
4724 | * If either fails, go on to the next device. | |
4725 | * If the backup contains no new info, move on; | |
4726 | * otherwise restore the data and update all superblocks. | |
4727 | */ | |
4728 | if (i == old_disks-1) { | |
4729 | if (!is_fd_valid(backup_fd)) | |
4730 | continue; | |
4731 | fd = backup_fd; | |
4732 | devname = backup_file; | |
4733 | } else { | |
4734 | fd = fdlist[i]; | |
4735 | if (fd < 0) | |
4736 | continue; | |
4737 | if (st->ss->load_super(st, fd, NULL)) | |
4738 | continue; | |
4739 | ||
4740 | st->ss->getinfo_super(st, &dinfo, NULL); | |
4741 | st->ss->free_super(st); | |
4742 | ||
4743 | if (lseek64(fd, | |
4744 | (dinfo.data_offset + dinfo.component_size - 8) <<9, | |
4745 | 0) < 0) { | |
4746 | pr_err("Cannot seek on device %d\n", i); | |
4747 | continue; /* Cannot seek */ | |
4748 | } | |
4749 | sprintf(namebuf, "device-%d", i); | |
4750 | devname = namebuf; | |
4751 | } | |
4752 | if (read(fd, &bsb, sizeof(bsb)) != sizeof(bsb)) { | |
4753 | if (verbose) | |
4754 | pr_err("Cannot read from %s\n", devname); | |
4755 | continue; /* Cannot read */ | |
4756 | } | |
4757 | if (memcmp(bsb.magic, "md_backup_data-1", 16) != 0 && | |
4758 | memcmp(bsb.magic, "md_backup_data-2", 16) != 0) { | |
4759 | if (verbose) | |
4760 | pr_err("No backup metadata on %s\n", devname); | |
4761 | continue; | |
4762 | } | |
4763 | if (bsb.sb_csum != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum)-((char*)&bsb))) { | |
4764 | if (verbose) | |
4765 | pr_err("Bad backup-metadata checksum on %s\n", | |
4766 | devname); | |
4767 | continue; /* bad checksum */ | |
4768 | } | |
4769 | if (memcmp(bsb.magic, "md_backup_data-2", 16) == 0 && | |
4770 | bsb.sb_csum2 != bsb_csum((char*)&bsb, ((char*)&bsb.sb_csum2)-((char*)&bsb))) { | |
4771 | if (verbose) | |
4772 | pr_err("Bad backup-metadata checksum2 on %s\n", | |
4773 | devname); | |
4774 | continue; /* Bad second checksum */ | |
4775 | } | |
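/* Note on the checksum calls above: the length passed to bsb_csum() is
 * the byte offset of the checksum field itself within the superblock,
 * so each checksum covers every field that precedes it (sb_csum covers
 * the first backup range; sb_csum2, checked only for the "-2" format,
 * additionally covers the second-range fields). */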
4776 | if (memcmp(bsb.set_uuid,info->uuid, 16) != 0) { | |
4777 | if (verbose) | |
4778 | pr_err("Wrong uuid on backup-metadata on %s\n", | |
4779 | devname); | |
4780 | continue; /* Wrong uuid */ | |
4781 | } | |
4782 | ||
4783 | /* | |
4784 | * array utime and backup-mtime should be updated at | |
4785 | * much the same time, but it seems that sometimes | |
4786 | * they aren't... So allow considerable flexibility in | |
4787 | * matching, and allow this test to be overridden by | |
4788 | * an environment variable. | |
4789 | */ | |
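/* Concretely: the backup is accepted when the array's utime lies
 * roughly between ten minutes before and two hours after the backup's
 * mtime; anything outside that window is rejected unless
 * MDADM_GROW_ALLOW_OLD is set in the environment. */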
4790 | if(time_after(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) + 2*60*60) || | |
4791 | time_before(info->array.utime, (unsigned int)__le64_to_cpu(bsb.mtime) - 10*60)) { | |
4792 | if (check_env("MDADM_GROW_ALLOW_OLD")) { | |
4793 | pr_err("accepting backup with timestamp %lu for array with timestamp %lu\n", | |
4794 | (unsigned long)__le64_to_cpu(bsb.mtime), | |
4795 | (unsigned long)info->array.utime); | |
4796 | } else { | |
4797 | pr_err("too-old timestamp on backup-metadata on %s\n", devname); | |
4798 | pr_err("If you think it is should be safe, try 'export MDADM_GROW_ALLOW_OLD=1'\n"); | |
4799 | continue; /* time stamp is too bad */ | |
4800 | } | |
4801 | } | |
4802 | ||
4803 | if (bsb.magic[15] == '1') { | |
4804 | if (bsb.length == 0) | |
4805 | continue; | |
4806 | if (info->delta_disks >= 0) { | |
4807 | /* reshape_progress is increasing */ | |
4808 | if (__le64_to_cpu(bsb.arraystart) | |
4809 | + __le64_to_cpu(bsb.length) | |
4810 | < info->reshape_progress) { | |
4811 | nonew: | |
4812 | if (verbose) | |
4813 | pr_err("backup-metadata found on %s but is not needed\n", devname); | |
4814 | continue; /* No new data here */ | |
4815 | } | |
4816 | } else { | |
4817 | /* reshape_progress is decreasing */ | |
4818 | if (__le64_to_cpu(bsb.arraystart) >= | |
4819 | info->reshape_progress) | |
4820 | goto nonew; /* No new data here */ | |
4821 | } | |
4822 | } else { | |
4823 | if (bsb.length == 0 && bsb.length2 == 0) | |
4824 | continue; | |
4825 | if (info->delta_disks >= 0) { | |
4826 | /* reshape_progress is increasing */ | |
4827 | if ((__le64_to_cpu(bsb.arraystart) | |
4828 | + __le64_to_cpu(bsb.length) | |
4829 | < info->reshape_progress) && | |
4830 | (__le64_to_cpu(bsb.arraystart2) | |
4831 | + __le64_to_cpu(bsb.length2) | |
4832 | < info->reshape_progress)) | |
4833 | goto nonew; /* No new data here */ | |
4834 | } else { | |
4835 | /* reshape_progress is decreasing */ | |
4836 | if (__le64_to_cpu(bsb.arraystart) >= | |
4837 | info->reshape_progress && | |
4838 | __le64_to_cpu(bsb.arraystart2) >= | |
4839 | info->reshape_progress) | |
4840 | goto nonew; /* No new data here */ | |
4841 | } | |
4842 | } | |
4843 | if (lseek64(fd, __le64_to_cpu(bsb.devstart)*512, 0) < 0) { | |
4844 | second_fail: | |
4845 | if (verbose) | |
4846 | pr_err("Failed to verify secondary backup-metadata block on %s\n", | |
4847 | devname); | |
4848 | continue; /* Cannot seek */ | |
4849 | } | |
4850 | /* There should be a duplicate backup superblock 4k before here */ | |
4851 | if (lseek64(fd, -4096, 1) < 0 || | |
4852 | read(fd, &bsb2, sizeof(bsb2)) != sizeof(bsb2)) | |
4853 | goto second_fail; /* Cannot find leading superblock */ | |
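/* How much of the superblock is meaningful depends on the format
 * version: "md_backup_data-1" defines only the fields up to pad1 (a
 * single backup range), while "-2" also defines the second range and
 * its checksum, up to pad, so compare only the valid prefix. */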
4854 | if (bsb.magic[15] == '1') | |
4855 | bsbsize = offsetof(struct mdp_backup_super, pad1); | |
4856 | else | |
4857 | bsbsize = offsetof(struct mdp_backup_super, pad); | |
4858 | if (memcmp(&bsb2, &bsb, bsbsize) != 0) | |
4859 | goto second_fail; /* Leading superblock does not match */ | |
4860 | ||
4861 | /* Now need the data offsets for all devices. */ | |
4862 | offsets = xmalloc(sizeof(*offsets)*info->array.raid_disks); | |
4863 | for(j=0; j<info->array.raid_disks; j++) { | |
4864 | if (fdlist[j] < 0) | |
4865 | continue; | |
4866 | if (st->ss->load_super(st, fdlist[j], NULL)) | |
4867 | /* FIXME should this be an error? */ | |
4868 | continue; | |
4869 | st->ss->getinfo_super(st, &dinfo, NULL); | |
4870 | st->ss->free_super(st); | |
4871 | offsets[j] = dinfo.data_offset * 512; | |
4872 | } | |
4873 | printf("%s: restoring critical section\n", Name); | |
4874 | ||
4875 | if (restore_stripes(fdlist, offsets, info->array.raid_disks, | |
4876 | info->new_chunk, info->new_level, | |
4877 | info->new_layout, fd, | |
4878 | __le64_to_cpu(bsb.devstart)*512, | |
4879 | __le64_to_cpu(bsb.arraystart)*512, | |
4880 | __le64_to_cpu(bsb.length)*512, NULL)) { | |
4881 | /* didn't succeed, so give up */ | |
4882 | if (verbose) | |
4883 | pr_err("Error restoring backup from %s\n", | |
4884 | devname); | |
4885 | free(offsets); | |
4886 | close_fd(&backup_fd); | |
4887 | return 1; | |
4888 | } | |
4889 | ||
4890 | if (bsb.magic[15] == '2' && | |
4891 | restore_stripes(fdlist, offsets, info->array.raid_disks, | |
4892 | info->new_chunk, info->new_level, | |
4893 | info->new_layout, fd, | |
4894 | __le64_to_cpu(bsb.devstart)*512 + | |
4895 | __le64_to_cpu(bsb.devstart2)*512, | |
4896 | __le64_to_cpu(bsb.arraystart2)*512, | |
4897 | __le64_to_cpu(bsb.length2)*512, NULL)) { | |
4898 | /* didn't succeed, so give up */ | |
4899 | if (verbose) | |
4900 | pr_err("Error restoring second backup from %s\n", | |
4901 | devname); | |
4902 | free(offsets); | |
4903 | close_fd(&backup_fd); | |
4904 | return 1; | |
4905 | } | |
4906 | ||
4907 | free(offsets); | |
4908 | ||
4909 | /* Ok, so the data is restored. Let's update those superblocks. */ | |
4910 | ||
4911 | lo = hi = 0; | |
4912 | if (bsb.length) { | |
4913 | lo = __le64_to_cpu(bsb.arraystart); | |
4914 | hi = lo + __le64_to_cpu(bsb.length); | |
4915 | } | |
4916 | if (bsb.magic[15] == '2' && bsb.length2) { | |
4917 | unsigned long long lo1, hi1; | |
4918 | lo1 = __le64_to_cpu(bsb.arraystart2); | |
4919 | hi1 = lo1 + __le64_to_cpu(bsb.length2); | |
4920 | if (lo == hi) { | |
4921 | lo = lo1; | |
4922 | hi = hi1; | |
4923 | } else if (lo < lo1) | |
4924 | hi = hi1; | |
4925 | else | |
4926 | lo = lo1; | |
4927 | } | |
4928 | if (lo < hi && (info->reshape_progress < lo || | |
4929 | info->reshape_progress > hi)) | |
4930 | /* backup does not affect reshape_progress */ ; | |
4931 | else if (info->delta_disks >= 0) { | |
4932 | info->reshape_progress = __le64_to_cpu(bsb.arraystart) + | |
4933 | __le64_to_cpu(bsb.length); | |
4934 | if (bsb.magic[15] == '2') { | |
4935 | unsigned long long p2; | |
4936 | ||
4937 | p2 = __le64_to_cpu(bsb.arraystart2) + | |
4938 | __le64_to_cpu(bsb.length2); | |
4939 | if (p2 > info->reshape_progress) | |
4940 | info->reshape_progress = p2; | |
4941 | } | |
4942 | } else { | |
4943 | info->reshape_progress = __le64_to_cpu(bsb.arraystart); | |
4944 | if (bsb.magic[15] == '2') { | |
4945 | unsigned long long p2; | |
4946 | ||
4947 | p2 = __le64_to_cpu(bsb.arraystart2); | |
4948 | if (p2 < info->reshape_progress) | |
4949 | info->reshape_progress = p2; | |
4950 | } | |
4951 | } | |
4952 | for (j=0; j<info->array.raid_disks; j++) { | |
4953 | if (fdlist[j] < 0) | |
4954 | continue; | |
4955 | if (st->ss->load_super(st, fdlist[j], NULL)) | |
4956 | continue; | |
4957 | st->ss->getinfo_super(st, &dinfo, NULL); | |
4958 | dinfo.reshape_progress = info->reshape_progress; | |
4959 | st->ss->update_super(st, &dinfo, | |
4960 | UOPT_SPEC__RESHAPE_PROGRESS, | |
4961 | NULL, 0, 0, NULL); | |
4962 | st->ss->store_super(st, fdlist[j]); | |
4963 | st->ss->free_super(st); | |
4964 | } | |
4965 | close_fd(&backup_fd); | |
4966 | return 0; | |
4967 | } | |
4968 | ||
4969 | close_fd(&backup_fd); | |
4970 | ||
4971 | /* Didn't find any backup data, try to see if any | |
4972 | * was needed. | |
4973 | */ | |
4974 | if (info->delta_disks < 0) { | |
4975 | /* When shrinking, the critical section is at the end. | |
4976 | * So see if we are before the critical section. | |
4977 | */ | |
4978 | unsigned long long first_block; | |
4979 | nstripe = ostripe = 0; | |
4980 | first_block = 0; | |
4981 | while (ostripe >= nstripe) { | |
4982 | ostripe += info->array.chunk_size / 512; | |
4983 | first_block = ostripe * odata; | |
4984 | nstripe = first_block / ndata / (info->new_chunk/512) * | |
4985 | (info->new_chunk/512); | |
4986 | } | |
4987 | ||
4988 | if (info->reshape_progress >= first_block) | |
4989 | return 0; | |
4990 | } | |
4991 | if (info->delta_disks > 0) { | |
4992 | /* See if we are beyond the critical section. */ | |
4993 | unsigned long long last_block; | |
4994 | nstripe = ostripe = 0; | |
4995 | last_block = 0; | |
4996 | while (nstripe >= ostripe) { | |
4997 | nstripe += info->new_chunk / 512; | |
4998 | last_block = nstripe * ndata; | |
4999 | ostripe = last_block / odata / (info->array.chunk_size/512) * | |
5000 | (info->array.chunk_size/512); | |
5001 | } | |
5002 | ||
5003 | if (info->reshape_progress >= last_block) | |
5004 | return 0; | |
5005 | } | |
5006 | /* a backup of the critical section was needed but not found */ | |
5007 | if (verbose) | |
5008 | pr_err("Failed to find backup of critical section\n"); | |
5009 | return 1; | |
5010 | } | |
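
/*
 * Illustrative helper, not part of mdadm: it repeats the arithmetic of
 * the delta_disks > 0 branch above to find where the "critical
 * section" of a grow ends.  All values are in 512-byte sectors (the
 * code above divides the byte-sized chunk fields by 512 first).  For
 * example, growing a RAID5 from 3 to 4 devices with 512 KiB chunks
 * (odata = 2, ndata = 3, both chunk sizes 1024 sectors) yields
 * last_block = 6144, i.e. only the first 3 MiB of array data ever
 * needs to be backed up.
 */
static unsigned long long toy_grow_critical_end(int odata, int ndata,
                                                unsigned int old_chunk_sectors,
                                                unsigned int new_chunk_sectors)
{
        unsigned long long nstripe = 0, ostripe = 0, last_block = 0;

        /* mirror the loop in Grow_restart(): advance in new-chunk steps
         * until the old-layout read position (ostripe) moves strictly
         * ahead of the new-layout write position (nstripe) */
        while (nstripe >= ostripe) {
                nstripe += new_chunk_sectors;
                last_block = nstripe * ndata;
                ostripe = last_block / odata / old_chunk_sectors *
                        old_chunk_sectors;
        }
        return last_block;
}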
5011 | ||
5012 | int Grow_continue_command(char *devname, int fd, struct context *c) | |
5013 | { | |
5014 | int ret_val = 0; | |
5015 | struct supertype *st = NULL; | |
5016 | struct mdinfo *content = NULL; | |
5017 | struct mdinfo array; | |
5018 | char *subarray = NULL; | |
5019 | struct mdinfo *cc = NULL; | |
5020 | struct mdstat_ent *mdstat = NULL; | |
5021 | int cfd = -1; | |
5022 | int fd2; | |
5023 | ||
5024 | dprintf("Grow continue from command line called for %s\n", devname); | |
5025 | ||
5026 | st = super_by_fd(fd, &subarray); | |
5027 | if (!st || !st->ss) { | |
5028 | pr_err("Unable to determine metadata format for %s\n", devname); | |
5029 | return 1; | |
5030 | } | |
5031 | dprintf("Grow continue is run for "); | |
5032 | if (st->ss->external == 0) { | |
5033 | int d; | |
5034 | int cnt = 5; | |
5035 | dprintf_cont("native array (%s)\n", devname); | |
5036 | if (md_get_array_info(fd, &array.array) < 0) { | |
5037 | pr_err("%s is not an active md array - aborting\n", | |
5038 | devname); | |
5039 | ret_val = 1; | |
5040 | goto Grow_continue_command_exit; | |
5041 | } | |
5042 | content = &array; | |
5043 | if (sysfs_init(content, fd, NULL) < 0) { | |
5044 | pr_err("sysfs_init fails\n"); | |
5045 | ret_val = 1; | |
5046 | goto Grow_continue_command_exit; | |
5047 | } | |
5048 | /* Need to load a superblock. | |
5049 | * FIXME we should really get what we need from | |
5050 | * sysfs | |
5051 | */ | |
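/* The state is read from the first active member whose superblock
 * loads cleanly; if the metadata does not yet show the reshape as
 * active, retry a handful of times with a short sleep so the on-disk
 * state has a chance to catch up. */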
5052 | do { | |
5053 | for (d = 0; d < MAX_DISKS; d++) { | |
5054 | mdu_disk_info_t disk; | |
5055 | char *dv; | |
5056 | int err; | |
5057 | disk.number = d; | |
5058 | if (md_get_disk_info(fd, &disk) < 0) | |
5059 | continue; | |
5060 | if (disk.major == 0 && disk.minor == 0) | |
5061 | continue; | |
5062 | if ((disk.state & (1 << MD_DISK_ACTIVE)) == 0) | |
5063 | continue; | |
5064 | dv = map_dev(disk.major, disk.minor, 1); | |
5065 | if (!dv) | |
5066 | continue; | |
5067 | fd2 = dev_open(dv, O_RDONLY); | |
5068 | if (fd2 < 0) | |
5069 | continue; | |
5070 | err = st->ss->load_super(st, fd2, NULL); | |
5071 | close(fd2); | |
5072 | if (err) | |
5073 | continue; | |
5074 | break; | |
5075 | } | |
5076 | if (d == MAX_DISKS) { | |
5077 | pr_err("Unable to load metadata for %s\n", | |
5078 | devname); | |
5079 | ret_val = 1; | |
5080 | goto Grow_continue_command_exit; | |
5081 | } | |
5082 | st->ss->getinfo_super(st, content, NULL); | |
5083 | if (!content->reshape_active) | |
5084 | sleep_for(3, 0, true); | |
5085 | else | |
5086 | break; | |
5087 | } while (cnt-- > 0); | |
5088 | } else { | |
5089 | char *container; | |
5090 | ||
5091 | if (subarray) { | |
5092 | dprintf_cont("subarray (%s)\n", subarray); | |
5093 | container = st->container_devnm; | |
5094 | cfd = open_dev_excl(st->container_devnm); | |
5095 | } else { | |
5096 | container = st->devnm; | |
5097 | close(fd); | |
5098 | cfd = open_dev_excl(st->devnm); | |
5099 | dprintf_cont("container (%s)\n", container); | |
5100 | fd = cfd; | |
5101 | } | |
5102 | if (cfd < 0) { | |
5103 | pr_err("Unable to open container for %s\n", devname); | |
5104 | ret_val = 1; | |
5105 | goto Grow_continue_command_exit; | |
5106 | } | |
5107 | ||
5108 | /* find the array under reshape in the container | |
5109 | */ | |
5110 | ret_val = st->ss->load_container(st, cfd, NULL); | |
5111 | if (ret_val) { | |
5112 | pr_err("Cannot read superblock for %s\n", devname); | |
5113 | ret_val = 1; | |
5114 | goto Grow_continue_command_exit; | |
5115 | } | |
5116 | ||
5117 | cc = st->ss->container_content(st, subarray); | |
5118 | for (content = cc; content ; content = content->next) { | |
5119 | char *array_name; | |
5120 | int allow_reshape = 1; | |
5121 | ||
5122 | if (content->reshape_active == 0) | |
5123 | continue; | |
5124 | /* The decision between an array-wide and a | |
5125 | * container-wide reshape is taken in Grow_continue | |
5126 | * based on the content->reshape_active state, so we | |
5127 | * need to check whether reshape is allowed based on | |
5128 | * reshape_active and the subarray name. | |
5129 | */ | |
5130 | if (content->array.state & (1<<MD_SB_BLOCK_VOLUME)) | |
5131 | allow_reshape = 0; | |
5132 | if (content->reshape_active == CONTAINER_RESHAPE && | |
5133 | (content->array.state | |
5134 | & (1<<MD_SB_BLOCK_CONTAINER_RESHAPE))) | |
5135 | allow_reshape = 0; | |
5136 | ||
5137 | if (!allow_reshape) { | |
5138 | pr_err("cannot continue reshape of an array in container with unsupported metadata: %s(%s)\n", | |
5139 | devname, container); | |
5140 | ret_val = 1; | |
5141 | goto Grow_continue_command_exit; | |
5142 | } | |
5143 | ||
5144 | array_name = strchr(content->text_version+1, '/')+1; | |
5145 | mdstat = mdstat_by_subdev(array_name, container); | |
5146 | if (!mdstat) | |
5147 | continue; | |
5148 | if (mdstat->active == 0) { | |
5149 | pr_err("Skipping inactive array %s.\n", | |
5150 | mdstat->devnm); | |
5151 | free_mdstat(mdstat); | |
5152 | mdstat = NULL; | |
5153 | continue; | |
5154 | } | |
5155 | break; | |
5156 | } | |
5157 | if (!content) { | |
5158 | pr_err("Unable to determine reshaped array for %s\n", devname); | |
5159 | ret_val = 1; | |
5160 | goto Grow_continue_command_exit; | |
5161 | } | |
5162 | fd2 = open_dev(mdstat->devnm); | |
5163 | if (fd2 < 0) { | |
5164 | pr_err("cannot open (%s)\n", mdstat->devnm); | |
5165 | ret_val = 1; | |
5166 | goto Grow_continue_command_exit; | |
5167 | } | |
5168 | ||
5169 | if (sysfs_init(content, fd2, mdstat->devnm)) { | |
5170 | pr_err("Unable to initialize sysfs for %s, Grow cannot continue.\n", | |
5171 | mdstat->devnm); | |
5172 | ret_val = 1; | |
5173 | close(fd2); | |
5174 | goto Grow_continue_command_exit; | |
5175 | } | |
5176 | ||
5177 | close(fd2); | |
5178 | ||
5179 | /* start mdmon in case it is not running | |
5180 | */ | |
5181 | if (!mdmon_running(container)) | |
5182 | start_mdmon(container); | |
5183 | ping_monitor(container); | |
5184 | ||
5185 | if (wait_for_mdmon(container) != MDADM_STATUS_SUCCESS) { | |
5186 | pr_err("No mdmon found. Grow cannot continue.\n"); | |
5187 | ret_val = 1; | |
5188 | goto Grow_continue_command_exit; | |
5189 | } | |
5190 | } | |
5191 | ||
5192 | /* verify that the array under reshape is starting from | |
5193 | * the correct position | |
5194 | */ | |
5195 | if (verify_reshape_position(content, content->array.level) < 0) { | |
5196 | ret_val = 1; | |
5197 | goto Grow_continue_command_exit; | |
5198 | } | |
5199 | ||
5200 | /* continue reshape | |
5201 | */ | |
5202 | ret_val = Grow_continue(fd, st, content, 1, c); | |
5203 | ||
5204 | Grow_continue_command_exit: | |
5205 | if (cfd > -1) | |
5206 | close(cfd); | |
5207 | st->ss->free_super(st); | |
5208 | free_mdstat(mdstat); | |
5209 | sysfs_free(cc); | |
5210 | free(subarray); | |
5211 | ||
5212 | return ret_val; | |
5213 | } | |
5214 | ||
5215 | int Grow_continue(int mdfd, struct supertype *st, struct mdinfo *info, | |
5216 | int forked, struct context *c) | |
5217 | { | |
5218 | int ret_val = 2; | |
5219 | ||
5220 | if (!info->reshape_active) | |
5221 | return ret_val; | |
5222 | ||
5223 | if (st->ss->external) { | |
5224 | int cfd = open_dev(st->container_devnm); | |
5225 | ||
5226 | if (cfd < 0) | |
5227 | return 1; | |
5228 | ||
5229 | st->ss->load_container(st, cfd, st->container_devnm); | |
5230 | close(cfd); | |
5231 | ret_val = reshape_container(st->container_devnm, NULL, mdfd, | |
5232 | st, info, c, forked, 1 | info->reshape_active); | |
5233 | } else | |
5234 | ret_val = reshape_array(NULL, mdfd, "array", st, info, 1, | |
5235 | NULL, INVALID_SECTORS, c->backup_file, | |
5236 | 0, forked, 1 | info->reshape_active); | |
5237 | ||
5238 | return ret_val; | |
5239 | } | |
5240 | ||
5241 | char *make_backup(char *name) | |
5242 | { | |
5243 | char *base = "backup_file-"; | |
5244 | int len; | |
5245 | char *fname; | |
5246 | ||
5247 | len = strlen(MAP_DIR) + 1 + strlen(base) + strlen(name)+1; | |
5248 | fname = xmalloc(len); | |
5249 | sprintf(fname, "%s/%s%s", MAP_DIR, base, name); | |
5250 | return fname; | |
5251 | } | |
5252 | ||
5253 | char *locate_backup(char *name) | |
5254 | { | |
5255 | char *fl = make_backup(name); | |
5256 | struct stat stb; | |
5257 | ||
5258 | if (stat(fl, &stb) == 0 && S_ISREG(stb.st_mode)) | |
5259 | return fl; | |
5260 | ||
5261 | free(fl); | |
5262 | return NULL; | |
5263 | } |
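
/*
 * Usage sketch (illustrative only, not part of mdadm): for an array
 * whose sysfs name is "md127" (an example name), make_backup()
 * composes the path MAP_DIR "/backup_file-md127", and locate_backup()
 * returns that path only if it already exists as a regular file,
 * otherwise NULL.
 */
static void toy_locate_backup_example(void)
{
        char *path = locate_backup("md127");

        if (path) {
                /* a backup file left over from an earlier reshape exists */
                free(path);
        }
}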