super1.c
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2001-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neilb@suse.de>
23 */
24
25 #include "mdadm.h"
26 /*
27 * The version-1 superblock :
28 * All numeric fields are little-endian.
29 *
30 * total size: 256 bytes plus 2 per device.
31 * 1K allows 384 devices.
32 */
33 struct mdp_superblock_1 {
34 /* constant array information - 128 bytes */
35 __u32 magic; /* MD_SB_MAGIC: 0xa92b4efc - little endian */
36 __u32 major_version; /* 1 */
37 __u32 feature_map; /* 0 for now */
38 __u32 pad0; /* always set to 0 when writing */
39
40 __u8 set_uuid[16]; /* user-space generated. */
41 char set_name[32]; /* set and interpreted by user-space */
42
43 __u64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/
44 __u32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */
45 __u32 layout; /* only for raid5 currently */
46 __u64 size; /* used size of component devices, in 512byte sectors */
47
48 __u32 chunksize; /* in 512byte sectors */
49 __u32 raid_disks;
50 __u32 bitmap_offset; /* sectors after start of superblock that bitmap starts
51 * NOTE: signed, so bitmap can be before superblock
  52           * only meaningful if feature_map[0] is set.
53 */
54
55 /* These are only valid with feature bit '4' */
56 __u32 new_level; /* new level we are reshaping to */
57 __u64 reshape_position; /* next address in array-space for reshape */
58 __u32 delta_disks; /* change in number of raid_disks */
59 __u32 new_layout; /* new layout */
60 __u32 new_chunk; /* new chunk size (bytes) */
61 __u8 pad1[128-124]; /* set to 0 when written */
62
63 /* constant this-device information - 64 bytes */
64 __u64 data_offset; /* sector start of data, often 0 */
65 __u64 data_size; /* sectors in this device that can be used for data */
66 __u64 super_offset; /* sector start of this superblock */
67 __u64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */
68 __u32 dev_number; /* permanent identifier of this device - not role in raid */
69 __u32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */
  70 	__u8	device_uuid[16]; /* user-space settable, ignored by kernel */
71 __u8 devflags; /* per-device flags. Only one defined...*/
72 #define WriteMostly1 1 /* mask for writemostly flag in above */
73 __u8 pad2[64-57]; /* set to 0 when writing */
74
75 /* array state information - 64 bytes */
  76 	__u64	utime;		/* lo 40 bits are seconds, top 24 are microseconds */
77 __u64 events; /* incremented when superblock updated */
78 __u64 resync_offset; /* data before this offset (from data_offset) known to be in sync */
  79 	__u32	sb_csum;	/* checksum up to dev_roles[max_dev] */
80 __u32 max_dev; /* size of dev_roles[] array to consider */
81 __u8 pad3[64-32]; /* set to 0 when writing */
82
83 /* device state information. Indexed by dev_number.
84 * 2 bytes per device
85 * Note there are no per-device state flags. State information is rolled
86 * into the 'roles' value. If a device is spare or faulty, then it doesn't
87 * have a meaningful role.
88 */
89 __u16 dev_roles[0]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */
90 };
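/*
 * On-disk layout of the structure above, in bytes from the start of the
 * superblock: 0-127 constant array information, 128-191 constant
 * this-device information, 192-255 array state information, then the
 * dev_roles[] table (2 bytes per device) from offset 256 onwards.
 * calc_sb_1_csum() below sanity-checks these offsets.
 */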
91
92 #define MAX_SB_SIZE 4096
93 /* bitmap super size is 256, but we round up to a sector for alignment */
94 #define BM_SUPER_SIZE 512
95 #define MAX_DEVS ((int)(MAX_SB_SIZE - sizeof(struct mdp_superblock_1)) / 2)
96 #define SUPER1_SIZE (MAX_SB_SIZE + BM_SUPER_SIZE \
97 + sizeof(struct misc_dev_info))
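/*
 * With a 4096-byte superblock area and a 256-byte fixed header, MAX_DEVS
 * works out to (4096 - 256) / 2 = 1920 dev_roles[] entries.
 */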
98
99 struct misc_dev_info {
100 __u64 device_size;
101 };
102
103 /* feature_map bits */
104 #define MD_FEATURE_BITMAP_OFFSET 1
105 #define MD_FEATURE_RECOVERY_OFFSET 2 /* recovery_offset is present and
106 * must be honoured
107 */
108 #define MD_FEATURE_RESHAPE_ACTIVE 4
109
110 #define MD_FEATURE_ALL (1|2|4)
111
112 #ifndef offsetof
113 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
114 #endif
115 static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
116 {
117 unsigned int disk_csum, csum;
118 unsigned long long newcsum;
119 int size = sizeof(*sb) + __le32_to_cpu(sb->max_dev)*2;
120 unsigned int *isuper = (unsigned int*)sb;
121
122 /* make sure I can count... */
123 if (offsetof(struct mdp_superblock_1,data_offset) != 128 ||
124 offsetof(struct mdp_superblock_1, utime) != 192 ||
125 sizeof(struct mdp_superblock_1) != 256) {
126 fprintf(stderr, "WARNING - superblock isn't sized correctly\n");
127 }
128
129 disk_csum = sb->sb_csum;
130 sb->sb_csum = 0;
131 newcsum = 0;
132 for (; size>=4; size -= 4 ) {
133 newcsum += __le32_to_cpu(*isuper);
134 isuper++;
135 }
136
137 if (size == 2)
138 newcsum += __le16_to_cpu(*(unsigned short*) isuper);
139
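	/* fold the 64-bit running sum into 32 bits by adding its
	 * high and low halves */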
140 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
141 sb->sb_csum = disk_csum;
142 return __cpu_to_le32(csum);
143 }
144
145 static char abuf[4096+4096];
146 static int aread(int fd, void *buf, int len)
147 {
148 /* aligned read.
149 * On devices with a 4K sector size, we need to read
150 * the full sector and copy relevant bits into
151 * the buffer
152 */
153 int bsize, iosize;
154 char *b;
155 int n;
156 if (ioctl(fd, BLKSSZGET, &bsize) != 0)
157 bsize = 512;
158
159 if (bsize > 4096 || len > 4096)
160 return -1;
161 b = (char*)(((long)(abuf+4096))&~4095UL);
162
163 for (iosize = 0; iosize < len; iosize += bsize)
164 ;
165 n = read(fd, b, iosize);
166 if (n <= 0)
167 return n;
168 lseek(fd, len - n, 1);
169 if (n > len)
170 n = len;
171 memcpy(buf, b, n);
172 return n;
173 }
174
175 static int awrite(int fd, void *buf, int len)
176 {
177 /* aligned write.
178 * On devices with a 4K sector size, we need to write
179 * the full sector. We pre-read if the sector is larger
180 * than the write.
181 * The address must be sector-aligned.
182 */
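	/* For example, writing a 512-byte superblock to a device with
	 * 4096-byte sectors: iosize is rounded up to 4096, the existing
	 * sector is pre-read into the aligned buffer, the 512 new bytes
	 * are copied over the front, and the full 4096 bytes are written
	 * back.
	 */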
183 int bsize, iosize;
184 char *b;
185 int n;
186 if (ioctl(fd, BLKSSZGET, &bsize) != 0)
187 bsize = 512;
188 if (bsize > 4096 || len > 4096)
189 return -1;
190 b = (char*)(((long)(abuf+4096))&~4095UL);
191
192 for (iosize = 0; iosize < len ; iosize += bsize)
193 ;
194
195 if (len != iosize) {
196 n = read(fd, b, iosize);
197 if (n <= 0)
198 return n;
199 lseek(fd, -n, 1);
200 }
201
202 memcpy(b, buf, len);
203 n = write(fd, b, iosize);
204 if (n <= 0)
205 return n;
206 lseek(fd, len - n, 1);
207 return len;
208 }
209
210 #ifndef MDASSEMBLE
211 static void examine_super1(struct supertype *st, char *homehost)
212 {
213 struct mdp_superblock_1 *sb = st->sb;
214 time_t atime;
215 unsigned int d;
216 int role;
217 int delta_extra = 0;
218 int i;
219 char *c;
220 int l = homehost ? strlen(homehost) : 0;
221 int layout;
222 unsigned long long sb_offset;
223
224 printf(" Magic : %08x\n", __le32_to_cpu(sb->magic));
225 printf(" Version : 1");
226 sb_offset = __le64_to_cpu(sb->super_offset);
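	/* the stored superblock offset implies the minor version:
	 * at the start of the device -> 1.1, 4K (8 sectors) in -> 1.2,
	 * anything larger -> 1.0 (near the end of the device)
	 */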
227 if (sb_offset <= 4)
228 printf(".1\n");
229 else if (sb_offset <= 8)
230 printf(".2\n");
231 else
232 printf(".0\n");
233 printf(" Feature Map : 0x%x\n", __le32_to_cpu(sb->feature_map));
234 printf(" Array UUID : ");
235 for (i=0; i<16; i++) {
236 if ((i&3)==0 && i != 0) printf(":");
237 printf("%02x", sb->set_uuid[i]);
238 }
239 printf("\n");
240 printf(" Name : %.32s", sb->set_name);
241 if (l > 0 && l < 32 &&
242 sb->set_name[l] == ':' &&
243 strncmp(sb->set_name, homehost, l) == 0)
244 printf(" (local to host %s)", homehost);
245 printf("\n");
246 atime = __le64_to_cpu(sb->ctime) & 0xFFFFFFFFFFULL;
247 printf(" Creation Time : %.24s\n", ctime(&atime));
248 c=map_num(pers, __le32_to_cpu(sb->level));
249 printf(" Raid Level : %s\n", c?c:"-unknown-");
250 printf(" Raid Devices : %d\n", __le32_to_cpu(sb->raid_disks));
251 printf("\n");
252 printf(" Avail Dev Size : %llu%s\n",
253 (unsigned long long)__le64_to_cpu(sb->data_size),
254 human_size(__le64_to_cpu(sb->data_size)<<9));
255 if (__le32_to_cpu(sb->level) > 0) {
256 int ddsks=0;
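		/* number of data-bearing disks for this level:
		 * raid1 -> 1, raid4/5 -> n-1, raid6 -> n-2,
		 * raid10 -> n / (near copies * far copies)
		 */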
257 switch(__le32_to_cpu(sb->level)) {
258 case 1: ddsks=1;break;
259 case 4:
260 case 5: ddsks = __le32_to_cpu(sb->raid_disks)-1; break;
261 case 6: ddsks = __le32_to_cpu(sb->raid_disks)-2; break;
262 case 10:
263 layout = __le32_to_cpu(sb->layout);
264 ddsks = __le32_to_cpu(sb->raid_disks)
265 / (layout&255) / ((layout>>8)&255);
266 }
267 if (ddsks)
268 printf(" Array Size : %llu%s\n",
269 ddsks*(unsigned long long)__le64_to_cpu(sb->size),
270 human_size(ddsks*__le64_to_cpu(sb->size)<<9));
271 if (sb->size != sb->data_size)
272 printf(" Used Dev Size : %llu%s\n",
273 (unsigned long long)__le64_to_cpu(sb->size),
274 human_size(__le64_to_cpu(sb->size)<<9));
275 }
276 if (sb->data_offset)
277 printf(" Data Offset : %llu sectors\n",
278 (unsigned long long)__le64_to_cpu(sb->data_offset));
279 printf(" Super Offset : %llu sectors\n",
280 (unsigned long long)__le64_to_cpu(sb->super_offset));
281 if (__le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET)
282 printf("Recovery Offset : %llu sectors\n", (unsigned long long)__le64_to_cpu(sb->recovery_offset));
283 printf(" State : %s\n", (__le64_to_cpu(sb->resync_offset)+1)? "active":"clean");
284 printf(" Device UUID : ");
285 for (i=0; i<16; i++) {
286 if ((i&3)==0 && i != 0) printf(":");
287 printf("%02x", sb->device_uuid[i]);
288 }
289 printf("\n");
290 printf("\n");
291 if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
292 printf("Internal Bitmap : %ld sectors from superblock\n",
293 (long)(int32_t)__le32_to_cpu(sb->bitmap_offset));
294 }
295 if (sb->feature_map & __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE)) {
296 printf(" Reshape pos'n : %llu%s\n", (unsigned long long)__le64_to_cpu(sb->reshape_position)/2,
297 human_size(__le64_to_cpu(sb->reshape_position)<<9));
298 if (__le32_to_cpu(sb->delta_disks)) {
299 printf(" Delta Devices : %d", __le32_to_cpu(sb->delta_disks));
300 printf(" (%d->%d)\n",
301 __le32_to_cpu(sb->raid_disks)-__le32_to_cpu(sb->delta_disks),
302 __le32_to_cpu(sb->raid_disks));
303 if ((int)__le32_to_cpu(sb->delta_disks) < 0)
304 delta_extra = -__le32_to_cpu(sb->delta_disks);
305 }
306 if (__le32_to_cpu(sb->new_level) != __le32_to_cpu(sb->level)) {
307 c = map_num(pers, __le32_to_cpu(sb->new_level));
308 printf(" New Level : %s\n", c?c:"-unknown-");
309 }
310 if (__le32_to_cpu(sb->new_layout) != __le32_to_cpu(sb->layout)) {
311 if (__le32_to_cpu(sb->level) == 5) {
312 c = map_num(r5layout, __le32_to_cpu(sb->new_layout));
313 printf(" New Layout : %s\n", c?c:"-unknown-");
314 }
315 if (__le32_to_cpu(sb->level) == 6) {
316 c = map_num(r6layout, __le32_to_cpu(sb->new_layout));
317 printf(" New Layout : %s\n", c?c:"-unknown-");
318 }
319 if (__le32_to_cpu(sb->level) == 10) {
320 printf(" New Layout :");
321 print_r10_layout(__le32_to_cpu(sb->new_layout));
322 printf("\n");
323 }
324 }
325 if (__le32_to_cpu(sb->new_chunk) != __le32_to_cpu(sb->chunksize))
326 printf(" New Chunksize : %dK\n", __le32_to_cpu(sb->new_chunk)/2);
327 printf("\n");
328 }
329 if (sb->devflags) {
330 printf(" Flags :");
331 if (sb->devflags & WriteMostly1)
332 printf(" write-mostly");
333 printf("\n");
334 }
335
336 atime = __le64_to_cpu(sb->utime) & 0xFFFFFFFFFFULL;
337 printf(" Update Time : %.24s\n", ctime(&atime));
338
339 if (calc_sb_1_csum(sb) == sb->sb_csum)
340 printf(" Checksum : %x - correct\n", __le32_to_cpu(sb->sb_csum));
341 else
342 printf(" Checksum : %x - expected %x\n", __le32_to_cpu(sb->sb_csum),
343 __le32_to_cpu(calc_sb_1_csum(sb)));
344 printf(" Events : %llu\n", (unsigned long long)__le64_to_cpu(sb->events));
345 printf("\n");
346 if (__le32_to_cpu(sb->level) == 5) {
347 c = map_num(r5layout, __le32_to_cpu(sb->layout));
348 printf(" Layout : %s\n", c?c:"-unknown-");
349 }
350 if (__le32_to_cpu(sb->level) == 6) {
351 c = map_num(r6layout, __le32_to_cpu(sb->layout));
352 printf(" Layout : %s\n", c?c:"-unknown-");
353 }
354 if (__le32_to_cpu(sb->level) == 10) {
355 int lo = __le32_to_cpu(sb->layout);
356 printf(" Layout :");
357 print_r10_layout(lo);
358 printf("\n");
359 }
360 switch(__le32_to_cpu(sb->level)) {
361 case 0:
362 case 4:
363 case 5:
364 case 6:
365 case 10:
366 printf(" Chunk Size : %dK\n", __le32_to_cpu(sb->chunksize)/2);
367 break;
368 case -1:
369 printf(" Rounding : %dK\n", __le32_to_cpu(sb->chunksize)/2);
370 break;
371 default: break;
372 }
373 printf("\n");
374 #if 0
375 /* This turns out to just be confusing */
376 printf(" Array Slot : %d (", __le32_to_cpu(sb->dev_number));
377 for (i= __le32_to_cpu(sb->max_dev); i> 0 ; i--)
378 if (__le16_to_cpu(sb->dev_roles[i-1]) != 0xffff)
379 break;
380 for (d=0; d < i; d++) {
381 int role = __le16_to_cpu(sb->dev_roles[d]);
382 if (d) printf(", ");
383 if (role == 0xffff) printf("empty");
384 else if(role == 0xfffe) printf("failed");
385 else printf("%d", role);
386 }
387 printf(")\n");
388 #endif
389 printf(" Device Role : ");
390 d = __le32_to_cpu(sb->dev_number);
391 if (d < __le32_to_cpu(sb->max_dev))
392 role = __le16_to_cpu(sb->dev_roles[d]);
393 else
394 role = 0xFFFF;
395 if (role >= 0xFFFE)
396 printf("spare\n");
397 else
398 printf("Active device %d\n", role);
399
400 printf(" Array State : ");
401 for (d=0; d<__le32_to_cpu(sb->raid_disks) + delta_extra; d++) {
402 int cnt = 0;
403 unsigned int i;
404 for (i=0; i< __le32_to_cpu(sb->max_dev); i++) {
405 unsigned int role = __le16_to_cpu(sb->dev_roles[i]);
406 if (role == d)
407 cnt++;
408 }
409 if (cnt > 1) printf("?");
410 else if (cnt == 1) printf("A");
411 else printf (".");
412 }
413 #if 0
414 /* This is confusing too */
415 faulty = 0;
416 for (i=0; i< __le32_to_cpu(sb->max_dev); i++) {
417 int role = __le16_to_cpu(sb->dev_roles[i]);
418 if (role == 0xFFFE)
419 faulty++;
420 }
421 if (faulty) printf(" %d failed", faulty);
422 #endif
423 printf(" ('A' == active, '.' == missing)");
424 printf("\n");
425 }
426
427
428 static void brief_examine_super1(struct supertype *st, int verbose)
429 {
430 struct mdp_superblock_1 *sb = st->sb;
431 int i;
432 unsigned long long sb_offset;
433 char *nm;
434 char *c=map_num(pers, __le32_to_cpu(sb->level));
435
436 nm = strchr(sb->set_name, ':');
437 if (nm)
438 nm++;
439 else if (sb->set_name[0])
440 nm = sb->set_name;
441 else
442 nm = NULL;
443
444 printf("ARRAY%s%s", nm ? " /dev/md/":"", nm);
445 if (verbose && c)
446 printf(" level=%s", c);
447 sb_offset = __le64_to_cpu(sb->super_offset);
448 if (sb_offset <= 4)
449 printf(" metadata=1.1 ");
450 else if (sb_offset <= 8)
451 printf(" metadata=1.2 ");
452 else
453 printf(" metadata=1.0 ");
454 if (verbose)
455 printf("num-devices=%d ", __le32_to_cpu(sb->raid_disks));
456 printf("UUID=");
457 for (i=0; i<16; i++) {
458 if ((i&3)==0 && i != 0) printf(":");
459 printf("%02x", sb->set_uuid[i]);
460 }
461 if (sb->set_name[0])
462 printf(" name=%.32s", sb->set_name);
463 printf("\n");
464 }
465
466 static void export_examine_super1(struct supertype *st)
467 {
468 struct mdp_superblock_1 *sb = st->sb;
469 int i;
470 int len = 32;
471
472 printf("MD_LEVEL=%s\n", map_num(pers, __le32_to_cpu(sb->level)));
473 printf("MD_DEVICES=%d\n", __le32_to_cpu(sb->raid_disks));
474 for (i=0; i<32; i++)
475 if (sb->set_name[i] == '\n' ||
476 sb->set_name[i] == '\0') {
477 len = i;
478 break;
479 }
480 if (len)
481 printf("MD_NAME=%.*s\n", len, sb->set_name);
482 printf("MD_UUID=");
483 for (i=0; i<16; i++) {
484 if ((i&3)==0 && i != 0) printf(":");
485 printf("%02x", sb->set_uuid[i]);
486 }
487 printf("\n");
488 printf("MD_UPDATE_TIME=%llu\n",
489 __le64_to_cpu(sb->utime) & 0xFFFFFFFFFFULL);
490 printf("MD_DEV_UUID=");
491 for (i=0; i<16; i++) {
492 if ((i&3)==0 && i != 0) printf(":");
493 printf("%02x", sb->device_uuid[i]);
494 }
495 printf("\n");
496 printf("MD_EVENTS=%llu\n",
497 (unsigned long long)__le64_to_cpu(sb->events));
498 }
499
500 static void detail_super1(struct supertype *st, char *homehost)
501 {
502 struct mdp_superblock_1 *sb = st->sb;
503 int i;
504 int l = homehost ? strlen(homehost) : 0;
505
506 printf(" Name : %.32s", sb->set_name);
507 if (l > 0 && l < 32 &&
508 sb->set_name[l] == ':' &&
509 strncmp(sb->set_name, homehost, l) == 0)
510 printf(" (local to host %s)", homehost);
511 printf("\n UUID : ");
512 for (i=0; i<16; i++) {
513 if ((i&3)==0 && i != 0) printf(":");
514 printf("%02x", sb->set_uuid[i]);
515 }
516 printf("\n Events : %llu\n\n", (unsigned long long)__le64_to_cpu(sb->events));
517 }
518
519 static void brief_detail_super1(struct supertype *st)
520 {
521 struct mdp_superblock_1 *sb = st->sb;
522 int i;
523
524 if (sb->set_name[0])
525 printf(" name=%.32s", sb->set_name);
526 printf(" UUID=");
527 for (i=0; i<16; i++) {
528 if ((i&3)==0 && i != 0) printf(":");
529 printf("%02x", sb->set_uuid[i]);
530 }
531 }
532
533 static void export_detail_super1(struct supertype *st)
534 {
535 struct mdp_superblock_1 *sb = st->sb;
536 int i;
537 int len = 32;
538
539 for (i=0; i<32; i++)
540 if (sb->set_name[i] == '\n' ||
541 sb->set_name[i] == '\0') {
542 len = i;
543 break;
544 }
545 if (len)
546 printf("MD_NAME=%.*s\n", len, sb->set_name);
547 }
548
549 #endif
550
551 static int match_home1(struct supertype *st, char *homehost)
552 {
553 struct mdp_superblock_1 *sb = st->sb;
554 int l = homehost ? strlen(homehost) : 0;
555
556 return (l > 0 && l < 32 &&
557 sb->set_name[l] == ':' &&
558 strncmp(sb->set_name, homehost, l) == 0);
559 }
560
561 static void uuid_from_super1(struct supertype *st, int uuid[4])
562 {
563 struct mdp_superblock_1 *super = st->sb;
564 char *cuuid = (char*)uuid;
565 int i;
566 for (i=0; i<16; i++)
567 cuuid[i] = super->set_uuid[i];
568 }
569
570 static void getinfo_super1(struct supertype *st, struct mdinfo *info, char *map)
571 {
572 struct mdp_superblock_1 *sb = st->sb;
573 int working = 0;
574 unsigned int i;
575 unsigned int role;
576 unsigned int map_disks = info->array.raid_disks;
577
578 memset(info, 0, sizeof(*info));
579 info->array.major_version = 1;
580 info->array.minor_version = st->minor_version;
581 info->array.patch_version = 0;
582 info->array.raid_disks = __le32_to_cpu(sb->raid_disks);
583 info->array.level = __le32_to_cpu(sb->level);
584 info->array.layout = __le32_to_cpu(sb->layout);
585 info->array.md_minor = -1;
586 info->array.ctime = __le64_to_cpu(sb->ctime);
587 info->array.utime = __le64_to_cpu(sb->utime);
588 info->array.chunk_size = __le32_to_cpu(sb->chunksize)*512;
589 info->array.state =
590 (__le64_to_cpu(sb->resync_offset) == MaxSector)
591 ? 1 : 0;
592
593 info->data_offset = __le64_to_cpu(sb->data_offset);
594 info->component_size = __le64_to_cpu(sb->size);
595 if (sb->feature_map & __le32_to_cpu(MD_FEATURE_BITMAP_OFFSET))
596 info->bitmap_offset = __le32_to_cpu(sb->bitmap_offset);
597
598 info->disk.major = 0;
599 info->disk.minor = 0;
600 info->disk.number = __le32_to_cpu(sb->dev_number);
601 if (__le32_to_cpu(sb->dev_number) >= __le32_to_cpu(sb->max_dev) ||
602 __le32_to_cpu(sb->dev_number) >= MAX_DEVS)
603 role = 0xfffe;
604 else
605 role = __le16_to_cpu(sb->dev_roles[__le32_to_cpu(sb->dev_number)]);
606
607 info->disk.raid_disk = -1;
608 switch(role) {
609 case 0xFFFF:
610 info->disk.state = 0; /* spare: not active, not sync, not faulty */
611 break;
612 case 0xFFFE:
613 info->disk.state = 1; /* faulty */
614 break;
615 default:
616 info->disk.state = 6; /* active and in sync */
617 info->disk.raid_disk = role;
618 }
619 if (sb->devflags & WriteMostly1)
620 info->disk.state |= (1 << MD_DISK_WRITEMOSTLY);
621 info->events = __le64_to_cpu(sb->events);
622 sprintf(info->text_version, "1.%d", st->minor_version);
623 info->safe_mode_delay = 200;
624
625 memcpy(info->uuid, sb->set_uuid, 16);
626
627 strncpy(info->name, sb->set_name, 32);
628 info->name[32] = 0;
629
630 if (sb->feature_map & __le32_to_cpu(MD_FEATURE_RECOVERY_OFFSET))
631 info->recovery_start = __le32_to_cpu(sb->recovery_offset);
632 else
633 info->recovery_start = MaxSector;
634
635 if (sb->feature_map & __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE)) {
636 info->reshape_active = 1;
637 info->reshape_progress = __le64_to_cpu(sb->reshape_position);
638 info->new_level = __le32_to_cpu(sb->new_level);
639 info->delta_disks = __le32_to_cpu(sb->delta_disks);
640 info->new_layout = __le32_to_cpu(sb->new_layout);
641 info->new_chunk = __le32_to_cpu(sb->new_chunk)<<9;
642 if (info->delta_disks < 0)
643 info->array.raid_disks -= info->delta_disks;
644 } else
645 info->reshape_active = 0;
646
647 info->recovery_blocked = info->reshape_active;
648
649 if (map)
650 for (i=0; i<map_disks; i++)
651 map[i] = 0;
652 for (i = 0; i < __le32_to_cpu(sb->max_dev); i++) {
653 role = __le16_to_cpu(sb->dev_roles[i]);
654 if (/*role == 0xFFFF || */role < (unsigned) info->array.raid_disks) {
655 working++;
656 if (map && role < map_disks)
657 map[role] = 1;
658 }
659 }
660
661 info->array.working_disks = working;
662 }
663
664 static struct mdinfo *container_content1(struct supertype *st, char *subarray)
665 {
666 struct mdinfo *info;
667
668 if (subarray)
669 return NULL;
670
671 info = malloc(sizeof(*info));
672 getinfo_super1(st, info, NULL);
673 return info;
674 }
675
676 static int update_super1(struct supertype *st, struct mdinfo *info,
677 char *update,
678 char *devname, int verbose,
679 int uuid_set, char *homehost)
680 {
681 /* NOTE: for 'assemble' and 'force' we need to return non-zero
682 * if any change was made. For others, the return value is
683 * ignored.
684 */
685 int rv = 0;
686 struct mdp_superblock_1 *sb = st->sb;
687
688 if (strcmp(update, "force-one")==0) {
689 /* Not enough devices for a working array,
690 * so bring this one up-to-date
691 */
692 if (sb->events != __cpu_to_le64(info->events))
693 rv = 1;
694 sb->events = __cpu_to_le64(info->events);
695 } else if (strcmp(update, "force-array")==0) {
 696 		/* Degraded array with 'force' requested, so we
 697 		 * may need to mark it 'clean'.
698 */
699 switch(__le32_to_cpu(sb->level)) {
700 case 5: case 4: case 6:
701 /* need to force clean */
702 if (sb->resync_offset != MaxSector)
703 rv = 1;
704 sb->resync_offset = MaxSector;
705 }
706 } else if (strcmp(update, "assemble")==0) {
707 int d = info->disk.number;
708 int want;
709 if (info->disk.state == 6)
710 want = info->disk.raid_disk;
711 else
712 want = 0xFFFF;
713 if (sb->dev_roles[d] != __cpu_to_le16(want)) {
714 sb->dev_roles[d] = __cpu_to_le16(want);
715 rv = 1;
716 }
717 if (info->reshape_active &&
718 sb->feature_map & __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE) &&
719 info->delta_disks >= 0 &&
720 info->reshape_progress < __le64_to_cpu(sb->reshape_position)) {
721 sb->reshape_position = __cpu_to_le64(info->reshape_progress);
722 rv = 1;
723 }
724 if (info->reshape_active &&
725 sb->feature_map & __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE) &&
726 info->delta_disks < 0 &&
727 info->reshape_progress > __le64_to_cpu(sb->reshape_position)) {
728 sb->reshape_position = __cpu_to_le64(info->reshape_progress);
729 rv = 1;
730 }
731 } else if (strcmp(update, "linear-grow-new") == 0) {
732 unsigned int i;
733 int rfd, fd;
734 unsigned int max = __le32_to_cpu(sb->max_dev);
735
736 for (i=0 ; i < max ; i++)
737 if (__le16_to_cpu(sb->dev_roles[i]) >= 0xfffe)
738 break;
739 sb->dev_number = __cpu_to_le32(i);
740 info->disk.number = i;
741 if (max >= __le32_to_cpu(sb->max_dev))
742 sb->max_dev = __cpu_to_le32(max+1);
743
744 if ((rfd = open("/dev/urandom", O_RDONLY)) < 0 ||
745 read(rfd, sb->device_uuid, 16) != 16) {
746 __u32 r[4] = {random(), random(), random(), random()};
747 memcpy(sb->device_uuid, r, 16);
748 }
749 if (rfd >= 0)
750 close(rfd);
751
752 sb->dev_roles[i] =
753 __cpu_to_le16(info->disk.raid_disk);
754
755 fd = open(devname, O_RDONLY);
756 if (fd >= 0) {
757 unsigned long long ds;
758 get_dev_size(fd, devname, &ds);
759 close(fd);
760 ds >>= 9;
761 if (__le64_to_cpu(sb->super_offset) <
762 __le64_to_cpu(sb->data_offset)) {
763 sb->data_size = __cpu_to_le64(
764 ds - __le64_to_cpu(sb->data_offset));
765 } else {
766 ds -= 8*2;
767 ds &= ~(unsigned long long)(4*2-1);
768 sb->super_offset = __cpu_to_le64(ds);
769 sb->data_size = __cpu_to_le64(
770 ds - __le64_to_cpu(sb->data_offset));
771 }
772 }
773 } else if (strcmp(update, "linear-grow-update") == 0) {
774 sb->raid_disks = __cpu_to_le32(info->array.raid_disks);
775 sb->dev_roles[info->disk.number] =
776 __cpu_to_le16(info->disk.raid_disk);
777 } else if (strcmp(update, "resync") == 0) {
778 /* make sure resync happens */
779 sb->resync_offset = 0ULL;
780 } else if (strcmp(update, "uuid") == 0) {
781 copy_uuid(sb->set_uuid, info->uuid, super1.swapuuid);
782
783 if (__le32_to_cpu(sb->feature_map)&MD_FEATURE_BITMAP_OFFSET) {
784 struct bitmap_super_s *bm;
785 bm = (struct bitmap_super_s*)(st->sb+MAX_SB_SIZE);
786 memcpy(bm->uuid, sb->set_uuid, 16);
787 }
788 } else if (strcmp(update, "no-bitmap") == 0) {
789 sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
790 } else if (strcmp(update, "homehost") == 0 &&
791 homehost) {
792 char *c;
793 update = "name";
794 c = strchr(sb->set_name, ':');
795 if (c)
796 strncpy(info->name, c+1, 31 - (c-sb->set_name));
797 else
798 strncpy(info->name, sb->set_name, 32);
799 info->name[32] = 0;
800 } else if (strcmp(update, "name") == 0) {
801 if (info->name[0] == 0)
802 sprintf(info->name, "%d", info->array.md_minor);
803 memset(sb->set_name, 0, sizeof(sb->set_name));
804 if (homehost &&
805 strchr(info->name, ':') == NULL &&
806 strlen(homehost)+1+strlen(info->name) < 32) {
807 strcpy(sb->set_name, homehost);
808 strcat(sb->set_name, ":");
809 strcat(sb->set_name, info->name);
810 } else
811 strcpy(sb->set_name, info->name);
812 } else if (strcmp(update, "devicesize") == 0 &&
813 __le64_to_cpu(sb->super_offset) <
814 __le64_to_cpu(sb->data_offset)) {
815 /* set data_size to device size less data_offset */
816 struct misc_dev_info *misc = (struct misc_dev_info*)
817 (st->sb + MAX_SB_SIZE + BM_SUPER_SIZE);
818 printf("Size was %llu\n", (unsigned long long)
819 __le64_to_cpu(sb->data_size));
820 sb->data_size = __cpu_to_le64(
821 misc->device_size - __le64_to_cpu(sb->data_offset));
822 printf("Size is %llu\n", (unsigned long long)
823 __le64_to_cpu(sb->data_size));
824 } else if (strcmp(update, "_reshape_progress")==0)
825 sb->reshape_position = __cpu_to_le64(info->reshape_progress);
826 else if (strcmp(update, "writemostly")==0)
827 sb->devflags |= WriteMostly1;
828 else if (strcmp(update, "readwrite")==0)
829 sb->devflags &= ~WriteMostly1;
830 else
831 rv = -1;
832
833 sb->sb_csum = calc_sb_1_csum(sb);
834 return rv;
835 }
836
837 static int init_super1(struct supertype *st, mdu_array_info_t *info,
838 unsigned long long size, char *name, char *homehost, int *uuid)
839 {
840 struct mdp_superblock_1 *sb;
841 int spares;
842 int rfd;
843 char defname[10];
844 int sbsize;
845
846 if (posix_memalign((void**)&sb, 512, SUPER1_SIZE) != 0) {
847 fprintf(stderr, Name
848 ": %s could not allocate superblock\n", __func__);
849 return 0;
850 }
851 memset(sb, 0, SUPER1_SIZE);
852
853 st->sb = sb;
854 if (info == NULL) {
855 /* zeroing superblock */
856 return 0;
857 }
858
859 spares = info->working_disks - info->active_disks;
860 if (info->raid_disks + spares > MAX_DEVS) {
861 fprintf(stderr, Name ": too many devices requested: %d+%d > %d\n",
862 info->raid_disks , spares, MAX_DEVS);
863 return 0;
864 }
865
866 sb->magic = __cpu_to_le32(MD_SB_MAGIC);
867 sb->major_version = __cpu_to_le32(1);
868 sb->feature_map = 0;
869 sb->pad0 = 0;
870
871 if (uuid)
872 copy_uuid(sb->set_uuid, uuid, super1.swapuuid);
873 else {
874 if ((rfd = open("/dev/urandom", O_RDONLY)) < 0 ||
875 read(rfd, sb->set_uuid, 16) != 16) {
876 __u32 r[4] = {random(), random(), random(), random()};
877 memcpy(sb->set_uuid, r, 16);
878 }
879 if (rfd >= 0) close(rfd);
880 }
881
882 if (name == NULL || *name == 0) {
883 sprintf(defname, "%d", info->md_minor);
884 name = defname;
885 }
886 if (homehost &&
887 strchr(name, ':')== NULL &&
888 strlen(homehost)+1+strlen(name) < 32) {
889 strcpy(sb->set_name, homehost);
890 strcat(sb->set_name, ":");
891 strcat(sb->set_name, name);
892 } else
893 strcpy(sb->set_name, name);
894
895 sb->ctime = __cpu_to_le64((unsigned long long)time(0));
896 sb->level = __cpu_to_le32(info->level);
897 sb->layout = __cpu_to_le32(info->layout);
898 sb->size = __cpu_to_le64(size*2ULL);
899 sb->chunksize = __cpu_to_le32(info->chunk_size>>9);
900 sb->raid_disks = __cpu_to_le32(info->raid_disks);
901
902 sb->data_offset = __cpu_to_le64(0);
903 sb->data_size = __cpu_to_le64(0);
904 sb->super_offset = __cpu_to_le64(0);
905 sb->recovery_offset = __cpu_to_le64(0);
906
907 sb->utime = sb->ctime;
908 sb->events = __cpu_to_le64(1);
909 if (info->state & (1<<MD_SB_CLEAN))
910 sb->resync_offset = MaxSector;
911 else
912 sb->resync_offset = 0;
913 sbsize = sizeof(struct mdp_superblock_1) + 2 * (info->raid_disks + spares);
914 sbsize = ROUND_UP(sbsize, 512);
915 sb->max_dev = __cpu_to_le32((sbsize - sizeof(struct mdp_superblock_1)) / 2);
916
917 memset(sb->dev_roles, 0xff, MAX_SB_SIZE - sizeof(struct mdp_superblock_1));
918
919 return 1;
920 }
921
922 struct devinfo {
923 int fd;
924 char *devname;
925 mdu_disk_info_t disk;
926 struct devinfo *next;
927 };
928 #ifndef MDASSEMBLE
929 /* Add a device to the superblock being created */
930 static int add_to_super1(struct supertype *st, mdu_disk_info_t *dk,
931 int fd, char *devname)
932 {
933 struct mdp_superblock_1 *sb = st->sb;
934 __u16 *rp = sb->dev_roles + dk->number;
935 struct devinfo *di, **dip;
936
937 if ((dk->state & 6) == 6) /* active, sync */
938 *rp = __cpu_to_le16(dk->raid_disk);
939 else if ((dk->state & ~2) == 0) /* active or idle -> spare */
940 *rp = 0xffff;
941 else
942 *rp = 0xfffe;
943
944 if (dk->number >= (int)__le32_to_cpu(sb->max_dev) &&
945 __le32_to_cpu(sb->max_dev) < MAX_DEVS)
946 sb->max_dev = __cpu_to_le32(dk->number+1);
947
948 sb->dev_number = __cpu_to_le32(dk->number);
949 sb->devflags = 0; /* don't copy another disks flags */
950 sb->sb_csum = calc_sb_1_csum(sb);
951
952 dip = (struct devinfo **)&st->info;
953 while (*dip)
954 dip = &(*dip)->next;
955 di = malloc(sizeof(struct devinfo));
956 di->fd = fd;
957 di->devname = devname;
958 di->disk = *dk;
959 di->next = NULL;
960 *dip = di;
961
962 return 0;
963 }
964 #endif
965
966 static void locate_bitmap1(struct supertype *st, int fd);
967
968 static int store_super1(struct supertype *st, int fd)
969 {
970 struct mdp_superblock_1 *sb = st->sb;
971 unsigned long long sb_offset;
972 int sbsize;
973 unsigned long long dsize;
974
975 if (!get_dev_size(fd, NULL, &dsize))
976 return 1;
977
978 dsize >>= 9;
979
980 if (dsize < 24)
981 return 2;
982
983 /*
984 * Calculate the position of the superblock.
985 * It is always aligned to a 4K boundary and
986 * depending on minor_version, it can be:
987 * 0: At least 8K, but less than 12K, from end of device
988 * 1: At start of device
989 * 2: 4K from start of device.
990 */
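	/* e.g. for minor_version 0 on a device of 8000005 sectors:
	 * sb_offset = (8000005 - 16) & ~7 = 7999984, i.e. 21 sectors
	 * (10.5K) from the end of the device, 4K aligned.
	 */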
991 switch(st->minor_version) {
992 case 0:
993 sb_offset = dsize;
994 sb_offset -= 8*2;
995 sb_offset &= ~(4*2-1);
996 break;
997 case 1:
998 sb_offset = 0;
999 break;
1000 case 2:
1001 sb_offset = 4*2;
1002 break;
1003 default:
1004 return -EINVAL;
1005 }
1006
1007
1008
1009 if (sb_offset != __le64_to_cpu(sb->super_offset) &&
1010 0 != __le64_to_cpu(sb->super_offset)
1011 ) {
1012 fprintf(stderr, Name ": internal error - sb_offset is wrong\n");
1013 abort();
1014 }
1015
1016 if (lseek64(fd, sb_offset << 9, 0)< 0LL)
1017 return 3;
1018
1019 sbsize = sizeof(*sb) + 2 * __le32_to_cpu(sb->max_dev);
1020 sbsize = (sbsize+511)&(~511UL);
1021
1022 if (awrite(fd, sb, sbsize) != sbsize)
1023 return 4;
1024
1025 if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
1026 struct bitmap_super_s *bm = (struct bitmap_super_s*)
1027 (((char*)sb)+MAX_SB_SIZE);
1028 if (__le32_to_cpu(bm->magic) == BITMAP_MAGIC) {
1029 locate_bitmap1(st, fd);
1030 if (awrite(fd, bm, sizeof(*bm)) !=
1031 sizeof(*bm))
1032 return 5;
1033 }
1034 }
1035 fsync(fd);
1036 return 0;
1037 }
1038
1039 static int load_super1(struct supertype *st, int fd, char *devname);
1040
1041 static unsigned long choose_bm_space(unsigned long devsize)
1042 {
1043 /* if the device is bigger than 8Gig, save 64k for bitmap usage,
1044 * if bigger than 200Gig, save 128k
1045 * NOTE: result must be multiple of 4K else bad things happen
1046 * on 4K-sector devices.
1047 */
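	/* In 512-byte sectors that means: devices under 64K get 0,
	 * devices over ~200GiB get 256 sectors (128K), devices over
	 * ~8GiB get 128 sectors (64K), everything else gets 8 sectors (4K).
	 */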
1048 if (devsize < 64*2) return 0;
1049 if (devsize - 64*2 >= 200*1024*1024*2)
1050 return 128*2;
1051 if (devsize - 4*2 > 8*1024*1024*2)
1052 return 64*2;
1053 return 4*2;
1054 }
1055
1056 static void free_super1(struct supertype *st);
1057
1058 #ifndef MDASSEMBLE
1059 static int write_init_super1(struct supertype *st)
1060 {
1061 struct mdp_superblock_1 *sb = st->sb;
1062 struct supertype *refst;
1063 int rfd;
1064 int rv = 0;
1065 unsigned long long bm_space;
1066 unsigned long long reserved;
1067 struct devinfo *di;
1068 unsigned long long dsize, array_size;
1069 unsigned long long sb_offset;
1070
1071 for (di = st->info; di && ! rv ; di = di->next) {
1072 if (di->disk.state == 1)
1073 continue;
1074 if (di->fd < 0)
1075 continue;
1076
1077 while (Kill(di->devname, NULL, 0, 1, 1) == 0)
1078 ;
1079
1080 sb->dev_number = __cpu_to_le32(di->disk.number);
1081 if (di->disk.state & (1<<MD_DISK_WRITEMOSTLY))
1082 sb->devflags |= WriteMostly1;
1083 else
1084 sb->devflags &= ~WriteMostly1;
1085
1086 if ((rfd = open("/dev/urandom", O_RDONLY)) < 0 ||
1087 read(rfd, sb->device_uuid, 16) != 16) {
1088 __u32 r[4] = {random(), random(), random(), random()};
1089 memcpy(sb->device_uuid, r, 16);
1090 }
1091 if (rfd >= 0)
1092 close(rfd);
1093
1094 sb->events = 0;
1095
1096 refst = dup_super(st);
1097 if (load_super1(refst, di->fd, NULL)==0) {
1098 struct mdp_superblock_1 *refsb = refst->sb;
1099
1100 memcpy(sb->device_uuid, refsb->device_uuid, 16);
1101 if (memcmp(sb->set_uuid, refsb->set_uuid, 16)==0) {
1102 /* same array, so preserve events and
1103 * dev_number */
1104 sb->events = refsb->events;
1105 /* bugs in 2.6.17 and earlier mean the
1106 * dev_number chosen in Manage must be preserved
1107 */
1108 if (get_linux_version() >= 2006018)
1109 sb->dev_number = refsb->dev_number;
1110 }
1111 free_super1(refst);
1112 }
1113 free(refst);
1114
1115 if (!get_dev_size(di->fd, NULL, &dsize)) {
1116 rv = 1;
1117 goto error_out;
1118 }
1119 dsize >>= 9;
1120
1121 if (dsize < 24) {
1122 close(di->fd);
1123 rv = 2;
1124 goto error_out;
1125 }
1126
1127
1128 /*
1129 * Calculate the position of the superblock.
1130 * It is always aligned to a 4K boundary and
1131 * depending on minor_version, it can be:
1132 * 0: At least 8K, but less than 12K, from end of device
1133 * 1: At start of device
1134 * 2: 4K from start of device.
1135 * Depending on the array size, we might leave extra space
1136 * for a bitmap.
1137 */
1138 array_size = __le64_to_cpu(sb->size);
1139 /* work out how much space we left for a bitmap */
1140 bm_space = choose_bm_space(array_size);
1141
1142 switch(st->minor_version) {
1143 case 0:
1144 sb_offset = dsize;
1145 sb_offset -= 8*2;
1146 sb_offset &= ~(4*2-1);
1147 sb->super_offset = __cpu_to_le64(sb_offset);
1148 sb->data_offset = __cpu_to_le64(0);
1149 if (sb_offset < array_size + bm_space)
1150 bm_space = sb_offset - array_size;
1151 sb->data_size = __cpu_to_le64(sb_offset - bm_space);
1152 break;
1153 case 1:
1154 sb->super_offset = __cpu_to_le64(0);
1155 reserved = bm_space + 4*2;
1156 /* Try for multiple of 1Meg so it is nicely aligned */
1157 #define ONE_MEG (2*1024)
1158 reserved = ((reserved + ONE_MEG-1)/ONE_MEG) * ONE_MEG;
1159 if (reserved + __le64_to_cpu(sb->size) > dsize)
1160 reserved = dsize - __le64_to_cpu(sb->size);
1161 /* force 4K alignment */
1162 reserved &= ~7ULL;
1163
1164 sb->data_offset = __cpu_to_le64(reserved);
1165 sb->data_size = __cpu_to_le64(dsize - reserved);
1166 break;
1167 case 2:
1168 sb_offset = 4*2;
1169 sb->super_offset = __cpu_to_le64(4*2);
1170 if (4*2 + 4*2 + bm_space + __le64_to_cpu(sb->size)
1171 > dsize)
1172 bm_space = dsize - __le64_to_cpu(sb->size)
1173 - 4*2 - 4*2;
1174
1175 reserved = bm_space + 4*2 + 4*2;
1176 /* Try for multiple of 1Meg so it is nicely aligned */
1177 #define ONE_MEG (2*1024)
1178 reserved = ((reserved + ONE_MEG-1)/ONE_MEG) * ONE_MEG;
1179 if (reserved + __le64_to_cpu(sb->size) > dsize)
1180 reserved = dsize - __le64_to_cpu(sb->size);
1181 /* force 4K alignment */
1182 reserved &= ~7ULL;
1183
1184 sb->data_offset = __cpu_to_le64(reserved);
1185 sb->data_size = __cpu_to_le64(dsize - reserved);
1186 break;
1187 default:
1188 fprintf(stderr, Name ": Failed to write invalid "
1189 "metadata format 1.%i to %s\n",
1190 st->minor_version, di->devname);
1191 rv = -EINVAL;
1192 goto out;
1193 }
1194
1195
1196 sb->sb_csum = calc_sb_1_csum(sb);
1197 rv = store_super1(st, di->fd);
1198 if (rv == 0 && (__le32_to_cpu(sb->feature_map) & 1))
1199 rv = st->ss->write_bitmap(st, di->fd);
1200 close(di->fd);
1201 di->fd = -1;
1202 }
1203 error_out:
1204 if (rv)
1205 fprintf(stderr, Name ": Failed to write metadata to %s\n",
1206 di->devname);
1207 out:
1208 return rv;
1209 }
1210 #endif
1211
1212 static int compare_super1(struct supertype *st, struct supertype *tst)
1213 {
1214 /*
1215 * return:
1216 * 0 same, or first was empty, and second was copied
1217 * 1 second had wrong number
1218 * 2 wrong uuid
1219 * 3 wrong other info
1220 */
1221 struct mdp_superblock_1 *first = st->sb;
1222 struct mdp_superblock_1 *second = tst->sb;
1223
1224 if (second->magic != __cpu_to_le32(MD_SB_MAGIC))
1225 return 1;
1226 if (second->major_version != __cpu_to_le32(1))
1227 return 1;
1228
1229 if (!first) {
1230 if (posix_memalign((void**)&first, 512, SUPER1_SIZE) != 0) {
1231 fprintf(stderr, Name
1232 ": %s could not allocate superblock\n", __func__);
1233 return 1;
1234 }
1235 memcpy(first, second, SUPER1_SIZE);
1236 st->sb = first;
1237 return 0;
1238 }
1239 if (memcmp(first->set_uuid, second->set_uuid, 16)!= 0)
1240 return 2;
1241
1242 if (first->ctime != second->ctime ||
1243 first->level != second->level ||
1244 first->layout != second->layout ||
1245 first->size != second->size ||
1246 first->chunksize != second->chunksize ||
1247 first->raid_disks != second->raid_disks)
1248 return 3;
1249 return 0;
1250 }
1251
1252 static int load_super1(struct supertype *st, int fd, char *devname)
1253 {
1254 unsigned long long dsize;
1255 unsigned long long sb_offset;
1256 struct mdp_superblock_1 *super;
1257 int uuid[4];
1258 struct bitmap_super_s *bsb;
1259 struct misc_dev_info *misc;
1260
1261 free_super1(st);
1262
1263 if (st->ss == NULL || st->minor_version == -1) {
1264 int bestvers = -1;
1265 struct supertype tst;
1266 __u64 bestctime = 0;
1267 /* guess... choose latest ctime */
1268 memset(&tst, 0, sizeof(tst));
1269 tst.ss = &super1;
1270 for (tst.minor_version = 0; tst.minor_version <= 2 ; tst.minor_version++) {
1271 switch(load_super1(&tst, fd, devname)) {
1272 case 0: super = tst.sb;
1273 if (bestvers == -1 ||
1274 bestctime < __le64_to_cpu(super->ctime)) {
1275 bestvers = tst.minor_version;
1276 bestctime = __le64_to_cpu(super->ctime);
1277 }
1278 free(super);
1279 tst.sb = NULL;
1280 break;
1281 case 1: return 1; /*bad device */
1282 case 2: break; /* bad, try next */
1283 }
1284 }
1285 if (bestvers != -1) {
1286 int rv;
1287 tst.minor_version = bestvers;
1288 tst.ss = &super1;
1289 tst.max_devs = MAX_DEVS;
1290 rv = load_super1(&tst, fd, devname);
1291 if (rv == 0)
1292 *st = tst;
1293 return rv;
1294 }
1295 return 2;
1296 }
1297 if (!get_dev_size(fd, devname, &dsize))
1298 return 1;
1299 dsize >>= 9;
1300
1301 if (dsize < 24) {
1302 if (devname)
1303 fprintf(stderr, Name ": %s is too small for md: size is %llu sectors.\n",
1304 devname, dsize);
1305 return 1;
1306 }
1307
1308 /*
1309 * Calculate the position of the superblock.
1310 * It is always aligned to a 4K boundary and
1311 * depending on minor_version, it can be:
1312 * 0: At least 8K, but less than 12K, from end of device
1313 * 1: At start of device
1314 * 2: 4K from start of device.
1315 */
1316 switch(st->minor_version) {
1317 case 0:
1318 sb_offset = dsize;
1319 sb_offset -= 8*2;
1320 sb_offset &= ~(4*2-1);
1321 break;
1322 case 1:
1323 sb_offset = 0;
1324 break;
1325 case 2:
1326 sb_offset = 4*2;
1327 break;
1328 default:
1329 return -EINVAL;
1330 }
1331
1332 ioctl(fd, BLKFLSBUF, 0); /* make sure we read current data */
1333
1334
1335 if (lseek64(fd, sb_offset << 9, 0)< 0LL) {
1336 if (devname)
1337 fprintf(stderr, Name ": Cannot seek to superblock on %s: %s\n",
1338 devname, strerror(errno));
1339 return 1;
1340 }
1341
1342 if (posix_memalign((void**)&super, 512, SUPER1_SIZE) != 0) {
1343 fprintf(stderr, Name ": %s could not allocate superblock\n",
1344 __func__);
1345 return 1;
1346 }
1347
1348 if (aread(fd, super, MAX_SB_SIZE) != MAX_SB_SIZE) {
1349 if (devname)
1350 fprintf(stderr, Name ": Cannot read superblock on %s\n",
1351 devname);
1352 free(super);
1353 return 1;
1354 }
1355
1356 if (__le32_to_cpu(super->magic) != MD_SB_MAGIC) {
1357 if (devname)
1358 fprintf(stderr, Name ": No super block found on %s (Expected magic %08x, got %08x)\n",
1359 devname, MD_SB_MAGIC, __le32_to_cpu(super->magic));
1360 free(super);
1361 return 2;
1362 }
1363
1364 if (__le32_to_cpu(super->major_version) != 1) {
1365 if (devname)
1366 fprintf(stderr, Name ": Cannot interpret superblock on %s - version is %d\n",
1367 devname, __le32_to_cpu(super->major_version));
1368 free(super);
1369 return 2;
1370 }
1371 if (__le64_to_cpu(super->super_offset) != sb_offset) {
1372 if (devname)
1373 fprintf(stderr, Name ": No superblock found on %s (super_offset is wrong)\n",
1374 devname);
1375 free(super);
1376 return 2;
1377 }
1378 st->sb = super;
1379
1380 bsb = (struct bitmap_super_s *)(((char*)super)+MAX_SB_SIZE);
1381
1382 misc = (struct misc_dev_info*) (((char*)super)+MAX_SB_SIZE+BM_SUPER_SIZE);
1383 misc->device_size = dsize;
1384
1385 /* Now check on the bitmap superblock */
1386 if ((__le32_to_cpu(super->feature_map)&MD_FEATURE_BITMAP_OFFSET) == 0)
1387 return 0;
1388 /* Read the bitmap superblock and make sure it looks
1389 	 * valid. If it doesn't, clear the bit. An --assemble --force
1390 * should get that written out.
1391 */
1392 locate_bitmap1(st, fd);
1393 if (aread(fd, bsb, 512) != 512)
1394 goto no_bitmap;
1395
1396 uuid_from_super1(st, uuid);
1397 if (__le32_to_cpu(bsb->magic) != BITMAP_MAGIC ||
1398 memcmp(bsb->uuid, uuid, 16) != 0)
1399 goto no_bitmap;
1400 return 0;
1401
1402 no_bitmap:
1403 super->feature_map = __cpu_to_le32(__le32_to_cpu(super->feature_map)
1404 & ~MD_FEATURE_BITMAP_OFFSET);
1405 return 0;
1406 }
1407
1408
1409 static struct supertype *match_metadata_desc1(char *arg)
1410 {
1411 struct supertype *st = calloc(1, sizeof(*st));
1412 if (!st)
1413 return st;
1414
1415 st->container_dev = NoMdDev;
1416 st->ss = &super1;
1417 st->max_devs = MAX_DEVS;
1418 st->sb = NULL;
1419 /* leading zeros can be safely ignored. --detail generates them. */
1420 while (*arg == '0')
1421 arg++;
1422 if (strcmp(arg, "1.0") == 0 ||
1423 strcmp(arg, "1.00") == 0) {
1424 st->minor_version = 0;
1425 return st;
1426 }
1427 if (strcmp(arg, "1.1") == 0 ||
1428 strcmp(arg, "1.01") == 0
1429 ) {
1430 st->minor_version = 1;
1431 return st;
1432 }
1433 if (strcmp(arg, "1.2") == 0 ||
1434 #ifndef DEFAULT_OLD_METADATA /* ifdef in super0.c */
1435 strcmp(arg, "default") == 0 ||
1436 #endif /* DEFAULT_OLD_METADATA */
1437 strcmp(arg, "1.02") == 0) {
1438 st->minor_version = 2;
1439 return st;
1440 }
1441 if (strcmp(arg, "1") == 0 ||
1442 strcmp(arg, "default") == 0) {
1443 st->minor_version = -1;
1444 return st;
1445 }
1446
1447 free(st);
1448 return NULL;
1449 }
1450
1451 /* find available size on device with this devsize, using
1452  * superblock type st, and reserving space for
1453  * a possible bitmap
1454 */
1455 static __u64 avail_size1(struct supertype *st, __u64 devsize)
1456 {
1457 struct mdp_superblock_1 *super = st->sb;
1458 if (devsize < 24)
1459 return 0;
1460
1461 if (super == NULL)
1462 /* creating: allow suitable space for bitmap */
1463 devsize -= choose_bm_space(devsize);
1464 #ifndef MDASSEMBLE
1465 else if (__le32_to_cpu(super->feature_map)&MD_FEATURE_BITMAP_OFFSET) {
1466 /* hot-add. allow for actual size of bitmap */
1467 struct bitmap_super_s *bsb;
1468 bsb = (struct bitmap_super_s *)(((char*)super)+MAX_SB_SIZE);
1469 devsize -= bitmap_sectors(bsb);
1470 }
1471 #endif
1472
1473 if (st->minor_version < 0)
1474 /* not specified, so time to set default */
1475 st->minor_version = 2;
1476 if (super == NULL && st->minor_version > 0) {
1477 /* haven't committed to a size yet, so allow some
1478 * slack for alignment of data_offset.
1479 		 * We don't have access to device details, so allow
1480 * 1 Meg if bigger than 1Gig
1481 */
1482 if (devsize > 1024*1024*2)
1483 devsize -= 1024*2;
1484 }
1485 switch(st->minor_version) {
1486 case 0:
1487 /* at end */
1488 return ((devsize - 8*2 ) & ~(4*2-1));
1489 case 1:
1490 /* at start, 4K for superblock and possible bitmap */
1491 return devsize - 4*2;
1492 case 2:
1493 /* 4k from start, 4K for superblock and possible bitmap */
1494 return devsize - (4+4)*2;
1495 }
1496 return 0;
1497 }
1498
1499 static int
1500 add_internal_bitmap1(struct supertype *st,
1501 int *chunkp, int delay, int write_behind,
1502 unsigned long long size,
1503 int may_change, int major)
1504 {
1505 /*
1506 * If not may_change, then this is a 'Grow' without sysfs support for
1507 * bitmaps, and the bitmap must fit after the superblock at 1K offset.
1508 	 * If may_change, then this is a create or a Grow with sysfs support,
1509 * and we can put the bitmap wherever we like.
1510 *
1511 * size is in sectors, chunk is in bytes !!!
1512 */
1513
1514 unsigned long long bits;
1515 unsigned long long max_bits;
1516 unsigned long long min_chunk;
1517 long offset;
1518 unsigned long long chunk = *chunkp;
1519 int room = 0;
1520 int creating = 0;
1521 struct mdp_superblock_1 *sb = st->sb;
1522 bitmap_super_t *bms = (bitmap_super_t*)(((char*)sb) + MAX_SB_SIZE);
1523 int uuid[4];
1524
1525 if (__le64_to_cpu(sb->data_size) == 0)
1526 /* Must be creating the array, else data_size would be non-zero */
1527 creating = 1;
1528 switch(st->minor_version) {
1529 case 0:
1530 /* either 3K after the superblock (when hot-add),
1531 * or some amount of space before.
1532 */
1533 if (creating) {
1534 /* We are creating array, so we *know* how much room has
1535 * been left.
1536 */
1537 offset = 0;
1538 room = choose_bm_space(__le64_to_cpu(sb->size));
1539 } else {
1540 room = __le64_to_cpu(sb->super_offset)
1541 - __le64_to_cpu(sb->data_offset)
1542 - __le64_to_cpu(sb->data_size);
1543
1544 if (!may_change || (room < 3*2 &&
1545 __le32_to_cpu(sb->max_dev) <= 384)) {
1546 room = 3*2;
1547 offset = 1*2;
1548 } else {
1549 offset = 0; /* means movable offset */
1550 }
1551 }
1552 break;
1553 case 1:
1554 case 2: /* between superblock and data */
1555 if (creating) {
1556 offset = 4*2;
1557 room = choose_bm_space(__le64_to_cpu(sb->size));
1558 } else {
1559 room = __le64_to_cpu(sb->data_offset)
1560 - __le64_to_cpu(sb->super_offset);
1561 if (!may_change) {
1562 room -= 2; /* Leave 1K for superblock */
1563 offset = 2;
1564 } else {
1565 room -= 4*2; /* leave 4K for superblock */
1566 offset = 4*2;
1567 }
1568 }
1569 break;
1570 default:
1571 return 0;
1572 }
1573
1574 if (chunk == UnSet && room > 128*2)
1575 /* Limit to 128K of bitmap when chunk size not requested */
1576 room = 128*2;
1577
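	/* The bitmap needs roughly one bit per chunk of the array:
	 * bits = (size*512)/chunk + 1, and the chunk is doubled until
	 * those bits fit in the room available after the 256-byte
	 * bitmap superblock (max_bits).
	 */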
1578 max_bits = (room * 512 - sizeof(bitmap_super_t)) * 8;
1579
1580 min_chunk = 4096; /* sub-page chunks don't work yet.. */
1581 bits = (size*512)/min_chunk +1;
1582 while (bits > max_bits) {
1583 min_chunk *= 2;
1584 bits = (bits+1)/2;
1585 }
1586 if (chunk == UnSet) {
1587 		/* For practical purposes, 64Meg is a good
1588 * default chunk size for internal bitmaps.
1589 */
1590 chunk = min_chunk;
1591 if (chunk < 64*1024*1024)
1592 chunk = 64*1024*1024;
1593 } else if (chunk < min_chunk)
1594 return 0; /* chunk size too small */
1595 if (chunk == 0) /* rounding problem */
1596 return 0;
1597
1598 if (offset == 0) {
1599 /* start bitmap on a 4K boundary with enough space for
1600 * the bitmap
1601 */
1602 bits = (size*512) / chunk + 1;
1603 room = ((bits+7)/8 + sizeof(bitmap_super_t) +4095)/4096;
1604 room *= 8; /* convert 4K blocks to sectors */
1605 offset = -room;
1606 }
1607
1608 sb->bitmap_offset = __cpu_to_le32(offset);
1609
1610 sb->feature_map = __cpu_to_le32(__le32_to_cpu(sb->feature_map)
1611 | MD_FEATURE_BITMAP_OFFSET);
1612 memset(bms, 0, sizeof(*bms));
1613 bms->magic = __cpu_to_le32(BITMAP_MAGIC);
1614 bms->version = __cpu_to_le32(major);
1615 uuid_from_super1(st, uuid);
1616 memcpy(bms->uuid, uuid, 16);
1617 bms->chunksize = __cpu_to_le32(chunk);
1618 bms->daemon_sleep = __cpu_to_le32(delay);
1619 bms->sync_size = __cpu_to_le64(size);
1620 bms->write_behind = __cpu_to_le32(write_behind);
1621
1622 *chunkp = chunk;
1623 return 1;
1624 }
1625
1626 static void locate_bitmap1(struct supertype *st, int fd)
1627 {
1628 unsigned long long offset;
1629 struct mdp_superblock_1 *sb;
1630 int mustfree = 0;
1631
1632 if (!st->sb) {
1633 if (st->ss->load_super(st, fd, NULL))
1634 return; /* no error I hope... */
1635 mustfree = 1;
1636 }
1637 sb = st->sb;
1638
1639 offset = __le64_to_cpu(sb->super_offset);
1640 offset += (int32_t) __le32_to_cpu(sb->bitmap_offset);
1641 if (mustfree)
1642 free(sb);
1643 lseek64(fd, offset<<9, 0);
1644 }
1645
1646 static int write_bitmap1(struct supertype *st, int fd)
1647 {
1648 struct mdp_superblock_1 *sb = st->sb;
1649 bitmap_super_t *bms = (bitmap_super_t*)(((char*)sb)+MAX_SB_SIZE);
1650 int rv = 0;
1651 void *buf;
1652 int towrite, n;
1653
1654 locate_bitmap1(st, fd);
1655
1656 if (posix_memalign(&buf, 4096, 4096))
1657 return -ENOMEM;
1658
1659 memset(buf, 0xff, 4096);
1660 memcpy(buf, (char *)bms, sizeof(bitmap_super_t));
1661
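	/* one bit per bitmap chunk: sync_size is in 512-byte sectors and
	 * chunksize in bytes, so sync_size / (chunksize>>9) gives the
	 * number of chunks; that many bits, plus the bitmap superblock,
	 * rounded up to a whole 512-byte sector, is what gets written.
	 */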
1662 towrite = __le64_to_cpu(bms->sync_size) / (__le32_to_cpu(bms->chunksize)>>9);
1663 towrite = (towrite+7) >> 3; /* bits to bytes */
1664 towrite += sizeof(bitmap_super_t);
1665 towrite = ROUND_UP(towrite, 512);
1666 while (towrite > 0) {
1667 n = towrite;
1668 if (n > 4096)
1669 n = 4096;
1670 n = awrite(fd, buf, n);
1671 if (n > 0)
1672 towrite -= n;
1673 else
1674 break;
1675 memset(buf, 0xff, 4096);
1676 }
1677 fsync(fd);
1678 if (towrite)
1679 rv = -2;
1680
1681 free(buf);
1682 return rv;
1683 }
1684
1685 static void free_super1(struct supertype *st)
1686 {
1687 if (st->sb)
1688 free(st->sb);
1689 while (st->info) {
1690 struct devinfo *di = st->info;
1691 st->info = di->next;
1692 if (di->fd >= 0)
1693 close(di->fd);
1694 free(di);
1695 }
1696 st->sb = NULL;
1697 }
1698
1699 #ifndef MDASSEMBLE
1700 static int validate_geometry1(struct supertype *st, int level,
1701 int layout, int raiddisks,
1702 int *chunk, unsigned long long size,
1703 char *subdev, unsigned long long *freesize,
1704 int verbose)
1705 {
1706 unsigned long long ldsize;
1707 int fd;
1708
1709 if (level == LEVEL_CONTAINER) {
1710 if (verbose)
1711 fprintf(stderr, Name ": 1.x metadata does not support containers\n");
1712 return 0;
1713 }
1714 if (chunk && *chunk == UnSet)
1715 *chunk = DEFAULT_CHUNK;
1716
1717 if (!subdev)
1718 return 1;
1719
1720 fd = open(subdev, O_RDONLY|O_EXCL, 0);
1721 if (fd < 0) {
1722 if (verbose)
1723 fprintf(stderr, Name ": super1.x cannot open %s: %s\n",
1724 subdev, strerror(errno));
1725 return 0;
1726 }
1727
1728 if (!get_dev_size(fd, subdev, &ldsize)) {
1729 close(fd);
1730 return 0;
1731 }
1732 close(fd);
1733
1734 *freesize = avail_size1(st, ldsize >> 9);
1735 return 1;
1736 }
1737 #endif /* MDASSEMBLE */
1738
1739 struct superswitch super1 = {
1740 #ifndef MDASSEMBLE
1741 .examine_super = examine_super1,
1742 .brief_examine_super = brief_examine_super1,
1743 .export_examine_super = export_examine_super1,
1744 .detail_super = detail_super1,
1745 .brief_detail_super = brief_detail_super1,
1746 .export_detail_super = export_detail_super1,
1747 .write_init_super = write_init_super1,
1748 .validate_geometry = validate_geometry1,
1749 .add_to_super = add_to_super1,
1750 #endif
1751 .match_home = match_home1,
1752 .uuid_from_super = uuid_from_super1,
1753 .getinfo_super = getinfo_super1,
1754 .container_content = container_content1,
1755 .update_super = update_super1,
1756 .init_super = init_super1,
1757 .store_super = store_super1,
1758 .compare_super = compare_super1,
1759 .load_super = load_super1,
1760 .match_metadata_desc = match_metadata_desc1,
1761 .avail_size = avail_size1,
1762 .add_internal_bitmap = add_internal_bitmap1,
1763 .locate_bitmap = locate_bitmap1,
1764 .write_bitmap = write_bitmap1,
1765 .free_super = free_super1,
1766 #if __BYTE_ORDER == BIG_ENDIAN
1767 .swapuuid = 0,
1768 #else
1769 .swapuuid = 1,
1770 #endif
1771 .name = "1.x",
1772 };