/*
 * mdadm - manage Linux "md" devices aka RAID arrays.
 *
 * Copyright (C) 2001-2016 Neil Brown <neilb@suse.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Neil Brown
 * Email: <neilb@suse.de>
 */

#include <stddef.h>
#include "mdadm.h"
/*
 * The version-1 superblock :
 * All numeric fields are little-endian.
 *
 * total size: 256 bytes plus 2 per device.
 *  1K allows 384 devices.
 */
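/*
 * Size sanity check: with the 256-byte fixed part below, a 1K superblock
 * leaves (1024 - 256) / 2 = 384 two-byte role slots, which is where the
 * "384 devices" figure above comes from.
 */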
struct mdp_superblock_1 {
	/* constant array information - 128 bytes */
	__u32 magic;		/* MD_SB_MAGIC: 0xa92b4efc - little endian */
	__u32 major_version;	/* 1 */
	__u32 feature_map;	/* 0 for now */
	__u32 pad0;		/* always set to 0 when writing */

	__u8 set_uuid[16];	/* user-space generated. */
	char set_name[32];	/* set and interpreted by user-space */

	__u64 ctime;		/* lo 40 bits are seconds, top 24 are microseconds or 0 */
	__u32 level;		/* -4 (multipath), -1 (linear), 0,1,4,5 */
	__u32 layout;		/* used for raid5, raid6, raid10, and raid0 */
	__u64 size;		/* used size of component devices, in 512byte sectors */

	__u32 chunksize;	/* in 512byte sectors */
	__u32 raid_disks;
	union {
		__u32 bitmap_offset;	/* sectors after start of superblock that bitmap starts
					 * NOTE: signed, so bitmap can be before superblock
					 * only meaningful if feature_map[0] is set.
					 */

		/* only meaningful when feature_map[MD_FEATURE_PPL] is set */
		struct {
			__s16 offset;	/* sectors from start of superblock that ppl starts */
			__u16 size;	/* ppl size in sectors */
		} ppl;
	};

	/* These are only valid with feature bit '4' */
	__u32 new_level;	/* new level we are reshaping to */
	__u64 reshape_position;	/* next address in array-space for reshape */
	__u32 delta_disks;	/* change in number of raid_disks */
	__u32 new_layout;	/* new layout */
	__u32 new_chunk;	/* new chunk size (sectors) */
	__u32 new_offset;	/* signed number to add to data_offset in new
				 * layout.  0 == no-change.  This can be
				 * different on each device in the array.
				 */

	/* constant this-device information - 64 bytes */
	__u64 data_offset;	/* sector start of data, often 0 */
	__u64 data_size;	/* sectors in this device that can be used for data */
	__u64 super_offset;	/* sector start of this superblock */
	union {
		__u64 recovery_offset;	/* sectors before this offset (from data_offset) have been recovered */
		__u64 journal_tail;	/* journal tail of journal device (from data_offset) */
	};
	__u32 dev_number;	/* permanent identifier of this device - not role in raid */
	__u32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */
	__u8 device_uuid[16];	/* user-space settable, ignored by kernel */
	__u8 devflags;		/* per-device flags.  Only one defined... */
#define WriteMostly1	1	/* mask for writemostly flag in above */
#define FailFast1	2	/* Device should get FailFast requests */
	/* bad block log.  If there are any bad blocks the feature flag is set.
	 * if offset and size are non-zero, that space is reserved and available.
	 */
	__u8 bblog_shift;	/* shift from sectors to block size for badblock list */
	__u16 bblog_size;	/* number of sectors reserved for badblock list */
	__u32 bblog_offset;	/* sector offset from superblock to bblog, signed */

	/* array state information - 64 bytes */
	__u64 utime;		/* 40 bits second, 24 bits microseconds */
	__u64 events;		/* incremented when superblock updated */
	__u64 resync_offset;	/* data before this offset (from data_offset) known to be in sync */
	__u32 sb_csum;		/* checksum upto dev_roles[max_dev] */
	__u32 max_dev;		/* size of dev_roles[] array to consider */
	__u8 pad3[64-32];	/* set to 0 when writing */

	/* device state information.  Indexed by dev_number.
	 * 2 bytes per device
	 * Note there are no per-device state flags.  State information is rolled
	 * into the 'roles' value.  If a device is spare or faulty, then it doesn't
	 * have a meaningful role.
	 */
	__u16 dev_roles[0];	/* role in array, or 0xffff for a spare, or 0xfffe for faulty */
};

#define MAX_SB_SIZE 4096
/* bitmap super size is 256, but we round up to a sector for alignment */
#define BM_SUPER_SIZE 512
#define MAX_DEVS ((int)(MAX_SB_SIZE - sizeof(struct mdp_superblock_1)) / 2)
#define SUPER1_SIZE	(MAX_SB_SIZE + BM_SUPER_SIZE \
			 + sizeof(struct misc_dev_info))

struct misc_dev_info {
	__u64 device_size;
};
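
/*
 * Derived limits, for orientation: MAX_DEVS works out to
 * (4096 - 256) / 2 = 1920 role entries, and SUPER1_SIZE to
 * 4096 + 512 + sizeof(struct misc_dev_info) = 4616 bytes of in-memory
 * buffer per device.
 */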

#define MULTIPLE_PPL_AREA_SIZE_SUPER1 (1024 * 1024) /* Size of the whole
						     * multiple PPL area
						     */
/* feature_map bits */
#define MD_FEATURE_BITMAP_OFFSET	1
#define MD_FEATURE_RECOVERY_OFFSET	2 /* recovery_offset is present and
					   * must be honoured
					   */
#define MD_FEATURE_RESHAPE_ACTIVE	4
#define MD_FEATURE_BAD_BLOCKS		8 /* badblock list is not empty */
#define MD_FEATURE_REPLACEMENT		16 /* This device is replacing an
					    * active device with same 'role'.
					    * 'recovery_offset' is also set.
					    */
#define MD_FEATURE_RESHAPE_BACKWARDS	32 /* Reshape doesn't change number
					    * of devices, but is going
					    * backwards anyway.
					    */
#define MD_FEATURE_NEW_OFFSET		64 /* new_offset must be honoured */
#define MD_FEATURE_BITMAP_VERSIONED	256 /* bitmap version number checked properly */
#define MD_FEATURE_JOURNAL		512 /* support write journal */
#define MD_FEATURE_PPL			1024 /* support PPL */
#define MD_FEATURE_MUTLIPLE_PPLS	2048 /* support for multiple PPLs */
#define MD_FEATURE_RAID0_LAYOUT		4096 /* layout is meaningful in RAID0 */
#define MD_FEATURE_ALL			(MD_FEATURE_BITMAP_OFFSET	\
					|MD_FEATURE_RECOVERY_OFFSET	\
					|MD_FEATURE_RESHAPE_ACTIVE	\
					|MD_FEATURE_BAD_BLOCKS		\
					|MD_FEATURE_REPLACEMENT		\
					|MD_FEATURE_RESHAPE_BACKWARDS	\
					|MD_FEATURE_NEW_OFFSET		\
					|MD_FEATURE_BITMAP_VERSIONED	\
					|MD_FEATURE_JOURNAL		\
					|MD_FEATURE_PPL			\
					|MD_FEATURE_MUTLIPLE_PPLS	\
					|MD_FEATURE_RAID0_LAYOUT	\
					)

static int role_from_sb(struct mdp_superblock_1 *sb)
{
	unsigned int d;
	int role;

	d = __le32_to_cpu(sb->dev_number);
	if (d < __le32_to_cpu(sb->max_dev))
		role = __le16_to_cpu(sb->dev_roles[d]);
	else
		role = MD_DISK_ROLE_SPARE;
	return role;
}

/* return how many bytes are needed for bitmap, for cluster-md each node
 * should have its own bitmap */
static unsigned int calc_bitmap_size(bitmap_super_t *bms, unsigned int boundary)
{
	unsigned long long bits, bytes;

	bits = bitmap_bits(__le64_to_cpu(bms->sync_size),
			   __le32_to_cpu(bms->chunksize));
	bytes = (bits+7) >> 3;
	bytes += sizeof(bitmap_super_t);
	bytes = ROUND_UP(bytes, boundary);

	return bytes;
}
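
/*
 * Rough worked example (assuming bitmap_bits() yields one bit per bitmap
 * chunk, i.e. sync_size sectors * 512 / chunksize bytes): a 1GiB array
 * (2097152 sectors) with a 64KiB bitmap chunk needs 16384 bits = 2048
 * bytes, plus the 256-byte bitmap superblock, rounded up to 'boundary' -
 * so 4096 bytes when called with a 4096-byte boundary.
 */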

static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	unsigned int disk_csum, csum;
	unsigned long long newcsum;
	int size = sizeof(*sb) + __le32_to_cpu(sb->max_dev)*2;
	unsigned int *isuper = (unsigned int *)sb;

	/* make sure I can count... */
	if (offsetof(struct mdp_superblock_1,data_offset) != 128 ||
	    offsetof(struct mdp_superblock_1, utime) != 192 ||
	    sizeof(struct mdp_superblock_1) != 256) {
		fprintf(stderr, "WARNING - superblock isn't sized correctly\n");
	}

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (; size >= 4; size -= 4) {
		newcsum += __le32_to_cpu(*isuper);
		isuper++;
	}

	if (size == 2)
		newcsum += __le16_to_cpu(*(unsigned short*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return __cpu_to_le32(csum);
}
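
/*
 * The final step above folds the high 32 bits of the 64-bit running sum
 * back into the low 32 bits: e.g. a running sum of 0x200000003 becomes
 * 0x00000003 + 0x2 = 0x5, so the checksum stays a 32-bit quantity no
 * matter how many words were summed.
 */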

/*
 * Information related to file descriptor used for aligned reads/writes.
 * Cache the block size.
 */
struct align_fd {
	int fd;
	int blk_sz;
};

static void init_afd(struct align_fd *afd, int fd)
{
	afd->fd = fd;
	if (!get_dev_sector_size(afd->fd, NULL, (unsigned int *)&afd->blk_sz))
		afd->blk_sz = 512;
}

static char abuf[4096+4096];

static int aread(struct align_fd *afd, void *buf, int len)
{
	/* aligned read.
	 * On devices with a 4K sector size, we need to read
	 * the full sector and copy relevant bits into
	 * the buffer
	 */
	int bsize, iosize;
	char *b;
	int n;

	bsize = afd->blk_sz;

	if (!bsize || bsize > 4096 || len > 4096) {
		if (!bsize)
			fprintf(stderr, "WARNING - aread() called with invalid block size\n");
		return -1;
	}
	b = ROUND_UP_PTR((char *)abuf, 4096);

	for (iosize = 0; iosize < len; iosize += bsize)
		;
	n = read(afd->fd, b, iosize);
	if (n <= 0)
		return n;
	lseek(afd->fd, len - n, 1);
	if (n > len)
		n = len;
	memcpy(buf, b, n);
	return n;
}

static int awrite(struct align_fd *afd, void *buf, int len)
{
	/* aligned write.
	 * On devices with a 4K sector size, we need to write
	 * the full sector.  We pre-read if the sector is larger
	 * than the write.
	 * The address must be sector-aligned.
	 */
	int bsize, iosize;
	char *b;
	int n;

	bsize = afd->blk_sz;
	if (!bsize || bsize > 4096 || len > 4096) {
		if (!bsize)
			fprintf(stderr, "WARNING - awrite() called with invalid block size\n");
		return -1;
	}
	b = ROUND_UP_PTR((char *)abuf, 4096);

	for (iosize = 0; iosize < len ; iosize += bsize)
		;

	if (len != iosize) {
		n = read(afd->fd, b, iosize);
		if (n <= 0)
			return n;
		lseek(afd->fd, -n, 1);
	}

	memcpy(b, buf, len);
	n = write(afd->fd, b, iosize);
	if (n <= 0)
		return n;
	lseek(afd->fd, len - n, 1);
	return len;
}
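
/*
 * Typical usage sketch (illustrative only; 'fd' is assumed to be an open
 * block device already positioned on a sector boundary):
 *
 *	struct align_fd afd;
 *	char hdr[512];
 *
 *	init_afd(&afd, fd);
 *	if (aread(&afd, hdr, sizeof(hdr)) != sizeof(hdr))
 *		return -1;
 *
 * Both helpers round the transfer up to whole logical sectors and then
 * reposition the file offset to just past the 'len' bytes the caller
 * asked for.
 */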

static inline unsigned int md_feature_any_ppl_on(__u32 feature_map)
{
	return ((__cpu_to_le32(feature_map) &
		 (MD_FEATURE_PPL | MD_FEATURE_MUTLIPLE_PPLS)));
}

static inline unsigned int choose_ppl_space(int chunk)
{
	return (PPL_HEADER_SIZE >> 9) + (chunk > 128*2 ? chunk : 128*2);
}
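
/*
 * Example (assuming PPL_HEADER_SIZE is 4096 bytes, i.e. 8 sectors): a
 * 512KiB chunk (1024 sectors) gets 8 + 1024 = 1032 sectors of PPL space,
 * while any chunk of 128KiB (256 sectors) or less gets the floor of
 * 8 + 256 = 264 sectors.
 */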

static void examine_super1(struct supertype *st, char *homehost)
{
	struct mdp_superblock_1 *sb = st->sb;
	bitmap_super_t *bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);
	time_t atime;
	unsigned int d;
	int role;
	int delta_extra = 0;
	int i;
	char *c;
	int l = homehost ? strlen(homehost) : 0;
	int layout;
	unsigned long long sb_offset;
	struct mdinfo info;
	int inconsistent = 0;

	printf("          Magic : %08x\n", __le32_to_cpu(sb->magic));
	printf("        Version : 1");
	sb_offset = __le64_to_cpu(sb->super_offset);
	if (sb_offset <= 4)
		printf(".1\n");
	else if (sb_offset <= 8)
		printf(".2\n");
	else
		printf(".0\n");
	printf("    Feature Map : 0x%x\n", __le32_to_cpu(sb->feature_map));
	printf("     Array UUID : ");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->set_uuid[i]);
	}
	printf("\n");
	printf("           Name : %.32s", sb->set_name);
	if (l > 0 && l < 32 &&
	    sb->set_name[l] == ':' &&
	    strncmp(sb->set_name, homehost, l) == 0)
		printf(" (local to host %s)", homehost);
	printf("\n");
	if (bms->nodes > 0 &&
	    (__le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET))
		printf("   Cluster Name : %-64s\n", bms->cluster_name);
	atime = __le64_to_cpu(sb->ctime) & 0xFFFFFFFFFFULL;
	printf("  Creation Time : %.24s\n", ctime(&atime));
	c = map_num(pers, __le32_to_cpu(sb->level));
	printf("     Raid Level : %s\n", c?c:"-unknown-");
	printf("   Raid Devices : %d\n", __le32_to_cpu(sb->raid_disks));
	printf("\n");
	printf(" Avail Dev Size : %llu sectors%s\n",
	       (unsigned long long)__le64_to_cpu(sb->data_size),
	       human_size(__le64_to_cpu(sb->data_size)<<9));
	if (__le32_to_cpu(sb->level) > 0) {
		int ddsks = 0, ddsks_denom = 1;
		switch(__le32_to_cpu(sb->level)) {
		case 1: ddsks = 1; break;
		case 4:
		case 5: ddsks = __le32_to_cpu(sb->raid_disks)-1; break;
		case 6: ddsks = __le32_to_cpu(sb->raid_disks)-2; break;
		case 10:
			layout = __le32_to_cpu(sb->layout);
			ddsks = __le32_to_cpu(sb->raid_disks);
			ddsks_denom = (layout&255) * ((layout>>8)&255);
		}
		if (ddsks) {
			long long asize = __le64_to_cpu(sb->size);
			asize = (asize << 9) * ddsks / ddsks_denom;
			printf("     Array Size : %llu KiB%s\n",
			       asize >> 10, human_size(asize));
		}
		if (sb->size != sb->data_size)
			printf("  Used Dev Size : %llu sectors%s\n",
			       (unsigned long long)__le64_to_cpu(sb->size),
			       human_size(__le64_to_cpu(sb->size)<<9));
	}
	if (sb->data_offset)
		printf("    Data Offset : %llu sectors\n",
		       (unsigned long long)__le64_to_cpu(sb->data_offset));
	if (sb->new_offset &&
	    (__le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) {
		unsigned long long offset = __le64_to_cpu(sb->data_offset);
		offset += (signed)(int32_t)__le32_to_cpu(sb->new_offset);
		printf("     New Offset : %llu sectors\n", offset);
	}
	printf("   Super Offset : %llu sectors\n",
	       (unsigned long long)__le64_to_cpu(sb->super_offset));
	if (__le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET)
		printf("Recovery Offset : %llu sectors\n",
		       (unsigned long long)__le64_to_cpu(sb->recovery_offset));

	st->ss->getinfo_super(st, &info, NULL);
	if (info.space_after != 1 &&
	    !(__le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) {
		printf("   Unused Space : before=%llu sectors, ",
		       info.space_before);
		if (info.space_after < INT64_MAX)
			printf("after=%llu sectors\n", info.space_after);
		else
			printf("after=-%llu sectors DEVICE TOO SMALL\n",
			       UINT64_MAX - info.space_after);
	}
	printf("          State : %s%s\n",
	       (__le64_to_cpu(sb->resync_offset) + 1) ? "active":"clean",
	       (info.space_after > INT64_MAX) ? " TRUNCATED DEVICE" : "");
	printf("    Device UUID : ");
	for (i = 0; i < 16; i++) {
		if ((i & 3)==0 && i != 0)
			printf(":");
		printf("%02x", sb->device_uuid[i]);
	}
	printf("\n");
	printf("\n");
	if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
		printf("Internal Bitmap : %ld sectors from superblock\n",
		       (long)(int32_t)__le32_to_cpu(sb->bitmap_offset));
	} else if (md_feature_any_ppl_on(sb->feature_map)) {
		printf("            PPL : %u sectors at offset %d sectors from superblock\n",
		       __le16_to_cpu(sb->ppl.size),
		       __le16_to_cpu(sb->ppl.offset));
	}
	if (sb->feature_map & __cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE)) {
		printf("  Reshape pos'n : %llu%s\n", (unsigned long long)
		       __le64_to_cpu(sb->reshape_position)/2,
		       human_size(__le64_to_cpu(sb->reshape_position)<<9));
		if (__le32_to_cpu(sb->delta_disks)) {
			printf("  Delta Devices : %d",
			       __le32_to_cpu(sb->delta_disks));
			printf(" (%d->%d)\n",
			       __le32_to_cpu(sb->raid_disks) -
			       __le32_to_cpu(sb->delta_disks),
			       __le32_to_cpu(sb->raid_disks));
			if ((int)__le32_to_cpu(sb->delta_disks) < 0)
				delta_extra = -__le32_to_cpu(sb->delta_disks);
		}
		if (__le32_to_cpu(sb->new_level) != __le32_to_cpu(sb->level)) {
			c = map_num(pers, __le32_to_cpu(sb->new_level));
			printf("      New Level : %s\n", c?c:"-unknown-");
		}
		if (__le32_to_cpu(sb->new_layout) !=
		    __le32_to_cpu(sb->layout)) {
			if (__le32_to_cpu(sb->level) == 5) {
				c = map_num(r5layout,
					    __le32_to_cpu(sb->new_layout));
				printf("     New Layout : %s\n", c?c:"-unknown-");
			}
			if (__le32_to_cpu(sb->level) == 6) {
				c = map_num(r6layout,
					    __le32_to_cpu(sb->new_layout));
				printf("     New Layout : %s\n", c?c:"-unknown-");
			}
			if (__le32_to_cpu(sb->level) == 10) {
				printf("     New Layout :");
				print_r10_layout(__le32_to_cpu(sb->new_layout));
				printf("\n");
			}
		}
		if (__le32_to_cpu(sb->new_chunk) !=
		    __le32_to_cpu(sb->chunksize))
			printf("  New Chunksize : %dK\n",
			       __le32_to_cpu(sb->new_chunk)/2);
		printf("\n");
	}
	if (sb->devflags) {
		printf("          Flags :");
		if (sb->devflags & WriteMostly1)
			printf(" write-mostly");
		if (sb->devflags & FailFast1)
			printf(" failfast");
		printf("\n");
	}

	atime = __le64_to_cpu(sb->utime) & 0xFFFFFFFFFFULL;
	printf("    Update Time : %.24s\n", ctime(&atime));

	if (sb->bblog_size && sb->bblog_offset) {
		printf("  Bad Block Log : %d entries available at offset %ld sectors",
		       __le16_to_cpu(sb->bblog_size)*512/8,
		       (long)(int32_t)__le32_to_cpu(sb->bblog_offset));
		if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BAD_BLOCKS))
			printf(" - bad blocks present.");
		printf("\n");
	}

	if (calc_sb_1_csum(sb) == sb->sb_csum)
		printf("       Checksum : %x - correct\n",
		       __le32_to_cpu(sb->sb_csum));
	else
		printf("       Checksum : %x - expected %x\n",
		       __le32_to_cpu(sb->sb_csum),
		       __le32_to_cpu(calc_sb_1_csum(sb)));
	printf("         Events : %llu\n",
	       (unsigned long long)__le64_to_cpu(sb->events));
	printf("\n");
	if (__le32_to_cpu(sb->level) == 0 &&
	    (sb->feature_map & __cpu_to_le32(MD_FEATURE_RAID0_LAYOUT))) {
		c = map_num(r0layout, __le32_to_cpu(sb->layout));
		printf("         Layout : %s\n", c?c:"-unknown-");
	}
	if (__le32_to_cpu(sb->level) == 5) {
		c = map_num(r5layout, __le32_to_cpu(sb->layout));
		printf("         Layout : %s\n", c?c:"-unknown-");
	}
	if (__le32_to_cpu(sb->level) == 6) {
		c = map_num(r6layout, __le32_to_cpu(sb->layout));
		printf("         Layout : %s\n", c?c:"-unknown-");
	}
	if (__le32_to_cpu(sb->level) == 10) {
		int lo = __le32_to_cpu(sb->layout);
		printf("         Layout :");
		print_r10_layout(lo);
		printf("\n");
	}
	switch(__le32_to_cpu(sb->level)) {
	case 0:
	case 4:
	case 5:
	case 6:
	case 10:
		printf("     Chunk Size : %dK\n",
		       __le32_to_cpu(sb->chunksize)/2);
		break;
	case -1:
		printf("       Rounding : %dK\n",
		       __le32_to_cpu(sb->chunksize)/2);
		break;
	default:
		break;
	}
	printf("\n");
	printf("    Device Role : ");
	role = role_from_sb(sb);
	if (role >= MD_DISK_ROLE_FAULTY)
		printf("spare\n");
	else if (role == MD_DISK_ROLE_JOURNAL)
		printf("Journal\n");
	else if (sb->feature_map & __cpu_to_le32(MD_FEATURE_REPLACEMENT))
		printf("Replacement device %d\n", role);
	else
		printf("Active device %d\n", role);

	printf("    Array State : ");
	for (d = 0; d < __le32_to_cpu(sb->raid_disks) + delta_extra; d++) {
		int cnt = 0;
		unsigned int i;
		for (i = 0; i < __le32_to_cpu(sb->max_dev); i++) {
			unsigned int role = __le16_to_cpu(sb->dev_roles[i]);
			if (role == d)
				cnt++;
		}
		if (cnt == 2 && __le32_to_cpu(sb->level) > 0)
			printf("R");
		else if (cnt == 1)
			printf("A");
		else if (cnt == 0)
			printf(".");
		else {
			printf("?");
			inconsistent = 1;
		}
	}
#if 0
	/* This is confusing too */
	faulty = 0;
	for (i = 0; i < __le32_to_cpu(sb->max_dev); i++) {
		int role = __le16_to_cpu(sb->dev_roles[i]);
		if (role == MD_DISK_ROLE_FAULTY)
			faulty++;
	}
	if (faulty)
		printf(" %d failed", faulty);
#endif
	printf(" ('A' == active, '.' == missing, 'R' == replacing)");
	printf("\n");
	for (d = 0; d < __le32_to_cpu(sb->max_dev); d++) {
		unsigned int r = __le16_to_cpu(sb->dev_roles[d]);
		if (r <= MD_DISK_ROLE_MAX &&
		    r > __le32_to_cpu(sb->raid_disks) + delta_extra)
			inconsistent = 1;
	}
	if (inconsistent) {
		printf("WARNING Array state is inconsistent - each number should appear only once\n");
		for (d = 0; d < __le32_to_cpu(sb->max_dev); d++)
			if (__le16_to_cpu(sb->dev_roles[d]) >=
			    MD_DISK_ROLE_FAULTY)
				printf(" %d:-", d);
			else
				printf(" %d:%d", d,
				       __le16_to_cpu(sb->dev_roles[d]));
		printf("\n");
	}
}

static void brief_examine_super1(struct supertype *st, int verbose)
{
	struct mdp_superblock_1 *sb = st->sb;
	int i;
	unsigned long long sb_offset;
	char *nm;
	char *c = map_num(pers, __le32_to_cpu(sb->level));

	nm = strchr(sb->set_name, ':');
	if (nm)
		nm++;
	else if (sb->set_name[0])
		nm = sb->set_name;
	else
		nm = NULL;

	printf("ARRAY ");
	if (nm) {
		printf(DEV_MD_DIR "%s", nm);
		putchar(' ');
	}
	if (verbose && c)
		printf(" level=%s", c);
	sb_offset = __le64_to_cpu(sb->super_offset);
	if (sb_offset <= 4)
		printf(" metadata=1.1 ");
	else if (sb_offset <= 8)
		printf(" metadata=1.2 ");
	else
		printf(" metadata=1.0 ");
	if (verbose)
		printf("num-devices=%d ", __le32_to_cpu(sb->raid_disks));
	printf("UUID=");
	for (i = 0; i < 16; i++) {
		if ((i & 3)==0 && i != 0)
			printf(":");
		printf("%02x", sb->set_uuid[i]);
	}
	printf("\n");
}

static void export_examine_super1(struct supertype *st)
{
	struct mdp_superblock_1 *sb = st->sb;
	int i;
	int len = 32;
	int layout;

	printf("MD_LEVEL=%s\n", map_num_s(pers, __le32_to_cpu(sb->level)));
	printf("MD_DEVICES=%d\n", __le32_to_cpu(sb->raid_disks));
	for (i = 0; i < 32; i++)
		if (sb->set_name[i] == '\n' || sb->set_name[i] == '\0') {
			len = i;
			break;
		}
	if (len)
		printf("MD_NAME=%.*s\n", len, sb->set_name);
	if (__le32_to_cpu(sb->level) > 0) {
		int ddsks = 0, ddsks_denom = 1;
		switch(__le32_to_cpu(sb->level)) {
		case 1:
			ddsks = 1;
			break;
		case 4:
		case 5:
			ddsks = __le32_to_cpu(sb->raid_disks)-1;
			break;
		case 6:
			ddsks = __le32_to_cpu(sb->raid_disks)-2;
			break;
		case 10:
			layout = __le32_to_cpu(sb->layout);
			ddsks = __le32_to_cpu(sb->raid_disks);
			ddsks_denom = (layout&255) * ((layout>>8)&255);
		}
		if (ddsks) {
			long long asize = __le64_to_cpu(sb->size);
			asize = (asize << 9) * ddsks / ddsks_denom;
			printf("MD_ARRAY_SIZE=%s\n",
			       human_size_brief(asize, JEDEC));
		}
	}
	printf("MD_UUID=");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->set_uuid[i]);
	}
	printf("\n");
	printf("MD_UPDATE_TIME=%llu\n",
	       __le64_to_cpu(sb->utime) & 0xFFFFFFFFFFULL);
	printf("MD_DEV_UUID=");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->device_uuid[i]);
	}
	printf("\n");
	printf("MD_EVENTS=%llu\n",
	       (unsigned long long)__le64_to_cpu(sb->events));
}

static int copy_metadata1(struct supertype *st, int from, int to)
{
	/* Read superblock.  If it looks good, write it out.
	 * Then if a bitmap is present, copy that.
	 * And if a bad-block-list is present, copy that too.
	 */
	void *buf;
	unsigned long long dsize, sb_offset;
	const int bufsize = 4*1024;
	struct mdp_superblock_1 super, *sb;

	if (posix_memalign(&buf, 4096, bufsize) != 0)
		return 1;

	if (!get_dev_size(from, NULL, &dsize))
		goto err;

	dsize >>= 9;
	if (dsize < 24)
		goto err;
	switch(st->minor_version) {
	case 0:
		sb_offset = dsize;
		sb_offset -= 8*2;
		sb_offset &= ~(4*2-1);
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4*2;
		break;
	default:
		goto err;
	}

	if (lseek64(from, sb_offset << 9, 0) < 0LL)
		goto err;
	if (read(from, buf, bufsize) != bufsize)
		goto err;

	sb = buf;
	super = *sb; // save most of sb for when we reuse buf

	if (__le32_to_cpu(super.magic) != MD_SB_MAGIC ||
	    __le32_to_cpu(super.major_version) != 1 ||
	    __le64_to_cpu(super.super_offset) != sb_offset ||
	    calc_sb_1_csum(sb) != super.sb_csum)
		goto err;

	if (lseek64(to, sb_offset << 9, 0) < 0LL)
		goto err;
	if (write(to, buf, bufsize) != bufsize)
		goto err;

	if (super.feature_map & __le32_to_cpu(MD_FEATURE_BITMAP_OFFSET)) {
		unsigned long long bitmap_offset = sb_offset;
		int bytes = 4096; // just an estimate.
		int written = 0;
		struct align_fd afrom, ato;

		init_afd(&afrom, from);
		init_afd(&ato, to);

		bitmap_offset += (int32_t)__le32_to_cpu(super.bitmap_offset);

		if (lseek64(from, bitmap_offset<<9, 0) < 0)
			goto err;
		if (lseek64(to, bitmap_offset<<9, 0) < 0)
			goto err;

		for (written = 0; written < bytes ; ) {
			int n = bytes - written;
			if (n > 4096)
				n = 4096;
			if (aread(&afrom, buf, n) != n)
				goto err;
			if (written == 0) {
				/* have the header, can calculate
				 * correct bitmap bytes */
				bitmap_super_t *bms;
				bms = (void *)buf;
				bytes = calc_bitmap_size(bms, 512);
				if (n > bytes)
					n = bytes;
			}
			if (awrite(&ato, buf, n) != n)
				goto err;
			written += n;
		}
	}

	if (super.bblog_size != 0 &&
	    __le16_to_cpu(super.bblog_size) <= 100 &&
	    super.bblog_offset != 0 &&
	    (super.feature_map & __le32_to_cpu(MD_FEATURE_BAD_BLOCKS))) {
		/* There is a bad block log */
		unsigned long long bb_offset = sb_offset;
		int bytes = __le16_to_cpu(super.bblog_size) * 512;
		int written = 0;
		struct align_fd afrom, ato;

		init_afd(&afrom, from);
		init_afd(&ato, to);

		bb_offset += (int32_t)__le32_to_cpu(super.bblog_offset);

		if (lseek64(from, bb_offset<<9, 0) < 0)
			goto err;
		if (lseek64(to, bb_offset<<9, 0) < 0)
			goto err;

		for (written = 0; written < bytes ; ) {
			int n = bytes - written;
			if (n > 4096)
				n = 4096;
			if (aread(&afrom, buf, n) != n)
				goto err;

			if (awrite(&ato, buf, n) != n)
				goto err;
			written += n;
		}
	}

	free(buf);
	return 0;

err:
	free(buf);
	return 1;
}

static void detail_super1(struct supertype *st, char *homehost, char *subarray)
{
	struct mdp_superblock_1 *sb = st->sb;
	bitmap_super_t *bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);
	int i;
	int l = homehost ? strlen(homehost) : 0;

	printf("              Name : %.32s", sb->set_name);
	if (l > 0 && l < 32 && sb->set_name[l] == ':' &&
	    strncmp(sb->set_name, homehost, l) == 0)
		printf(" (local to host %s)", homehost);
	if (bms->nodes > 0 &&
	    (__le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET))
		printf("\n      Cluster Name : %-64s", bms->cluster_name);
	printf("\n              UUID : ");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->set_uuid[i]);
	}
	printf("\n            Events : %llu\n\n",
	       (unsigned long long)__le64_to_cpu(sb->events));
}

static void brief_detail_super1(struct supertype *st, char *subarray)
{
	struct mdp_superblock_1 *sb = st->sb;
	int i;

	printf(" UUID=");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->set_uuid[i]);
	}
}

static void export_detail_super1(struct supertype *st)
{
	struct mdp_superblock_1 *sb = st->sb;
	int i;
	int len = 32;

	for (i = 0; i < 32; i++)
		if (sb->set_name[i] == '\n' || sb->set_name[i] == '\0') {
			len = i;
			break;
		}
	if (len)
		printf("MD_NAME=%.*s\n", len, sb->set_name);
}
static int examine_badblocks_super1(struct supertype *st, int fd, char *devname)
{
	struct mdp_superblock_1 *sb = st->sb;
	unsigned long long offset;
	int size;
	__u64 *bbl, *bbp;
	int i;

	if (!sb->bblog_size || __le16_to_cpu(sb->bblog_size) > 100 ||
	    !sb->bblog_offset) {
		printf("No bad-blocks list configured on %s\n", devname);
		return 0;
	}
	if ((sb->feature_map & __cpu_to_le32(MD_FEATURE_BAD_BLOCKS)) == 0) {
		printf("Bad-blocks list is empty in %s\n", devname);
		return 0;
	}

	size = __le16_to_cpu(sb->bblog_size) * 512;
	if (posix_memalign((void **)&bbl, 4096, size) != 0) {
		pr_err("could not allocate badblocks list\n");
		return 0;
	}
	offset = __le64_to_cpu(sb->super_offset) +
		(int)__le32_to_cpu(sb->bblog_offset);
	offset <<= 9;
	if (lseek64(fd, offset, 0) < 0) {
		pr_err("Cannot seek to bad-blocks list\n");
		return 1;
	}
	if (read(fd, bbl, size) != size) {
		pr_err("Cannot read bad-blocks list\n");
		return 1;
	}
	/* 64bits per entry.  10 bits is block-count, 54 bits is block
	 * offset.  Blocks are sectors unless bblog->shift makes them bigger
	 */
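	/* Example decode (illustrative): entry 0x12c05 unpacks to
	 * count = 0x12c05 & 0x3ff = 5 and sector = 0x12c05 >> 10 = 75,
	 * i.e. 5 bad sectors starting at sector 75, before any
	 * bblog_shift scaling; an all-ones entry terminates the list.
	 */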
	bbp = (__u64*)bbl;
	printf("Bad-blocks on %s:\n", devname);
	for (i = 0; i < size/8; i++, bbp++) {
		__u64 bb = __le64_to_cpu(*bbp);
		int count = bb & 0x3ff;
		unsigned long long sector = bb >> 10;

		if (bb + 1 == 0)
			break;

		sector <<= sb->bblog_shift;
		count <<= sb->bblog_shift;

		printf("%20llu for %d sectors\n", sector, count);
	}
	return 0;
}

static int match_home1(struct supertype *st, char *homehost)
{
	struct mdp_superblock_1 *sb = st->sb;
	int l = homehost ? strlen(homehost) : 0;

	return (l > 0 && l < 32 && sb->set_name[l] == ':' &&
		strncmp(sb->set_name, homehost, l) == 0);
}

static void uuid_from_super1(struct supertype *st, int uuid[4])
{
	struct mdp_superblock_1 *super = st->sb;
	char *cuuid = (char *)uuid;
	int i;
	for (i = 0; i < 16; i++)
		cuuid[i] = super->set_uuid[i];
}

static void getinfo_super1(struct supertype *st, struct mdinfo *info, char *map)
{
	struct mdp_superblock_1 *sb = st->sb;
	struct bitmap_super_s *bsb = (void *)(((char *)sb) + MAX_SB_SIZE);
	struct misc_dev_info *misc =
		(void *)(((char *)sb) + MAX_SB_SIZE+BM_SUPER_SIZE);
	int working = 0;
	unsigned int i;
	unsigned int role;
	unsigned int map_disks = info->array.raid_disks;
	unsigned long long super_offset;
	unsigned long long data_size;

	memset(info, 0, sizeof(*info));
	info->array.major_version = 1;
	info->array.minor_version = st->minor_version;
	info->array.patch_version = 0;
	info->array.raid_disks = __le32_to_cpu(sb->raid_disks);
	info->array.level = __le32_to_cpu(sb->level);
	info->array.layout = __le32_to_cpu(sb->layout);
	info->array.md_minor = -1;
	info->array.ctime = __le64_to_cpu(sb->ctime);
	info->array.utime = __le64_to_cpu(sb->utime);
	info->array.chunk_size = __le32_to_cpu(sb->chunksize)*512;
	info->array.state =
		(__le64_to_cpu(sb->resync_offset) == MaxSector) ? 1 : 0;

	super_offset = __le64_to_cpu(sb->super_offset);
	info->data_offset = __le64_to_cpu(sb->data_offset);
	info->component_size = __le64_to_cpu(sb->size);
	if (sb->feature_map & __le32_to_cpu(MD_FEATURE_BITMAP_OFFSET)) {
		info->bitmap_offset = (int32_t)__le32_to_cpu(sb->bitmap_offset);
		if (__le32_to_cpu(bsb->nodes) > 1)
			info->array.state |= (1 << MD_SB_CLUSTERED);
	} else if (md_feature_any_ppl_on(sb->feature_map)) {
		info->ppl_offset = __le16_to_cpu(sb->ppl.offset);
		info->ppl_size = __le16_to_cpu(sb->ppl.size);
		info->ppl_sector = super_offset + info->ppl_offset;
	}

	info->disk.major = 0;
	info->disk.minor = 0;
	info->disk.number = __le32_to_cpu(sb->dev_number);
	if (__le32_to_cpu(sb->dev_number) >= __le32_to_cpu(sb->max_dev) ||
	    __le32_to_cpu(sb->dev_number) >= MAX_DEVS)
		role = MD_DISK_ROLE_FAULTY;
	else
		role = __le16_to_cpu(sb->dev_roles[__le32_to_cpu(sb->dev_number)]);

	if (info->array.level <= 0)
		data_size = __le64_to_cpu(sb->data_size);
	else
		data_size = __le64_to_cpu(sb->size);
	if (info->data_offset < super_offset) {
		unsigned long long end;
		info->space_before = info->data_offset;
		end = super_offset;

		if (sb->bblog_offset && sb->bblog_size) {
			unsigned long long bboffset = super_offset;
			bboffset += (int32_t)__le32_to_cpu(sb->bblog_offset);
			if (bboffset < end)
				end = bboffset;
		}

		if (super_offset + info->bitmap_offset + info->ppl_offset < end)
			end = super_offset + info->bitmap_offset +
				info->ppl_offset;

		if (info->data_offset + data_size < end)
			info->space_after = end - data_size - info->data_offset;
		else
			info->space_after = 0;
	} else {
		unsigned long long earliest;
		earliest = super_offset + (32+4)*2; /* match kernel */
		if (info->bitmap_offset > 0) {
			unsigned long long bmend = info->bitmap_offset;
			unsigned long long size = calc_bitmap_size(bsb, 4096);
			size /= 512;
			bmend += size;
			if (bmend > earliest)
				earliest = bmend;
		} else if (info->ppl_offset > 0) {
			unsigned long long pplend;

			pplend = info->ppl_offset + info->ppl_size;
			if (pplend > earliest)
				earliest = pplend;
		}
		if (sb->bblog_offset && sb->bblog_size) {
			unsigned long long bbend = super_offset;
			bbend += (int32_t)__le32_to_cpu(sb->bblog_offset);
			bbend += __le16_to_cpu(sb->bblog_size);
			if (bbend > earliest)
				earliest = bbend;
		}
		if (earliest < info->data_offset)
			info->space_before = info->data_offset - earliest;
		else
			info->space_before = 0;
		info->space_after = misc->device_size - data_size -
			info->data_offset;
	}
	if (info->space_before == 0 && info->space_after == 0) {
		/* It will look like we don't support data_offset changes,
		 * but we do - it's just that there is no room.
		 * A change that reduced the number of devices should
		 * still be allowed, so set the otherwise useless value of '1'
		 */
		info->space_after = 1;
	}

	info->disk.raid_disk = -1;
	switch(role) {
	case MD_DISK_ROLE_SPARE:
		/* spare: not active, not sync, not faulty */
		info->disk.state = 0;
		break;
	case MD_DISK_ROLE_FAULTY:
		info->disk.state = (1 << MD_DISK_FAULTY); /* faulty */
		break;
	case MD_DISK_ROLE_JOURNAL:
		info->disk.state = (1 << MD_DISK_JOURNAL);
		info->disk.raid_disk = role;
		/* journal uses all 4kB blocks */
		info->space_after = (misc->device_size - info->data_offset) % 8;
		break;
	default:
		info->disk.state = 6; /* active and in sync */
		info->disk.raid_disk = role;
	}
	if (sb->devflags & WriteMostly1)
		info->disk.state |= (1 << MD_DISK_WRITEMOSTLY);
	if (sb->devflags & FailFast1)
		info->disk.state |= (1 << MD_DISK_FAILFAST);
	info->events = __le64_to_cpu(sb->events);
	sprintf(info->text_version, "1.%d", st->minor_version);
	info->safe_mode_delay = 200;

	memcpy(info->uuid, sb->set_uuid, 16);

	strncpy(info->name, sb->set_name, 32);
	info->name[32] = 0;

	if ((__le32_to_cpu(sb->feature_map)&MD_FEATURE_REPLACEMENT)) {
		info->disk.state &= ~(1 << MD_DISK_SYNC);
		info->disk.state |= 1 << MD_DISK_REPLACEMENT;
	}

	if (sb->feature_map & __le32_to_cpu(MD_FEATURE_RECOVERY_OFFSET))
		info->recovery_start = __le32_to_cpu(sb->recovery_offset);
	else
		info->recovery_start = MaxSector;

	if (sb->feature_map & __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE)) {
		info->reshape_active = 1;
		if ((sb->feature_map & __le32_to_cpu(MD_FEATURE_NEW_OFFSET)) &&
		    sb->new_offset != 0)
			info->reshape_active |= RESHAPE_NO_BACKUP;
		info->reshape_progress = __le64_to_cpu(sb->reshape_position);
		info->new_level = __le32_to_cpu(sb->new_level);
		info->delta_disks = __le32_to_cpu(sb->delta_disks);
		info->new_layout = __le32_to_cpu(sb->new_layout);
		info->new_chunk = __le32_to_cpu(sb->new_chunk)<<9;
		if (info->delta_disks < 0)
			info->array.raid_disks -= info->delta_disks;
	} else
		info->reshape_active = 0;

	info->recovery_blocked = info->reshape_active;

	if (map)
		for (i = 0; i < map_disks; i++)
			map[i] = 0;
	for (i = 0; i < __le32_to_cpu(sb->max_dev); i++) {
		role = __le16_to_cpu(sb->dev_roles[i]);
		if (/*role == MD_DISK_ROLE_SPARE || */role < (unsigned) info->array.raid_disks) {
			working++;
			if (map && role < map_disks)
				map[role] = 1;
		}
	}

	info->array.working_disks = working;

	if (sb->feature_map & __le32_to_cpu(MD_FEATURE_JOURNAL)) {
		info->journal_device_required = 1;
		info->consistency_policy = CONSISTENCY_POLICY_JOURNAL;
	} else if (md_feature_any_ppl_on(sb->feature_map)) {
		info->consistency_policy = CONSISTENCY_POLICY_PPL;
	} else if (sb->feature_map & __le32_to_cpu(MD_FEATURE_BITMAP_OFFSET)) {
		info->consistency_policy = CONSISTENCY_POLICY_BITMAP;
	} else if (info->array.level <= 0) {
		info->consistency_policy = CONSISTENCY_POLICY_NONE;
	} else {
		info->consistency_policy = CONSISTENCY_POLICY_RESYNC;
	}

	info->journal_clean = 0;
}

static struct mdinfo *container_content1(struct supertype *st, char *subarray)
{
	struct mdinfo *info;

	if (subarray)
		return NULL;

	info = xmalloc(sizeof(*info));
	getinfo_super1(st, info, NULL);
	return info;
}

static int update_super1(struct supertype *st, struct mdinfo *info,
			 enum update_opt update, char *devname, int verbose,
			 int uuid_set, char *homehost)
{
	/* NOTE: for 'assemble' and 'force' we need to return non-zero
	 * if any change was made.  For others, the return value is
	 * ignored.
	 */
	int rv = 0;
	struct mdp_superblock_1 *sb = st->sb;
	bitmap_super_t *bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);

	if (update == UOPT_HOMEHOST && homehost) {
		/*
		 * Note that 'homehost' is special as it is really
		 * a "name" update.
		 */
		char *c;
		update = UOPT_NAME;
		c = strchr(sb->set_name, ':');
		if (c)
			snprintf(info->name, sizeof(info->name), "%s", c + 1);
		else
			snprintf(info->name, sizeof(info->name), "%s",
				 sb->set_name);
	}

	switch (update) {
	case UOPT_NAME: {
		int namelen;

		if (!info->name[0])
			snprintf(info->name, sizeof(info->name), "%d", info->array.md_minor);
		memset(sb->set_name, 0, sizeof(sb->set_name));

		if (homehost &&
		    strchr(info->name, ':') == NULL &&
		    strnlen(homehost, MD_NAME_MAX) + 1 +
		    strnlen(info->name, MD_NAME_MAX) < MD_NAME_MAX) {
			strcpy(sb->set_name, homehost);
			strcat(sb->set_name, ":");
			strcat(sb->set_name, info->name);
		} else {
			namelen = min((int)strnlen(info->name, MD_NAME_MAX),
				      (int)sizeof(sb->set_name) - 1);
			memcpy(sb->set_name, info->name, namelen);
			memset(&sb->set_name[namelen], '\0',
			       sizeof(sb->set_name) - namelen);
		}
		break;
	}
	case UOPT_SPEC_FORCE_ONE:
		/* Not enough devices for a working array,
		 * so bring this one up-to-date
		 */
		if (sb->events != __cpu_to_le64(info->events))
			rv = 1;
		sb->events = __cpu_to_le64(info->events);
		break;
	case UOPT_SPEC_FORCE_ARRAY:
		/* Degraded array and 'force' was requested, so we
		 * may need to mark it 'clean'.
		 */
		switch(__le32_to_cpu(sb->level)) {
		case 4:
		case 5:
		case 6:
			/* need to force clean */
			if (sb->resync_offset != MaxSector)
				rv = 1;
			sb->resync_offset = MaxSector;
		}
		break;
	case UOPT_SPEC_ASSEMBLE: {
		int d = info->disk.number;
		int want;
		if (info->disk.state & (1<<MD_DISK_ACTIVE))
			want = info->disk.raid_disk;
		else if (info->disk.state & (1<<MD_DISK_JOURNAL))
			want = MD_DISK_ROLE_JOURNAL;
		else
			want = MD_DISK_ROLE_SPARE;
		if (sb->dev_roles[d] != __cpu_to_le16(want)) {
			sb->dev_roles[d] = __cpu_to_le16(want);
			rv = 1;
		}
		if (info->reshape_active &&
		    sb->feature_map &
		    __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE) &&
		    info->delta_disks >= 0 &&
		    info->reshape_progress <
		    __le64_to_cpu(sb->reshape_position)) {
			sb->reshape_position =
				__cpu_to_le64(info->reshape_progress);
			rv = 1;
		}
		if (info->reshape_active &&
		    sb->feature_map &
		    __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE) &&
		    info->delta_disks < 0 &&
		    info->reshape_progress >
		    __le64_to_cpu(sb->reshape_position)) {
			sb->reshape_position =
				__cpu_to_le64(info->reshape_progress);
			rv = 1;
		}
		break;
	}
	case UOPT_SPEC_LINEAR_GROW_NEW: {
		int i;
		int fd;
		int max = __le32_to_cpu(sb->max_dev);

		if (max > MAX_DEVS)
			return -2;

		for (i = 0; i < max; i++)
			if (__le16_to_cpu(sb->dev_roles[i]) >=
			    MD_DISK_ROLE_FAULTY)
				break;
		if (i != info->disk.number)
			return -2;
		sb->dev_number = __cpu_to_le32(i);

		if (i == max)
			sb->max_dev = __cpu_to_le32(max + 1);
		if (i > max)
			return -2;

		random_uuid(sb->device_uuid);

		sb->dev_roles[i] = __cpu_to_le16(info->disk.raid_disk);

		fd = open(devname, O_RDONLY);
		if (fd >= 0) {
			unsigned long long ds;
			get_dev_size(fd, devname, &ds);
			close(fd);
			ds >>= 9;
			if (__le64_to_cpu(sb->super_offset) <
			    __le64_to_cpu(sb->data_offset)) {
				sb->data_size = __cpu_to_le64(
					ds - __le64_to_cpu(sb->data_offset));
			} else {
				ds -= 8 * 2;
				ds &= ~(unsigned long long)(4 * 2 - 1);
				sb->super_offset = __cpu_to_le64(ds);
				sb->data_size = __cpu_to_le64(
					ds - __le64_to_cpu(sb->data_offset));
			}
		}
		break;
	}
	case UOPT_SPEC_LINEAR_GROW_UPDATE: {
		int max = __le32_to_cpu(sb->max_dev);
		int i = info->disk.number;
		if (max > MAX_DEVS || i > MAX_DEVS)
			return -2;
		if (i > max)
			return -2;
		if (i == max)
			sb->max_dev = __cpu_to_le32(max + 1);
		sb->raid_disks = __cpu_to_le32(info->array.raid_disks);
		sb->dev_roles[info->disk.number] =
			__cpu_to_le16(info->disk.raid_disk);
		break;
	}
	case UOPT_RESYNC:
		/* make sure resync happens */
		sb->resync_offset = 0;
		break;
	case UOPT_UUID:
		copy_uuid(sb->set_uuid, info->uuid, super1.swapuuid);

		if (__le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)
			memcpy(bms->uuid, sb->set_uuid, 16);
		break;
	case UOPT_NO_BITMAP:
		sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
		if (bms->version == BITMAP_MAJOR_CLUSTERED && !IsBitmapDirty(devname))
			sb->resync_offset = MaxSector;
		break;
	case UOPT_BBL: {
		/* only possible if there is room after the bitmap, or if
		 * there is no bitmap
		 */
		unsigned long long sb_offset = __le64_to_cpu(sb->super_offset);
		unsigned long long data_offset = __le64_to_cpu(sb->data_offset);
		long bitmap_offset = 0;
		long bm_sectors = 0;
		long space;

		if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
			bitmap_offset = (long)__le32_to_cpu(sb->bitmap_offset);
			bm_sectors = calc_bitmap_size(bms, 4096) >> 9;
		} else if (md_feature_any_ppl_on(sb->feature_map)) {
			bitmap_offset = (long)__le16_to_cpu(sb->ppl.offset);
			bm_sectors = (long)__le16_to_cpu(sb->ppl.size);
		}

		if (sb_offset < data_offset) {
			/*
			 * 1.1 or 1.2.  Put bbl after bitmap leaving
			 * at least 32K
			 */
			long bb_offset;
			bb_offset = sb_offset + 8;
			if (bm_sectors && bitmap_offset > 0)
				bb_offset = bitmap_offset + bm_sectors;
			while (bb_offset < (long)sb_offset + 8 + 32*2 &&
			       bb_offset + 8+8 <= (long)data_offset)
				bb_offset += 8;
			if (bb_offset + 8 <= (long)data_offset) {
				sb->bblog_size = __cpu_to_le16(8);
				sb->bblog_offset = __cpu_to_le32(bb_offset);
			}
		} else {
			if (bm_sectors && bitmap_offset < 0)
				space = -bitmap_offset - bm_sectors;
			else
				space = sb_offset - data_offset -
					__le64_to_cpu(sb->data_size);
			if (space >= 8) {
				sb->bblog_size = __cpu_to_le16(8);
				sb->bblog_offset = __cpu_to_le32((unsigned)-8);
			}
		}
		break;
	}
	case UOPT_NO_BBL:
		if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BAD_BLOCKS))
			pr_err("Cannot remove active bbl from %s\n", devname);
		else {
			sb->bblog_size = 0;
			sb->bblog_shift = 0;
			sb->bblog_offset = 0;
		}
		break;
	case UOPT_FORCE_NO_BBL:
		sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
		sb->bblog_size = 0;
		sb->bblog_shift = 0;
		sb->bblog_offset = 0;
		break;
	case UOPT_PPL: {
		unsigned long long sb_offset = __le64_to_cpu(sb->super_offset);
		unsigned long long data_offset = __le64_to_cpu(sb->data_offset);
		unsigned long long data_size = __le64_to_cpu(sb->data_size);
		long bb_offset = __le32_to_cpu(sb->bblog_offset);
		int space;
		int offset;

		if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
			pr_err("Cannot add PPL to array with bitmap\n");
			return -2;
		}

		if (sb->feature_map & __cpu_to_le32(MD_FEATURE_JOURNAL)) {
			pr_err("Cannot add PPL to array with journal\n");
			return -2;
		}

		if (sb_offset < data_offset) {
			if (bb_offset)
				space = bb_offset - 8;
			else
				space = data_offset - sb_offset - 8;
			offset = 8;
		} else {
			offset = -(sb_offset - data_offset - data_size);
			if (offset < INT16_MIN)
				offset = INT16_MIN;
			space = -(offset - bb_offset);
		}

		if (space < (PPL_HEADER_SIZE >> 9) + 8) {
			pr_err("Not enough space to add ppl\n");
			return -2;
		}

		if (space >= (MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9)) {
			space = (MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9);
		} else {
			int optimal_space = choose_ppl_space(
						__le32_to_cpu(sb->chunksize));
			if (space > optimal_space)
				space = optimal_space;
			if (space > UINT16_MAX)
				space = UINT16_MAX;
		}

		sb->ppl.offset = __cpu_to_le16(offset);
		sb->ppl.size = __cpu_to_le16(space);
		sb->feature_map |= __cpu_to_le32(MD_FEATURE_PPL);
		break;
	}
	case UOPT_NO_PPL:
		sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_PPL |
						  MD_FEATURE_MUTLIPLE_PPLS);
		break;
	case UOPT_DEVICESIZE:
		if (__le64_to_cpu(sb->super_offset) >=
		    __le64_to_cpu(sb->data_offset))
			break;
		/*
		 * set data_size to device size less data_offset
		 */
		struct misc_dev_info *misc = (struct misc_dev_info*)
			(st->sb + MAX_SB_SIZE + BM_SUPER_SIZE);
		sb->data_size = __cpu_to_le64(
			misc->device_size - __le64_to_cpu(sb->data_offset));
		break;
	case UOPT_SPEC_REVERT_RESHAPE_NOBACKUP:
	case UOPT_REVERT_RESHAPE:
		rv = -2;
		if (!(sb->feature_map &
		      __cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE)))
			pr_err("No active reshape to revert on %s\n",
			       devname);
		else {
			__u32 temp;
			unsigned long long reshape_sectors;
			long reshape_chunk;
			rv = 0;
			/* If the reshape hasn't started, just stop it.
			 * It is conceivable that a stripe was modified but
			 * the metadata not updated.  In that case the backup
			 * should have been used to get past the critical stage.
			 * If that couldn't happen, the "-nobackup" version
			 * will be used.
			 */
			if (update == UOPT_SPEC_REVERT_RESHAPE_NOBACKUP &&
			    sb->reshape_position == 0 &&
			    (__le32_to_cpu(sb->delta_disks) > 0 ||
			     (__le32_to_cpu(sb->delta_disks) == 0 &&
			      !(sb->feature_map & __cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS))))) {
				sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
				sb->raid_disks = __cpu_to_le32(__le32_to_cpu(sb->raid_disks) -
							       __le32_to_cpu(sb->delta_disks));
				sb->delta_disks = 0;
				goto done;
			}
			/* reshape_position is a little messy.
			 * Its value must be a multiple of the larger
			 * chunk size, and of the "after" data disks.
			 * So when reverting we need to change it to
			 * be a multiple of the new "after" data disks,
			 * which is the old "before".
			 * If it isn't already a multiple of 'before',
			 * the only thing we could do would be
			 * copy some block around on the disks, which
			 * is easy to get wrong.
			 * So we reject a revert-reshape unless the
			 * alignment is good.
			 */
			if (is_level456(__le32_to_cpu(sb->level))) {
				reshape_sectors =
					__le64_to_cpu(sb->reshape_position);
				reshape_chunk = __le32_to_cpu(sb->new_chunk);
				reshape_chunk *= __le32_to_cpu(sb->raid_disks) -
					__le32_to_cpu(sb->delta_disks) -
					(__le32_to_cpu(sb->level)==6 ? 2 : 1);
				if (reshape_sectors % reshape_chunk) {
					pr_err("Reshape position is not suitably aligned.\n");
					pr_err("Try normal assembly and stop again\n");
					return -2;
				}
			}
			sb->raid_disks =
				__cpu_to_le32(__le32_to_cpu(sb->raid_disks) -
					      __le32_to_cpu(sb->delta_disks));
			if (sb->delta_disks == 0)
				sb->feature_map ^= __cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
			else
				sb->delta_disks = __cpu_to_le32(-__le32_to_cpu(sb->delta_disks));

			temp = sb->new_layout;
			sb->new_layout = sb->layout;
			sb->layout = temp;

			temp = sb->new_chunk;
			sb->new_chunk = sb->chunksize;
			sb->chunksize = temp;

			if (sb->feature_map &
			    __cpu_to_le32(MD_FEATURE_NEW_OFFSET)) {
				long offset_delta =
					(int32_t)__le32_to_cpu(sb->new_offset);
				sb->data_offset = __cpu_to_le64(__le64_to_cpu(sb->data_offset) + offset_delta);
				sb->new_offset = __cpu_to_le32(-offset_delta);
				sb->data_size = __cpu_to_le64(__le64_to_cpu(sb->data_size) - offset_delta);
			}
		done:;
		}
		break;
	case UOPT_SPEC__RESHAPE_PROGRESS:
		sb->reshape_position = __cpu_to_le64(info->reshape_progress);
		break;
	case UOPT_SPEC_WRITEMOSTLY:
		sb->devflags |= WriteMostly1;
		break;
	case UOPT_SPEC_READWRITE:
		sb->devflags &= ~WriteMostly1;
		break;
	case UOPT_SPEC_FAILFAST:
		sb->devflags |= FailFast1;
		break;
	case UOPT_SPEC_NOFAILFAST:
		sb->devflags &= ~FailFast1;
		break;
	case UOPT_LAYOUT_ORIGINAL:
	case UOPT_LAYOUT_ALTERNATE:
	case UOPT_LAYOUT_UNSPECIFIED:
		if (__le32_to_cpu(sb->level) != 0) {
			pr_err("%s: %s only supported for RAID0\n",
			       devname ?: "", map_num(update_options, update));
			rv = -1;
		} else if (update == UOPT_LAYOUT_UNSPECIFIED) {
			sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_RAID0_LAYOUT);
			sb->layout = 0;
		} else {
			sb->feature_map |= __cpu_to_le32(MD_FEATURE_RAID0_LAYOUT);
			sb->layout = __cpu_to_le32(update == UOPT_LAYOUT_ORIGINAL ? 1 : 2);
		}
		break;
	default:
		rv = -1;
	}

	sb->sb_csum = calc_sb_1_csum(sb);

	return rv;
}

static int init_super1(struct supertype *st, mdu_array_info_t *info,
		       struct shape *s, char *name, char *homehost,
		       int *uuid, unsigned long long data_offset)
{
	struct mdp_superblock_1 *sb;
	int spares;
	char defname[10];
	int sbsize;

	if (posix_memalign((void **)&sb, 4096, SUPER1_SIZE) != 0) {
		pr_err("could not allocate superblock\n");
		return 0;
	}
	memset(sb, 0, SUPER1_SIZE);

	st->sb = sb;
	if (info == NULL) {
		/* zeroing superblock */
		return 0;
	}

	spares = info->working_disks - info->active_disks;
	if (info->raid_disks + spares > MAX_DEVS) {
		pr_err("too many devices requested: %d+%d > %d\n",
		       info->raid_disks, spares, MAX_DEVS);
		return 0;
	}

	sb->magic = __cpu_to_le32(MD_SB_MAGIC);
	sb->major_version = __cpu_to_le32(1);
	sb->feature_map = 0;
	sb->pad0 = 0;

	if (uuid)
		copy_uuid(sb->set_uuid, uuid, super1.swapuuid);
	else
		random_uuid(sb->set_uuid);

	if (name == NULL || *name == 0) {
		sprintf(defname, "%d", info->md_minor);
		name = defname;
	}
	if (homehost &&
	    strchr(name, ':') == NULL &&
	    strlen(homehost) + 1 + strlen(name) < 32) {
		strcpy(sb->set_name, homehost);
		strcat(sb->set_name, ":");
		strcat(sb->set_name, name);
	} else {
		int namelen;

		namelen = min((int)strlen(name),
			      (int)sizeof(sb->set_name) - 1);
		memcpy(sb->set_name, name, namelen);
		memset(&sb->set_name[namelen], '\0',
		       sizeof(sb->set_name) - namelen);
	}

	sb->ctime = __cpu_to_le64((unsigned long long)time(0));
	sb->level = __cpu_to_le32(info->level);
	sb->layout = __cpu_to_le32(info->layout);
	sb->size = __cpu_to_le64(s->size*2ULL);
	sb->chunksize = __cpu_to_le32(info->chunk_size>>9);
	sb->raid_disks = __cpu_to_le32(info->raid_disks);

	sb->data_offset = __cpu_to_le64(data_offset);
	sb->data_size = __cpu_to_le64(0);
	sb->super_offset = __cpu_to_le64(0);
	sb->recovery_offset = __cpu_to_le64(0);

	sb->utime = sb->ctime;
	sb->events = __cpu_to_le64(1);
	if (info->state & (1<<MD_SB_CLEAN))
		sb->resync_offset = MaxSector;
	else
		sb->resync_offset = 0;
	sbsize = sizeof(struct mdp_superblock_1) +
		2 * (info->raid_disks + spares);
	sbsize = ROUND_UP(sbsize, 512);
	sb->max_dev =
		__cpu_to_le32((sbsize - sizeof(struct mdp_superblock_1)) / 2);

	memset(sb->dev_roles, 0xff,
	       MAX_SB_SIZE - sizeof(struct mdp_superblock_1));

	if (s->consistency_policy == CONSISTENCY_POLICY_PPL)
		sb->feature_map |= __cpu_to_le32(MD_FEATURE_PPL);

	return 1;
}

struct devinfo {
	int fd;
	char *devname;
	long long data_offset;
	unsigned long long dev_size;
	mdu_disk_info_t disk;
	struct devinfo *next;
};

/* Add a device to the superblock being created */
static int add_to_super1(struct supertype *st, mdu_disk_info_t *dk,
			 int fd, char *devname, unsigned long long data_offset)
{
	struct mdp_superblock_1 *sb = st->sb;
	__u16 *rp = sb->dev_roles + dk->number;
	struct devinfo *di, **dip;
	int dk_state;

	dk_state = dk->state & ~(1<<MD_DISK_FAILFAST);
	if ((dk_state & (1<<MD_DISK_ACTIVE)) &&
	    (dk_state & (1<<MD_DISK_SYNC))) /* active, sync */
		*rp = __cpu_to_le16(dk->raid_disk);
	else if (dk_state & (1<<MD_DISK_JOURNAL))
		*rp = MD_DISK_ROLE_JOURNAL;
	else if ((dk_state & ~(1<<MD_DISK_ACTIVE)) == 0)
		/* active or idle -> spare */
		*rp = MD_DISK_ROLE_SPARE;
	else
		*rp = MD_DISK_ROLE_FAULTY;

	if (dk->number >= (int)__le32_to_cpu(sb->max_dev) &&
	    __le32_to_cpu(sb->max_dev) < MAX_DEVS)
		sb->max_dev = __cpu_to_le32(dk->number + 1);

	sb->dev_number = __cpu_to_le32(dk->number);
	sb->devflags = 0; /* don't copy another disk's flags */
	sb->sb_csum = calc_sb_1_csum(sb);

	dip = (struct devinfo **)&st->info;
	while (*dip)
		dip = &(*dip)->next;
	di = xmalloc(sizeof(struct devinfo));
	di->fd = fd;
	di->devname = devname;
	di->disk = *dk;
	di->data_offset = data_offset;

	if (is_fd_valid(fd))
		get_dev_size(fd, NULL, &di->dev_size);

	di->next = NULL;
	*dip = di;

	return 0;
}

static int locate_bitmap1(struct supertype *st, int fd, int node_num);

static int store_super1(struct supertype *st, int fd)
{
	struct mdp_superblock_1 *sb = st->sb;
	unsigned long long sb_offset;
	struct align_fd afd;
	int sbsize;
	unsigned long long dsize;

	if (!get_dev_size(fd, NULL, &dsize))
		return 1;

	dsize >>= 9;

	if (dsize < 24)
		return 2;

	init_afd(&afd, fd);

	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
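	/*
	 * Worked example for minor_version 0, in 512-byte sectors: on a
	 * 2097152-sector (1GiB) device, sb_offset becomes
	 * (2097152 - 16) & ~7 = 2097136, i.e. 8K from the end, rounded
	 * down to a 4K boundary.
	 */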
	switch(st->minor_version) {
	case 0:
		sb_offset = dsize;
		sb_offset -= 8*2;
		sb_offset &= ~(4*2-1);
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4*2;
		break;
	default:
		return -EINVAL;
	}

	if (sb_offset != __le64_to_cpu(sb->super_offset) &&
	    0 != __le64_to_cpu(sb->super_offset)) {
		pr_err("internal error - sb_offset is wrong\n");
		abort();
	}

	if (lseek64(fd, sb_offset << 9, 0) < 0LL)
		return 3;

	sbsize = ROUND_UP(sizeof(*sb) + 2 * __le32_to_cpu(sb->max_dev), 512);

	if (awrite(&afd, sb, sbsize) != sbsize)
		return 4;

	if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
		struct bitmap_super_s *bm;
		bm = (struct bitmap_super_s *)(((char *)sb) + MAX_SB_SIZE);
		if (__le32_to_cpu(bm->magic) == BITMAP_MAGIC) {
			locate_bitmap1(st, fd, 0);
			if (awrite(&afd, bm, sizeof(*bm)) != sizeof(*bm))
				return 5;
		}
	}
	fsync(fd);

	return 0;
}
1837
1838 static int load_super1(struct supertype *st, int fd, char *devname);
1839
1840 static unsigned long choose_bm_space(unsigned long devsize)
1841 {
1842 /* If the device is bigger than 8GiB, save 64K for bitmap usage;
1843 * if bigger than 200GiB, save 128K.
1844 * NOTE: the result must be a multiple of 4K, else bad things happen
1845 * on 4K-sector devices.
1846 */
1847 if (devsize < 64*2)
1848 return 0;
1849 if (devsize - 64*2 >= 200*1024*1024*2)
1850 return 128*2;
1851 if (devsize - 4*2 > 8*1024*1024*2)
1852 return 64*2;
1853 return 4*2;
1854 }
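/*
 * Editor's note - example returns for hypothetical devsize values
 * (in 512-byte sectors): a 16GiB device (33554432 sectors) clears
 * the 8GiB test and reserves 64*2 = 128 sectors (64K); a 500GiB
 * device (1048576000 sectors) reserves 128*2 = 256 sectors (128K);
 * a device under 64*2 sectors reserves nothing.
 */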
1855
1856 static void free_super1(struct supertype *st);
1857
1858 __u32 crc32c_le(__u32 crc, unsigned char const *p, size_t len);
1859
1860 static int write_init_ppl1(struct supertype *st, struct mdinfo *info, int fd)
1861 {
1862 struct mdp_superblock_1 *sb = st->sb;
1863 void *buf;
1864 struct ppl_header *ppl_hdr;
1865 int ret;
1866
1867 /* first clear entire ppl space */
1868 ret = zero_disk_range(fd, info->ppl_sector, info->ppl_size);
1869 if (ret)
1870 return ret;
1871
1872 ret = posix_memalign(&buf, 4096, PPL_HEADER_SIZE);
1873 if (ret) {
1874 pr_err("Failed to allocate PPL header buffer\n");
1875 return ret;
1876 }
1877
1878 memset(buf, 0, PPL_HEADER_SIZE);
1879 ppl_hdr = buf;
1880 memset(ppl_hdr->reserved, 0xff, PPL_HDR_RESERVED);
1881 ppl_hdr->signature = __cpu_to_le32(~crc32c_le(~0, sb->set_uuid,
1882 sizeof(sb->set_uuid)));
1883 ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));
1884
1885 if (lseek64(fd, info->ppl_sector * 512, SEEK_SET) < 0) {
1886 ret = errno;
1887 perror("Failed to seek to PPL header location");
1888 }
1889
1890 if (!ret && write(fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
1891 ret = errno;
1892 perror("Write PPL header failed");
1893 }
1894
1895 if (!ret)
1896 fsync(fd);
1897
1898 free(buf);
1899 return ret;
1900 }
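/*
 * Editor's note: the PPL signature written above is the bitwise NOT
 * of crc32c(~0, set_uuid, 16), and the header checksum is computed
 * the same way over the whole PPL_HEADER_SIZE buffer while the
 * checksum field is still zero from the memset().
 */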
1901
1902 #define META_BLOCK_SIZE 4096
1903
1904 static int write_empty_r5l_meta_block(struct supertype *st, int fd)
1905 {
1906 struct r5l_meta_block *mb;
1907 struct mdp_superblock_1 *sb = st->sb;
1908 struct align_fd afd;
1909 __u32 crc;
1910
1911 init_afd(&afd, fd);
1912
1913 if (posix_memalign((void **)&mb, 4096, META_BLOCK_SIZE) != 0) {
1914 pr_err("Could not allocate memory for the meta block.\n");
1915 return 1;
1916 }
1917
1918 memset(mb, 0, META_BLOCK_SIZE);
1919
1920 mb->magic = __cpu_to_le32(R5LOG_MAGIC);
1921 mb->version = R5LOG_VERSION;
1922 mb->meta_size = __cpu_to_le32(sizeof(struct r5l_meta_block));
1923 mb->seq = __cpu_to_le64(random32());
1924 mb->position = __cpu_to_le64(0);
1925
1926 crc = crc32c_le(0xffffffff, sb->set_uuid, sizeof(sb->set_uuid));
1927 crc = crc32c_le(crc, (void *)mb, META_BLOCK_SIZE);
1928 mb->checksum = crc;
1929
1930 if (lseek64(fd, __le64_to_cpu(sb->data_offset) * 512, 0) < 0LL) {
1931 pr_err("cannot seek to offset of the meta block\n");
1932 goto fail_to_write;
1933 }
1934
1935 if (awrite(&afd, mb, META_BLOCK_SIZE) != META_BLOCK_SIZE) {
1936 pr_err("failed to store write the meta block \n");
1937 goto fail_to_write;
1938 }
1939 fsync(fd);
1940
1941 free(mb);
1942 return 0;
1943
1944 fail_to_write:
1945 free(mb);
1946 return 1;
1947 }
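/*
 * Editor's note: the log checksum above is seeded with a crc32c of
 * set_uuid and then extended over the whole META_BLOCK_SIZE block,
 * tying the empty log block to this particular array.
 */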
1948
1949 static bool has_raid0_layout(struct mdp_superblock_1 *sb)
1950 {
1951 if (sb->level == 0 && sb->layout != 0)
1952 return true;
1953 else
1954 return false;
1955 }
1956
1957 static int write_init_super1(struct supertype *st)
1958 {
1959 struct mdp_superblock_1 *sb = st->sb;
1960 struct supertype *refst;
1961 int rv = 0;
1962 unsigned long long bm_space;
1963 struct devinfo *di;
1964 unsigned long long dsize, array_size;
1965 unsigned long long sb_offset;
1966 unsigned long long data_offset;
1967 long bm_offset;
1968 bool raid0_need_layout = false;
1969
1970 /* Since linux kernel v5.4, raid0 always has a layout */
1971 if (has_raid0_layout(sb) && get_linux_version() >= 5004000)
1972 raid0_need_layout = true;
1973
1974 for (di = st->info; di; di = di->next) {
1975 if (di->disk.state & (1 << MD_DISK_JOURNAL))
1976 sb->feature_map |= __cpu_to_le32(MD_FEATURE_JOURNAL);
1977 if (has_raid0_layout(sb) && !raid0_need_layout) {
1978
1979 struct devinfo *di2 = st->info;
1980 unsigned long long s1, s2;
1981 s1 = di->dev_size;
1982 if (di->data_offset != INVALID_SECTORS)
1983 s1 -= di->data_offset;
1984 s1 /= __le32_to_cpu(sb->chunksize);
1985 s2 = di2->dev_size;
1986 if (di2->data_offset != INVALID_SECTORS)
1987 s2 -= di2->data_offset;
1988 s2 /= __le32_to_cpu(sb->chunksize);
1989 if (s1 != s2)
1990 raid0_need_layout = true;
1991 }
1992 }
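/*
 * Editor's note - hypothetical example of the check above: with a
 * 64-sector chunk, a member with 6400 usable sectors (100 chunks)
 * and one with 6464 (101 chunks) differ, so the RAID0 layout
 * feature must be recorded.
 */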
1993
1994 for (di = st->info; di; di = di->next) {
1995 if (di->disk.state & (1 << MD_DISK_FAULTY))
1996 continue;
1997 if (di->fd < 0)
1998 continue;
1999
2000 while (Kill(di->devname, NULL, 0, -1, 1) == 0)
2001 ;
2002
2003 sb->dev_number = __cpu_to_le32(di->disk.number);
2004 if (di->disk.state & (1<<MD_DISK_WRITEMOSTLY))
2005 sb->devflags |= WriteMostly1;
2006 else
2007 sb->devflags &= ~WriteMostly1;
2008 if (di->disk.state & (1<<MD_DISK_FAILFAST))
2009 sb->devflags |= FailFast1;
2010 else
2011 sb->devflags &= ~FailFast1;
2012
2013 random_uuid(sb->device_uuid);
2014
2015 if (!(di->disk.state & (1<<MD_DISK_JOURNAL)))
2016 sb->events = 0;
2017
2018 refst = dup_super(st);
2019 if (load_super1(refst, di->fd, NULL) == 0) {
2020 struct mdp_superblock_1 *refsb = refst->sb;
2021
2022 memcpy(sb->device_uuid, refsb->device_uuid, 16);
2023 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) == 0) {
2024 /* same array, so preserve events and
2025 * dev_number */
2026 sb->events = refsb->events;
2027 }
2028 free_super1(refst);
2029 }
2030 free(refst);
2031
2032 if (!get_dev_size(di->fd, NULL, &dsize)) {
2033 rv = 1;
2034 goto error_out;
2035 }
2036 dsize >>= 9;
2037
2038 if (dsize < 24) {
2039 close(di->fd);
2040 rv = 2;
2041 goto error_out;
2042 }
2043
2044 /*
2045 * Calculate the position of the superblock.
2046 * It is always aligned to a 4K boundary and
2047 * depending on minor_version, it can be:
2048 * 0: At least 8K, but less than 12K, from end of device
2049 * 1: At start of device
2050 * 2: 4K from start of device.
2051 * data_offset has already been set.
2052 */
2053 array_size = __le64_to_cpu(sb->size);
2054
2055 /* work out how much space we left for a bitmap */
2056 if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
2057 bitmap_super_t *bms = (bitmap_super_t *)
2058 (((char *)sb) + MAX_SB_SIZE);
2059 bm_space = calc_bitmap_size(bms, 4096) >> 9;
2060 bm_offset = (long)__le32_to_cpu(sb->bitmap_offset);
2061 } else if (md_feature_any_ppl_on(sb->feature_map)) {
2062 bm_space = MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9;
2063 if (st->minor_version == 0)
2064 bm_offset = -bm_space - 8;
2065 else
2066 bm_offset = 8;
2067 sb->ppl.offset = __cpu_to_le16(bm_offset);
2068 sb->ppl.size = __cpu_to_le16(bm_space);
2069 } else {
2070 bm_space = choose_bm_space(array_size);
2071 bm_offset = 8;
2072 }
2073
2074 data_offset = di->data_offset;
2075 if (data_offset == INVALID_SECTORS)
2076 data_offset = st->data_offset;
2077 switch(st->minor_version) {
2078 case 0:
2079 /* Add 8 sectors for bad block log */
2080 bm_space += 8;
2081 if (data_offset == INVALID_SECTORS)
2082 data_offset = 0;
2083 sb_offset = dsize;
2084 sb_offset -= 8*2;
2085 sb_offset &= ~(4*2-1);
2086 sb->data_offset = __cpu_to_le64(data_offset);
2087 sb->super_offset = __cpu_to_le64(sb_offset);
2088 if (sb_offset < array_size + bm_space)
2089 bm_space = sb_offset - array_size;
2090 sb->data_size = __cpu_to_le64(sb_offset - bm_space);
2091 if (bm_space >= 8) {
2092 sb->bblog_size = __cpu_to_le16(8);
2093 sb->bblog_offset = __cpu_to_le32((unsigned)-8);
2094 }
2095 break;
2096 case 1:
2097 case 2:
2098 sb_offset = st->minor_version == 2 ? 8 : 0;
2099 sb->super_offset = __cpu_to_le64(sb_offset);
2100 if (data_offset == INVALID_SECTORS)
2101 data_offset = sb_offset + 16;
2102
2103 sb->data_offset = __cpu_to_le64(data_offset);
2104 sb->data_size = __cpu_to_le64(dsize - data_offset);
2105 if (data_offset >= sb_offset+bm_offset+bm_space+8) {
2106 sb->bblog_size = __cpu_to_le16(8);
2107 sb->bblog_offset = __cpu_to_le32(bm_offset +
2108 bm_space);
2109 } else if (data_offset >= sb_offset + 16) {
2110 sb->bblog_size = __cpu_to_le16(8);
2111 /* Subtract 8 sectors for the bblog, and sb_offset
2112 * because we want the offset from the superblock,
2113 * not from the start of the device.
2114 */
2115 sb->bblog_offset = __cpu_to_le32(data_offset -
2116 8 - sb_offset);
2117 }
2118 break;
2119 default:
2120 pr_err("Failed to write invalid metadata format 1.%i to %s\n",
2121 st->minor_version, di->devname);
2122 rv = -EINVAL;
2123 goto out;
2124 }
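/*
 * Editor's note - a hypothetical 1.2 example of the case above:
 * sb_offset = 8; with no explicit data_offset the default becomes
 * sb_offset + 16 = 24 sectors, so data_size = dsize - 24, and the
 * 8-sector bad block log is placed just past the bitmap/PPL space
 * whenever the data offset leaves room for it.
 */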
2125 /*
2126 * Disable badblock log on clusters, or when
2127 * explicitly requested
2128 */
2129 if (st->nodes > 0 || conf_get_create_info()->bblist == 0) {
2130 sb->bblog_size = 0;
2131 sb->bblog_offset = 0;
2132 }
2133
2134 /* RAID0 needs a layout if devices aren't all the same size */
2135 if (raid0_need_layout)
2136 sb->feature_map |= __cpu_to_le32(MD_FEATURE_RAID0_LAYOUT);
2137
2138 sb->sb_csum = calc_sb_1_csum(sb);
2139 rv = store_super1(st, di->fd);
2140
2141 if (rv == 0 && (di->disk.state & (1 << MD_DISK_JOURNAL))) {
2142 rv = write_empty_r5l_meta_block(st, di->fd);
2143 if (rv)
2144 goto error_out;
2145 }
2146
2147 if (rv == 0 &&
2148 (__le32_to_cpu(sb->feature_map) &
2149 MD_FEATURE_BITMAP_OFFSET)) {
2150 rv = st->ss->write_bitmap(st, di->fd, NodeNumUpdate);
2151 } else if (rv == 0 &&
2152 md_feature_any_ppl_on(sb->feature_map)) {
2153 struct mdinfo info;
2154
2155 st->ss->getinfo_super(st, &info, NULL);
2156 rv = st->ss->write_init_ppl(st, &info, di->fd);
2157 }
2158
2159 close(di->fd);
2160 di->fd = -1;
2161 if (rv)
2162 goto error_out;
2163 }
2164 error_out:
2165 if (rv)
2166 pr_err("Failed to write metadata to %s\n", di->devname);
2167 out:
2168 return rv;
2169 }
2170
2171 static int compare_super1(struct supertype *st, struct supertype *tst,
2172 int verbose)
2173 {
2174 /*
2175 * return:
2176 * 0 same, or first was empty, and second was copied
2177 * 1 second had wrong number
2178 * 2 wrong uuid
2179 * 3 wrong other info
2180 */
2181 struct mdp_superblock_1 *first = st->sb;
2182 struct mdp_superblock_1 *second = tst->sb;
2183
2184 if (second->magic != __cpu_to_le32(MD_SB_MAGIC))
2185 return 1;
2186 if (second->major_version != __cpu_to_le32(1))
2187 return 1;
2188
2189 if (!first) {
2190 if (posix_memalign((void **)&first, 4096, SUPER1_SIZE) != 0) {
2191 pr_err("could not allocate superblock\n");
2192 return 1;
2193 }
2194 memcpy(first, second, SUPER1_SIZE);
2195 st->sb = first;
2196 return 0;
2197 }
2198 if (memcmp(first->set_uuid, second->set_uuid, 16) != 0)
2199 return 2;
2200
2201 if (first->ctime != second->ctime ||
2202 first->level != second->level ||
2203 first->layout != second->layout ||
2204 first->size != second->size ||
2205 first->chunksize != second->chunksize ||
2206 first->raid_disks != second->raid_disks)
2207 return 3;
2208 return 0;
2209 }
2210
2211 static int load_super1(struct supertype *st, int fd, char *devname)
2212 {
2213 unsigned long long dsize;
2214 unsigned long long sb_offset;
2215 struct mdp_superblock_1 *super;
2216 int uuid[4];
2217 struct bitmap_super_s *bsb;
2218 struct misc_dev_info *misc;
2219 struct align_fd afd;
2220
2221 free_super1(st);
2222
2223 init_afd(&afd, fd);
2224
2225 if (st->ss == NULL || st->minor_version == -1) {
2226 int bestvers = -1;
2227 struct supertype tst;
2228 __u64 bestctime = 0;
2229 /* guess... choose latest ctime */
2230 memset(&tst, 0, sizeof(tst));
2231 tst.ss = &super1;
2232 for (tst.minor_version = 0; tst.minor_version <= 2;
2233 tst.minor_version++) {
2234 tst.ignore_hw_compat = st->ignore_hw_compat;
2235 switch(load_super1(&tst, fd, devname)) {
2236 case 0: super = tst.sb;
2237 if (bestvers == -1 ||
2238 bestctime < __le64_to_cpu(super->ctime)) {
2239 bestvers = tst.minor_version;
2240 bestctime = __le64_to_cpu(super->ctime);
2241 }
2242 free(super);
2243 tst.sb = NULL;
2244 break;
2245 case 1: return 1; /*bad device */
2246 case 2: break; /* bad, try next */
2247 }
2248 }
2249 if (bestvers != -1) {
2250 int rv;
2251 tst.minor_version = bestvers;
2252 tst.ss = &super1;
2253 tst.max_devs = MAX_DEVS;
2254 rv = load_super1(&tst, fd, devname);
2255 if (rv == 0)
2256 *st = tst;
2257 return rv;
2258 }
2259 return 2;
2260 }
2261 if (!get_dev_size(fd, devname, &dsize))
2262 return 1;
2263 dsize >>= 9;
2264
2265 if (dsize < 24) {
2266 if (devname)
2267 pr_err("%s is too small for md: size is %llu sectors.\n",
2268 devname, dsize);
2269 return 1;
2270 }
2271
2272 /*
2273 * Calculate the position of the superblock.
2274 * It is always aligned to a 4K boundary and
2275 * depending on minor_version, it can be:
2276 * 0: At least 8K, but less than 12K, from end of device
2277 * 1: At start of device
2278 * 2: 4K from start of device.
2279 */
2280 switch(st->minor_version) {
2281 case 0:
2282 sb_offset = dsize;
2283 sb_offset -= 8*2;
2284 sb_offset &= ~(4*2-1);
2285 break;
2286 case 1:
2287 sb_offset = 0;
2288 break;
2289 case 2:
2290 sb_offset = 4*2;
2291 break;
2292 default:
2293 return -EINVAL;
2294 }
2295
2296 if (lseek64(fd, sb_offset << 9, 0) < 0LL) {
2297 if (devname)
2298 pr_err("Cannot seek to superblock on %s: %s\n",
2299 devname, strerror(errno));
2300 return 1;
2301 }
2302
2303 if (posix_memalign((void **)&super, 4096, SUPER1_SIZE) != 0) {
2304 pr_err("could not allocate superblock\n");
2305 return 1;
2306 }
2307
2308 memset(super, 0, SUPER1_SIZE);
2309
2310 if (aread(&afd, super, MAX_SB_SIZE) != MAX_SB_SIZE) {
2311 if (devname)
2312 pr_err("Cannot read superblock on %s\n",
2313 devname);
2314 free(super);
2315 return 1;
2316 }
2317
2318 if (__le32_to_cpu(super->magic) != MD_SB_MAGIC) {
2319 if (devname)
2320 pr_err("No super block found on %s (Expected magic %08x, got %08x)\n",
2321 devname, MD_SB_MAGIC,
2322 __le32_to_cpu(super->magic));
2323 free(super);
2324 return 2;
2325 }
2326
2327 if (__le32_to_cpu(super->major_version) != 1) {
2328 if (devname)
2329 pr_err("Cannot interpret superblock on %s - version is %d\n",
2330 devname, __le32_to_cpu(super->major_version));
2331 free(super);
2332 return 2;
2333 }
2334 if (__le64_to_cpu(super->super_offset) != sb_offset) {
2335 if (devname)
2336 pr_err("No superblock found on %s (super_offset is wrong)\n",
2337 devname);
2338 free(super);
2339 return 2;
2340 }
2341
2342 bsb = (struct bitmap_super_s *)(((char *)super) + MAX_SB_SIZE);
2343
2344 misc = (struct misc_dev_info*)
2345 (((char *)super) + MAX_SB_SIZE+BM_SUPER_SIZE);
2346 misc->device_size = dsize;
2347 if (st->data_offset == INVALID_SECTORS)
2348 st->data_offset = __le64_to_cpu(super->data_offset);
2349
2350 if (st->minor_version >= 1 &&
2351 st->ignore_hw_compat == 0 &&
2352 ((role_from_sb(super) != MD_DISK_ROLE_JOURNAL &&
2353 dsize < (__le64_to_cpu(super->data_offset) +
2354 __le64_to_cpu(super->size))) ||
2355 dsize < (__le64_to_cpu(super->data_offset) +
2356 __le64_to_cpu(super->data_size)))) {
2357 if (devname)
2358 pr_err("Device %s is not large enough for data described in superblock\n",
2359 devname);
2360 free(super);
2361 return 2;
2362 }
2363 st->sb = super;
2364
2365 /* Now check on the bitmap superblock */
2366 if ((__le32_to_cpu(super->feature_map)&MD_FEATURE_BITMAP_OFFSET) == 0)
2367 return 0;
2368 /* Read the bitmap superblock and make sure it looks
2369 * valid. If it doesn't, clear the bit; an --assemble --force
2370 * should get that written out.
2371 */
2372 locate_bitmap1(st, fd, 0);
2373 if (aread(&afd, bsb, 512) != 512)
2374 goto no_bitmap;
2375
2376 uuid_from_super1(st, uuid);
2377 if (__le32_to_cpu(bsb->magic) != BITMAP_MAGIC ||
2378 memcmp(bsb->uuid, uuid, 16) != 0)
2379 goto no_bitmap;
2380 return 0;
2381
2382 no_bitmap:
2383 super->feature_map = __cpu_to_le32(__le32_to_cpu(super->feature_map) &
2384 ~MD_FEATURE_BITMAP_OFFSET);
2385 return 0;
2386 }
2387
2388 static struct supertype *match_metadata_desc1(char *arg)
2389 {
2390 struct supertype *st = xcalloc(1, sizeof(*st));
2391
2392 st->container_devnm[0] = 0;
2393 st->ss = &super1;
2394 st->max_devs = MAX_DEVS;
2395 st->sb = NULL;
2396 st->data_offset = INVALID_SECTORS;
2397 /* leading zeros can be safely ignored. --detail generates them. */
2398 while (*arg == '0')
2399 arg++;
2400 if (strcmp(arg, "1.0") == 0 || strcmp(arg, "1.00") == 0) {
2401 st->minor_version = 0;
2402 return st;
2403 }
2404 if (strcmp(arg, "1.1") == 0 || strcmp(arg, "1.01") == 0
2405 ) {
2406 st->minor_version = 1;
2407 return st;
2408 }
2409 if (strcmp(arg, "1.2") == 0 ||
2410 #ifndef DEFAULT_OLD_METADATA /* ifdef in super0.c */
2411 strcmp(arg, "default") == 0 ||
2412 #endif /* DEFAULT_OLD_METADATA */
2413 strcmp(arg, "1.02") == 0) {
2414 st->minor_version = 2;
2415 return st;
2416 }
2417 if (strcmp(arg, "1") == 0 || strcmp(arg, "default") == 0) {
2418 st->minor_version = -1;
2419 return st;
2420 }
2421
2422 free(st);
2423 return NULL;
2424 }
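/*
 * Editor's note - arguments accepted above (examples): "1.0"/"1.00"
 * select minor version 0, "1.1"/"1.01" select 1, "1.2"/"1.02" (and,
 * unless DEFAULT_OLD_METADATA is set, "default") select 2, and a
 * bare "1" selects -1, which makes load_super1() probe all three
 * superblock locations and keep the one with the newest ctime.
 */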
2425
2426 /* find the available data size on a device with this devsize,
2427 * using superblock type st, after reserving space for a
2428 * possible bitmap or PPL and the bad block log
2429 */
2430 static __u64 avail_size1(struct supertype *st, __u64 devsize,
2431 unsigned long long data_offset)
2432 {
2433 struct mdp_superblock_1 *super = st->sb;
2434 int bmspace = 0;
2435 int bbspace = 0;
2436 if (devsize < 24)
2437 return 0;
2438
2439 if (__le32_to_cpu(super->feature_map) & MD_FEATURE_BITMAP_OFFSET) {
2440 /* hot-add. allow for actual size of bitmap */
2441 struct bitmap_super_s *bsb;
2442 bsb = (struct bitmap_super_s *)(((char *)super) + MAX_SB_SIZE);
2443 bmspace = calc_bitmap_size(bsb, 4096) >> 9;
2444 } else if (md_feature_any_ppl_on(super->feature_map)) {
2445 bmspace = __le16_to_cpu(super->ppl.size);
2446 }
2447
2448 /* Allow space for bad block log */
2449 if (super->bblog_size)
2450 bbspace = __le16_to_cpu(super->bblog_size);
2451
2452 if (st->minor_version < 0)
2453 /* not specified, so time to set default */
2454 st->minor_version = 2;
2455
2456 if (data_offset == INVALID_SECTORS)
2457 data_offset = st->data_offset;
2458
2459 if (data_offset != INVALID_SECTORS)
2460 switch(st->minor_version) {
2461 case 0:
2462 return devsize - data_offset - 8*2 - bbspace;
2463 case 1:
2464 case 2:
2465 return devsize - data_offset;
2466 default:
2467 return 0;
2468 }
2469
2470 devsize -= bmspace;
2471
2472 switch(st->minor_version) {
2473 case 0:
2474 /* at end */
2475 return ((devsize - 8*2 - bbspace) & ~(4*2-1));
2476 case 1:
2477 /* at start, 4K for superblock and possible bitmap */
2478 return devsize - 4*2 - bbspace;
2479 case 2:
2480 /* 4k from start, 4K for superblock and possible bitmap */
2481 return devsize - (4+4)*2 - bbspace;
2482 }
2483 return 0;
2484 }
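/*
 * Editor's note - worked example with hypothetical values: a 1GiB
 * member (2097152 sectors) with no bitmap, PPL or bblog and no
 * fixed data_offset, under the default minor version 2, yields
 * 2097152 - (4+4)*2 = 2097136 usable sectors.
 */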
2485
2486 static int
2487 add_internal_bitmap1(struct supertype *st,
2488 int *chunkp, int delay, int write_behind,
2489 unsigned long long size,
2490 int may_change, int major)
2491 {
2492 /*
2493 * If not may_change, then this is a 'Grow' without sysfs support for
2494 * bitmaps, and the bitmap must fit after the superblock at 1K offset.
2495 * If may_change, then this is create or a Grow with sysfs support,
2496 * and we can put the bitmap wherever we like.
2497 *
2498 * size is in sectors, chunk is in bytes !!!
2499 */
2500
2501 unsigned long long bits;
2502 unsigned long long max_bits;
2503 unsigned long long min_chunk;
2504 long offset;
2505 long bbl_offset, bbl_size;
2506 unsigned long long chunk = *chunkp;
2507 int room = 0;
2508 int creating = 0;
2509 int len;
2510 struct mdp_superblock_1 *sb = st->sb;
2511 bitmap_super_t *bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);
2512 int uuid[4];
2513
2514 if (__le64_to_cpu(sb->data_size) == 0)
2515 /*
2516 * Must be creating the array, else data_size
2517 * would be non-zero
2518 */
2519 creating = 1;
2520 switch(st->minor_version) {
2521 case 0:
2522 /*
2523 * either 3K after the superblock (when hot-add),
2524 * or some amount of space before.
2525 */
2526 if (creating) {
2527 /*
2528 * We are creating the array, so we *know* how much room has
2529 * been left.
2530 */
2531 offset = 0;
2532 bbl_size = 8;
2533 room =
2534 choose_bm_space(__le64_to_cpu(sb->size)) + bbl_size;
2535 } else {
2536 room = __le64_to_cpu(sb->super_offset)
2537 - __le64_to_cpu(sb->data_offset)
2538 - __le64_to_cpu(sb->data_size);
2539 bbl_size = __le16_to_cpu(sb->bblog_size);
2540 if (bbl_size < 8)
2541 bbl_size = 8;
2542 bbl_offset = (__s32)__le32_to_cpu(sb->bblog_offset);
2543 if (bbl_size < -bbl_offset)
2544 bbl_size = -bbl_offset;
2545
2546 if (!may_change ||
2547 (room < 3*2 && __le32_to_cpu(sb->max_dev) <= 384)) {
2548 room = 3*2;
2549 offset = 1*2;
2550 bbl_size = 0;
2551 } else {
2552 offset = 0; /* means movable offset */
2553 }
2554 }
2555 break;
2556 case 1:
2557 case 2: /* between superblock and data */
2558 if (creating) {
2559 offset = 4*2;
2560 bbl_size = 8;
2561 room =
2562 choose_bm_space(__le64_to_cpu(sb->size)) + bbl_size;
2563 } else {
2564 room = __le64_to_cpu(sb->data_offset)
2565 - __le64_to_cpu(sb->super_offset);
2566 bbl_size = __le16_to_cpu(sb->bblog_size);
2567 if (bbl_size)
2568 room =
2569 __le32_to_cpu(sb->bblog_offset) + bbl_size;
2570 else
2571 bbl_size = 8;
2572
2573 if (!may_change) {
2574 room -= 2; /* Leave 1K for superblock */
2575 offset = 2;
2576 bbl_size = 0;
2577 } else {
2578 room -= 4*2; /* leave 4K for superblock */
2579 offset = 4*2;
2580 }
2581 }
2582 break;
2583 default:
2584 return -ENOSPC;
2585 }
2586
2587 room -= bbl_size;
2588 if (chunk == UnSet && room > 128*2)
2589 /* Limit to 128K of bitmap when chunk size not requested */
2590 room = 128*2;
2591
2592 if (room <= 1)
2593 /* No room for a bitmap */
2594 return -ENOSPC;
2595
2596 max_bits = (room * 512 - sizeof(bitmap_super_t)) * 8;
2597
2598 min_chunk = 4096; /* sub-page chunks don't work yet.. */
2599 bits = (size * 512) / min_chunk + 1;
2600 while (bits > max_bits) {
2601 min_chunk *= 2;
2602 bits = (bits + 1) / 2;
2603 }
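/*
 * Editor's note - hypothetical example of the loop above: for a
 * 2TiB component (size = 4294967296 sectors) with 128K of bitmap
 * room, max_bits is (256*512 - 256)*8 = 1046528, so min_chunk
 * doubles from 4096 up to 4MiB before the bit count fits; the
 * UnSet default below then raises it to 64MB anyway.
 */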
2604 if (chunk == UnSet) {
2605 /* For practical purposes, 64MB is a good
2606 * default chunk size for internal bitmaps.
2607 */
2608 chunk = min_chunk;
2609 if (chunk < 64*1024*1024)
2610 chunk = 64*1024*1024;
2611 } else if (chunk < min_chunk)
2612 return -EINVAL; /* chunk size too small */
2613 if (chunk == 0) /* rounding problem */
2614 return -EINVAL;
2615
2616 if (offset == 0) {
2617 /* start bitmap on a 4K boundary with enough space for
2618 * the bitmap
2619 */
2620 bits = (size * 512) / chunk + 1;
2621 room = ((bits + 7) / 8 + sizeof(bitmap_super_t) + 4095) / 4096;
2622 room *= 8; /* convert 4K blocks to sectors */
2623 offset = -room - bbl_size;
2624 }
2625
2626 sb->bitmap_offset = (int32_t)__cpu_to_le32(offset);
2627
2628 sb->feature_map = __cpu_to_le32(__le32_to_cpu(sb->feature_map) |
2629 MD_FEATURE_BITMAP_OFFSET);
2630 memset(bms, 0, sizeof(*bms));
2631 bms->magic = __cpu_to_le32(BITMAP_MAGIC);
2632 bms->version = __cpu_to_le32(major);
2633 uuid_from_super1(st, uuid);
2634 memcpy(bms->uuid, uuid, 16);
2635 bms->chunksize = __cpu_to_le32(chunk);
2636 bms->daemon_sleep = __cpu_to_le32(delay);
2637 bms->sync_size = __cpu_to_le64(size);
2638 bms->write_behind = __cpu_to_le32(write_behind);
2639 bms->nodes = __cpu_to_le32(st->nodes);
2640 if (st->nodes)
2641 sb->feature_map = __cpu_to_le32(__le32_to_cpu(sb->feature_map) |
2642 MD_FEATURE_BITMAP_VERSIONED);
2643 if (st->cluster_name) {
2644 len = sizeof(bms->cluster_name);
2645 strncpy((char *)bms->cluster_name, st->cluster_name, len);
2646 bms->cluster_name[len - 1] = '\0';
2647 }
2648
2649 *chunkp = chunk;
2650 return 0;
2651 }
2652
2653 static int locate_bitmap1(struct supertype *st, int fd, int node_num)
2654 {
2655 unsigned long long offset, bm_sectors_per_node;
2656 struct mdp_superblock_1 *sb;
2657 bitmap_super_t *bms;
2658 int mustfree = 0;
2659 int ret;
2660
2661 if (!st->sb) {
2662 if (st->ss->load_super(st, fd, NULL))
2663 return -1; /* no error I hope... */
2664 mustfree = 1;
2665 }
2666 sb = st->sb;
2667
2668 if ((__le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET))
2669 ret = 0;
2670 else
2671 ret = -1;
2672
2673 offset = __le64_to_cpu(sb->super_offset) + (int32_t)__le32_to_cpu(sb->bitmap_offset);
2674 if (node_num) {
2675 bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);
2676 bm_sectors_per_node = calc_bitmap_size(bms, 4096) >> 9;
2677 offset += bm_sectors_per_node * node_num;
2678 }
2679 if (mustfree)
2680 free(sb);
2681 lseek64(fd, offset<<9, 0);
2682 return ret;
2683 }
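/*
 * Editor's note - example with a hypothetical 1.2 layout: with
 * super_offset = 8 and bitmap_offset = 8, the bitmap data starts at
 * sector 16; for clustered arrays each further node's bitmap sits
 * another calc_bitmap_size(bms, 4096) >> 9 sectors along.
 */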
2684
2685 static int write_bitmap1(struct supertype *st, int fd, enum bitmap_update update)
2686 {
2687 struct mdp_superblock_1 *sb = st->sb;
2688 bitmap_super_t *bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);
2689 int rv = 0;
2690 void *buf;
2691 int towrite, n, len;
2692 struct align_fd afd;
2693 unsigned int i = 0;
2694 unsigned long long total_bm_space, bm_space_per_node;
2695
2696 switch (update) {
2697 case NameUpdate:
2698 /* update cluster name */
2699 if (st->cluster_name) {
2700 len = sizeof(bms->cluster_name);
2701 memset((char *)bms->cluster_name, 0, len);
2702 strncpy((char *)bms->cluster_name,
2703 st->cluster_name, len);
2704 bms->cluster_name[len - 1] = '\0';
2705 }
2706 break;
2707 case NodeNumUpdate:
2708 /* cluster md only supports superblock 1.2 now */
2709 if (st->minor_version != 2 &&
2710 bms->version == BITMAP_MAJOR_CLUSTERED) {
2711 pr_err("Warning: cluster md only works with superblock 1.2\n");
2712 return -EINVAL;
2713 }
2714
2715 if (bms->version == BITMAP_MAJOR_CLUSTERED) {
2716 if (st->nodes == 1) {
2717 /* the nodes parameter is not valid */
2718 pr_err("Warning: cluster-md needs at least two nodes\n");
2719 return -EINVAL;
2720 } else if (st->nodes == 0) {
2721 /*
2722 * parameter "--nodes" is not specified, (eg, add a disk to
2723 * clustered raid)
2724 */
2725 break;
2726 } else if (__cpu_to_le32(st->nodes) < bms->nodes) {
2727 /*
2728 * Since the number of nodes is not increasing, there
2729 * is no need to check whether the space is enough;
2730 * just update bms->nodes.
2731 */
2732 bms->nodes = __cpu_to_le32(st->nodes);
2733 break;
2734 }
2735 } else {
2736 /*
2737 * no need to change bms->nodes for other
2738 * bitmap types
2739 */
2740 if (st->nodes)
2741 pr_err("Warning: --nodes option is only suitable for clustered bitmap\n");
2742 break;
2743 }
2744
2745 /*
2746 * Each node has an independent bitmap, so check whether
2747 * there is enough space for all of them; first work out
2748 * how many bytes each node's bitmap needs.
2749 */
2750 bm_space_per_node = calc_bitmap_size(bms, 4096);
2751
2752 total_bm_space = 512 * (__le64_to_cpu(sb->data_offset) -
2753 __le64_to_cpu(sb->super_offset));
2754 /* leave another 4k for superblock */
2755 total_bm_space = total_bm_space - 4096;
2756
2757 if (bm_space_per_node * st->nodes > total_bm_space) {
2758 pr_err("Warning: The max num of nodes can't exceed %llu\n",
2759 total_bm_space / bm_space_per_node);
2760 return -ENOMEM;
2761 }
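/*
 * Editor's note - hypothetical numbers for the check above: with
 * 4MiB between super_offset and data_offset, total_bm_space is
 * 4MiB - 4096 bytes; if each node's bitmap needs 1MiB, at most 3
 * nodes fit, and asking for 4 fails with the message above.
 */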
2762
2763 bms->nodes = __cpu_to_le32(st->nodes);
2764 break;
2765 case NoUpdate:
2766 default:
2767 break;
2768 }
2769
2770 init_afd(&afd, fd);
2771
2772 if (locate_bitmap1(st, fd, 0) < 0) {
2773 pr_err("Error: Invalid bitmap\n");
2774 return -EINVAL;
2775 }
2776
2777 if (posix_memalign(&buf, 4096, 4096))
2778 return -ENOMEM;
2779
2780 do {
2781 /* Only bitmap[0] should trigger a resync of the
2782 * whole device on initial assembly.
2783 */
2784 if (i)
2785 memset(buf, 0x00, 4096);
2786 else
2787 memset(buf, 0xff, 4096);
2788 memcpy(buf, (char *)bms, sizeof(bitmap_super_t));
2789
2790 /*
2791 * Round to a 4096-byte boundary if bitmap_offset is
2792 * aligned to 8 sectors; otherwise round to 512 bytes
2793 * to stay compatible with older mdadm.
2794 */
2795 if (__le32_to_cpu(sb->bitmap_offset) & 7)
2796 towrite = calc_bitmap_size(bms, 512);
2797 else
2798 towrite = calc_bitmap_size(bms, 4096);
2799 while (towrite > 0) {
2800 n = towrite;
2801 if (n > 4096)
2802 n = 4096;
2803 n = awrite(&afd, buf, n);
2804 if (n > 0)
2805 towrite -= n;
2806 else
2807 break;
2808 if (i)
2809 memset(buf, 0x00, 4096);
2810 else
2811 memset(buf, 0xff, 4096);
2812 }
2813 fsync(fd);
2814 if (towrite) {
2815 rv = -2;
2816 break;
2817 }
2818 } while (++i < __le32_to_cpu(bms->nodes));
2819
2820 free(buf);
2821 return rv;
2822 }
2823
2824 static void free_super1(struct supertype *st)
2825 {
2826
2827 if (st->sb)
2828 free(st->sb);
2829 while (st->info) {
2830 struct devinfo *di = st->info;
2831 st->info = di->next;
2832 if (di->fd >= 0)
2833 close(di->fd);
2834 free(di);
2835 }
2836 st->sb = NULL;
2837 }
2838
2839 static int validate_geometry1(struct supertype *st, int level,
2840 int layout, int raiddisks,
2841 int *chunk, unsigned long long size,
2842 unsigned long long data_offset,
2843 char *subdev, unsigned long long *freesize,
2844 int consistency_policy, int verbose)
2845 {
2846 unsigned long long ldsize, devsize;
2847 int bmspace;
2848 unsigned long long headroom;
2849 unsigned long long overhead;
2850 int fd;
2851
2852 if (is_container(level)) {
2853 if (verbose)
2854 pr_err("1.x metadata does not support containers\n");
2855 return 0;
2856 }
2857 if (*chunk == UnSet)
2858 *chunk = DEFAULT_CHUNK;
2859
2860 if (!subdev)
2861 return 1;
2862
2863 if (st->minor_version < 0)
2864 /* not specified, so time to set default */
2865 st->minor_version = 2;
2866
2867 fd = open(subdev, O_RDONLY|O_EXCL, 0);
2868 if (fd < 0) {
2869 if (verbose)
2870 pr_err("super1.x cannot open %s: %s\n",
2871 subdev, strerror(errno));
2872 return 0;
2873 }
2874
2875 if (!get_dev_size(fd, subdev, &ldsize)) {
2876 close(fd);
2877 return 0;
2878 }
2879 close(fd);
2880
2881 devsize = ldsize >> 9;
2882
2883 /* creating: allow suitable space for bitmap or PPL */
2884 if (consistency_policy == CONSISTENCY_POLICY_PPL)
2885 bmspace = MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9;
2886 else
2887 bmspace = choose_bm_space(devsize);
2888
2889 if (data_offset == INVALID_SECTORS)
2890 data_offset = st->data_offset;
2891 if (data_offset == INVALID_SECTORS)
2892 switch (st->minor_version) {
2893 case 0:
2894 data_offset = 0;
2895 break;
2896 case 1:
2897 case 2:
2898 /* Choose data offset appropriate for this device
2899 * and use as default for whole array.
2900 * The data_offset must allow for bitmap space
2901 * and base metadata, should allow for some headroom
2902 * for reshape, and should be rounded to multiple
2903 * of 1M.
2904 * Headroom is limited to 128M, but aim for about 0.1%
2905 */
2906 headroom = 128*1024*2;
2907 while ((headroom << 10) > devsize &&
2908 (*chunk == 0 ||
2909 headroom / 2 >= ((unsigned)(*chunk)*2)*2))
2910 headroom >>= 1;
2911 data_offset = 12*2 + bmspace + headroom;
2912 #define ONE_MEG (2*1024)
2913 data_offset = ROUND_UP(data_offset, ONE_MEG);
2914 break;
2915 }
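/*
 * Editor's note - hypothetical example: on a 1TiB member
 * (2147483648 sectors) the full 128MiB headroom (262144 sectors)
 * already satisfies the ~0.1% aim, so data_offset becomes
 * 12*2 + bmspace + 262144, rounded up to a 1MiB multiple; on small
 * devices the loop halves the headroom until it fits.
 */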
2916 if (st->data_offset == INVALID_SECTORS)
2917 st->data_offset = data_offset;
2918 switch(st->minor_version) {
2919 case 0: /* metadata at end. Round down and subtract space to reserve */
2920 devsize = (devsize & ~(4ULL*2-1));
2921 /* space for metadata, bblog, bitmap/ppl */
2922 overhead = 8*2 + 8 + bmspace;
2923 if (devsize < overhead) /* detect underflow */
2924 goto dev_too_small_err;
2925 devsize -= overhead;
2926 break;
2927 case 1:
2928 case 2:
2929 if (devsize < data_offset) /* detect underflow */
2930 goto dev_too_small_err;
2931 devsize -= data_offset;
2932 break;
2933 }
2934 *freesize = devsize;
2935 return 1;
2936
2937 /* Error condition, device cannot even hold the overhead. */
2938 dev_too_small_err:
2939 fprintf(stderr, "device %s is too small (%lluK) for "
2940 "required metadata!\n", subdev, devsize>>1);
2941 *freesize = 0;
2942 return 0;
2943 }
2944
2945 void *super1_make_v0(struct supertype *st, struct mdinfo *info, mdp_super_t *sb0)
2946 {
2947 /* Create a v1.0 superblock based on 'info' */
2948 void *ret;
2949 struct mdp_superblock_1 *sb;
2950 int i;
2951 unsigned long long offset;
2952
2953 if (posix_memalign(&ret, 4096, 1024) != 0)
2954 return NULL;
2955 sb = ret;
2956 memset(ret, 0, 1024);
2957 sb->magic = __cpu_to_le32(MD_SB_MAGIC);
2958 sb->major_version = __cpu_to_le32(1);
2959
2960 copy_uuid(sb->set_uuid, info->uuid, super1.swapuuid);
2961 sprintf(sb->set_name, "%d", sb0->md_minor);
2962 sb->ctime = __cpu_to_le32(info->array.ctime + 1);
2963 sb->level = __cpu_to_le32(info->array.level);
2964 sb->layout = __cpu_to_le32(info->array.layout);
2965 sb->size = __cpu_to_le64(info->component_size);
2966 sb->chunksize = __cpu_to_le32(info->array.chunk_size / 512);
2967 sb->raid_disks = __cpu_to_le32(info->array.raid_disks);
2968 if (info->array.level > 0)
2969 sb->data_size = sb->size;
2970 else
2971 sb->data_size = st->ss->avail_size(st, st->devsize / 512, 0);
2972 sb->resync_offset = MaxSector;
2973 sb->max_dev = __cpu_to_le32(MD_SB_DISKS);
2974 sb->dev_number = __cpu_to_le32(info->disk.number);
2975 sb->utime = __cpu_to_le64(info->array.utime);
2976
2977 offset = st->devsize/512 - 8*2;
2978 offset &= ~(4*2-1);
2979 sb->super_offset = __cpu_to_le64(offset);
2980 //*(__u64*)(st->other + 128 + 8 + 8) = __cpu_to_le64(offset);
2981
2982 random_uuid(sb->device_uuid);
2983
2984 for (i = 0; i < MD_SB_DISKS; i++) {
2985 int state = sb0->disks[i].state;
2986 sb->dev_roles[i] = MD_DISK_ROLE_SPARE;
2987 if ((state & (1<<MD_DISK_SYNC)) &&
2988 !(state & (1<<MD_DISK_FAULTY)))
2989 sb->dev_roles[i] = __cpu_to_le16(sb0->disks[i].raid_disk);
2990 }
2991 sb->sb_csum = calc_sb_1_csum(sb);
2992 return ret;
2993 }
2994
2995 struct superswitch super1 = {
2996 .examine_super = examine_super1,
2997 .brief_examine_super = brief_examine_super1,
2998 .export_examine_super = export_examine_super1,
2999 .detail_super = detail_super1,
3000 .brief_detail_super = brief_detail_super1,
3001 .export_detail_super = export_detail_super1,
3002 .write_init_super = write_init_super1,
3003 .validate_geometry = validate_geometry1,
3004 .add_to_super = add_to_super1,
3005 .examine_badblocks = examine_badblocks_super1,
3006 .copy_metadata = copy_metadata1,
3007 .write_init_ppl = write_init_ppl1,
3008 .match_home = match_home1,
3009 .uuid_from_super = uuid_from_super1,
3010 .getinfo_super = getinfo_super1,
3011 .container_content = container_content1,
3012 .update_super = update_super1,
3013 .init_super = init_super1,
3014 .store_super = store_super1,
3015 .compare_super = compare_super1,
3016 .load_super = load_super1,
3017 .match_metadata_desc = match_metadata_desc1,
3018 .avail_size = avail_size1,
3019 .add_internal_bitmap = add_internal_bitmap1,
3020 .locate_bitmap = locate_bitmap1,
3021 .write_bitmap = write_bitmap1,
3022 .free_super = free_super1,
3023 #if __BYTE_ORDER == BIG_ENDIAN
3024 .swapuuid = 0,
3025 #else
3026 .swapuuid = 1,
3027 #endif
3028 .name = "1.x",
3029 };