/*
 * mdadm - manage Linux "md" devices aka RAID arrays.
 *
 * Copyright (C) 2001-2016 Neil Brown <neilb@suse.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Email: <neilb@suse.de>
 */
#define MAX_SB_SIZE 4096
/* bitmap super size is 256, but we round up to a sector for alignment */
#define BM_SUPER_SIZE 512
#define MAX_DEVS ((int)(MAX_SB_SIZE - sizeof(struct mdp_superblock_1)) / 2)
#define SUPER1_SIZE	(MAX_SB_SIZE + BM_SUPER_SIZE \
			 + sizeof(struct misc_dev_info))
struct misc_dev_info {
	__u64 device_size;	/* filled in from the device when loading */
};

#define MULTIPLE_PPL_AREA_SIZE_SUPER1 (1024 * 1024) /* Size of the whole
						      * multiple PPL area
						      */
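/*
 * Layout note (explanatory, not in the original source): the in-memory
 * superblock buffer allocated with SUPER1_SIZE holds three consecutive
 * regions, which the pointer casts used throughout this file rely on:
 *
 *   offset 0                         struct mdp_superblock_1 (MAX_SB_SIZE)
 *   offset MAX_SB_SIZE               bitmap_super_t (padded to BM_SUPER_SIZE)
 *   offset MAX_SB_SIZE+BM_SUPER_SIZE struct misc_dev_info
 */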
static int role_from_sb(struct mdp_superblock_1 *sb)
{
	unsigned int d;
	int role;

	d = __le32_to_cpu(sb->dev_number);
	if (d < __le32_to_cpu(sb->max_dev))
		role = __le16_to_cpu(sb->dev_roles[d]);
	else
		role = MD_DISK_ROLE_SPARE;
	return role;
}
/* return how many bytes are needed for bitmap, for cluster-md each node
 * should have its own bitmap */
static unsigned int calc_bitmap_size(bitmap_super_t *bms, unsigned int boundary)
{
	unsigned long long bits, bytes;

	bits = bitmap_bits(__le64_to_cpu(bms->sync_size),
			   __le32_to_cpu(bms->chunksize));
	bytes = (bits + 7) >> 3;
	bytes += sizeof(bitmap_super_t);
	bytes = ROUND_UP(bytes, boundary);

	return bytes;
}
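/*
 * Worked example (illustrative, values assumed): for a sync_size of
 * 2147483648 sectors (1 TiB) and a 64 MiB bitmap chunk, bitmap_bits()
 * yields 16384 bits -> 2048 bytes of bitmap, plus the 256-byte
 * bitmap_super_t = 2304 bytes, rounded up to a 512-byte boundary = 2560.
 */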
static unsigned int calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	unsigned int disk_csum, csum;
	unsigned long long newcsum;
	int size = sizeof(*sb) + __le32_to_cpu(sb->max_dev)*2;
	unsigned int *isuper = (unsigned int *)sb;

	/* make sure I can count... */
	if (offsetof(struct mdp_superblock_1, data_offset) != 128 ||
	    offsetof(struct mdp_superblock_1, utime) != 192 ||
	    sizeof(struct mdp_superblock_1) != 256) {
		fprintf(stderr, "WARNING - superblock isn't sized correctly\n");
	}

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (; size >= 4; size -= 4) {
		newcsum += __le32_to_cpu(*isuper);
		isuper++;
	}

	if (size == 2)
		newcsum += __le16_to_cpu(*(unsigned short *)isuper);
	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return __cpu_to_le32(csum);
}
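/*
 * Note (explanatory, not in the original source): the final fold adds the
 * high and low 32-bit halves of the 64-bit running sum, so carries out of
 * bit 31 wrap back in rather than being lost; e.g. a running sum of
 * 0x1_0000_0003 folds to 0x00000004.
 */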
/*
 * Information related to file descriptor used for aligned reads/writes.
 * Cache the block size.
 */
struct align_fd {
	int fd;
	int blk_sz;
};

static void init_afd(struct align_fd *afd, int fd)
{
	afd->fd = fd;
	if (!get_dev_sector_size(afd->fd, NULL, (unsigned int *)&afd->blk_sz))
		afd->blk_sz = 512;
}
static char abuf[4096+4096];
static int aread(struct align_fd *afd, void *buf, int len)
{
	/* aligned read.
	 * On devices with a 4K sector size, we need to read
	 * the full sector and copy relevant bits into
	 * the buffer
	 */
	int bsize, iosize;
	char *b;
	int n;

	bsize = afd->blk_sz;

	if (!bsize || bsize > 4096 || len > 4096) {
		if (!bsize)
			fprintf(stderr, "WARNING - aread() called with invalid block size\n");
		return -1;
	}
	b = ROUND_UP_PTR((char *)abuf, 4096);

	for (iosize = 0; iosize < len; iosize += bsize)
		;
	n = read(afd->fd, b, iosize);
	if (n <= 0)
		return n;
	if (lseek(afd->fd, len - n, 1) < 0) {
		pr_err("lseek fails\n");
		return -1;
	}
	if (n > len)
		n = len;
	memcpy(buf, b, n);
	return n;
}
static int awrite(struct align_fd *afd, void *buf, int len)
{
	/* aligned write.
	 * On devices with a 4K sector size, we need to write
	 * the full sector.  We pre-read if the sector is larger
	 * than the write.
	 * The address must be sector-aligned.
	 */
	int bsize, iosize;
	char *b;
	int n;

	bsize = afd->blk_sz;
	if (!bsize || bsize > 4096 || len > 4096) {
		if (!bsize)
			fprintf(stderr, "WARNING - awrite() called with invalid block size\n");
		return -1;
	}
	b = ROUND_UP_PTR((char *)abuf, 4096);

	for (iosize = 0; iosize < len; iosize += bsize)
		;

	if (iosize > len) {
		/* pre-read the full sector, as we only rewrite part of it */
		n = read(afd->fd, b, iosize);
		if (n <= 0)
			return n;
		if (lseek(afd->fd, -n, 1) < 0) {
			pr_err("lseek fails\n");
			return -1;
		}
	}

	memcpy(b, buf, len);
	n = write(afd->fd, b, iosize);
	if (n <= 0)
		return n;
	if (lseek(afd->fd, len - n, 1) < 0) {
		pr_err("lseek fails\n");
		return -1;
	}
	return len;
}
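/*
 * Example (illustrative): writing a 512-byte superblock to a device with
 * 4096-byte sectors.  awrite() rounds iosize up to 4096, pre-reads the
 * whole sector into the aligned bounce buffer, seeks back, overwrites the
 * first 512 bytes with the caller's data, and writes the full sector,
 * leaving the file offset advanced by len (512) bytes as the caller expects.
 */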
static inline unsigned int md_feature_any_ppl_on(__u32 feature_map)
{
	return ((__cpu_to_le32(feature_map) &
	    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)));
}
static inline unsigned int choose_ppl_space(int chunk)
{
	return (PPL_HEADER_SIZE >> 9) + (chunk > 128*2 ? chunk : 128*2);
}
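/*
 * Example (assuming PPL_HEADER_SIZE is 4096 bytes, i.e. 8 sectors): for a
 * 256K chunk (512 sectors) choose_ppl_space() returns 8 + 512 = 520
 * sectors; for any chunk below 128K it returns 8 + 256 sectors.
 */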
static void examine_super1(struct supertype *st, char *homehost)
{
	struct mdp_superblock_1 *sb = st->sb;
	bitmap_super_t *bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);
	time_t atime;
	unsigned int d;
	int role;
	int delta_extra = 0;
	int i;
	char *c;
	int l = homehost ? strlen(homehost) : 0;
	int layout;
	unsigned long long sb_offset;
	struct mdinfo info;
	int inconsistent = 0;
	unsigned int expected_csum = 0;

	expected_csum = calc_sb_1_csum(sb);

	printf("          Magic : %08x\n", __le32_to_cpu(sb->magic));
	printf("        Version : 1");
	sb_offset = __le64_to_cpu(sb->super_offset);
	if (sb_offset <= 4)
		printf(".1");
	else if (sb_offset <= 8)
		printf(".2");
	else
		printf(".0");
	printf("\n");
	printf("    Feature Map : 0x%x\n", __le32_to_cpu(sb->feature_map));
	printf("     Array UUID : ");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->set_uuid[i]);
	}
	printf("\n");
	printf("           Name : %.32s", sb->set_name);
	if (l > 0 && l < 32 &&
	    sb->set_name[l] == ':' &&
	    strncmp(sb->set_name, homehost, l) == 0)
		printf("  (local to host %s)", homehost);
	printf("\n");
	if (bms->nodes > 0 &&
	    (__le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET))
		printf("   Cluster Name : %-64s\n", bms->cluster_name);
	atime = __le64_to_cpu(sb->ctime) & 0xFFFFFFFFFFULL;
	printf("  Creation Time : %.24s\n", ctime(&atime));
	c = map_num(pers, __le32_to_cpu(sb->level));
	printf("     Raid Level : %s\n", c?c:"-unknown-");
	printf("   Raid Devices : %d\n", __le32_to_cpu(sb->raid_disks));
	printf("\n");
	printf(" Avail Dev Size : %llu sectors%s\n",
	       (unsigned long long)__le64_to_cpu(sb->data_size),
	       human_size(__le64_to_cpu(sb->data_size)<<9));
	if (__le32_to_cpu(sb->level) > 0) {
		int ddsks = 0, ddsks_denom = 1;

		switch(__le32_to_cpu(sb->level)) {
		case 1: ddsks = 1; break;
		case 4:
		case 5: ddsks = __le32_to_cpu(sb->raid_disks)-1; break;
		case 6: ddsks = __le32_to_cpu(sb->raid_disks)-2; break;
		case 10:
			layout = __le32_to_cpu(sb->layout);
			ddsks = __le32_to_cpu(sb->raid_disks);
			ddsks_denom = (layout&255) * ((layout>>8)&255);
		}
		if (ddsks) {
			long long asize = __le64_to_cpu(sb->size);

			asize = (asize << 9) * ddsks / ddsks_denom;
			printf("     Array Size : %llu KiB%s\n",
			       asize >> 10, human_size(asize));
		}
	}
	if (sb->size != sb->data_size)
		printf("  Used Dev Size : %llu sectors%s\n",
		       (unsigned long long)__le64_to_cpu(sb->size),
		       human_size(__le64_to_cpu(sb->size)<<9));
	printf("    Data Offset : %llu sectors\n",
	       (unsigned long long)__le64_to_cpu(sb->data_offset));
	if (sb->new_offset &&
	    (__le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) {
		unsigned long long offset = __le64_to_cpu(sb->data_offset);
		offset += (signed)(int32_t)__le32_to_cpu(sb->new_offset);
		printf("     New Offset : %llu sectors\n", offset);
	}
	printf("   Super Offset : %llu sectors\n",
	       (unsigned long long)__le64_to_cpu(sb->super_offset));
	if (__le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET)
		printf("Recovery Offset : %llu sectors\n",
		       (unsigned long long)__le64_to_cpu(sb->recovery_offset));

	st->ss->getinfo_super(st, &info, NULL);
	if (info.space_after != 1 &&
	    !(__le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) {
		printf("   Unused Space : before=%llu sectors, ",
		       info.space_before);
		if (info.space_after < INT64_MAX)
			printf("after=%llu sectors\n", info.space_after);
		else
			printf("after=-%llu sectors DEVICE TOO SMALL\n",
			       UINT64_MAX - info.space_after);
	}
	printf("          State : %s%s\n",
	       (__le64_to_cpu(sb->resync_offset) + 1) ? "active" : "clean",
	       (info.space_after > INT64_MAX) ? " TRUNCATED DEVICE" : "");
	printf("    Device UUID : ");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->device_uuid[i]);
	}
	printf("\n");
	if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
		printf("Internal Bitmap : %ld sectors from superblock\n",
		       (long)(int32_t)__le32_to_cpu(sb->bitmap_offset));
	} else if (md_feature_any_ppl_on(sb->feature_map)) {
		printf("            PPL : %u sectors at offset %d sectors from superblock\n",
		       __le16_to_cpu(sb->ppl.size),
		       __le16_to_cpu(sb->ppl.offset));
	}
	if (sb->feature_map & __cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE)) {
		printf("  Reshape pos'n : %llu%s\n", (unsigned long long)
		       __le64_to_cpu(sb->reshape_position)/2,
		       human_size(__le64_to_cpu(sb->reshape_position)<<9));
		if (__le32_to_cpu(sb->delta_disks)) {
			printf("  Delta Devices : %d",
			       __le32_to_cpu(sb->delta_disks));
			printf(" (%d->%d)\n",
			       __le32_to_cpu(sb->raid_disks) -
			       __le32_to_cpu(sb->delta_disks),
			       __le32_to_cpu(sb->raid_disks));
			if ((int)__le32_to_cpu(sb->delta_disks) < 0)
				delta_extra = -__le32_to_cpu(sb->delta_disks);
		}
		if (__le32_to_cpu(sb->new_level) != __le32_to_cpu(sb->level)) {
			c = map_num(pers, __le32_to_cpu(sb->new_level));
			printf("      New Level : %s\n", c?c:"-unknown-");
		}
		if (__le32_to_cpu(sb->new_layout) !=
		    __le32_to_cpu(sb->layout)) {
			if (__le32_to_cpu(sb->level) == 5) {
				c = map_num(r5layout,
					    __le32_to_cpu(sb->new_layout));
				printf("     New Layout : %s\n", c?c:"-unknown-");
			}
			if (__le32_to_cpu(sb->level) == 6) {
				c = map_num(r6layout,
					    __le32_to_cpu(sb->new_layout));
				printf("     New Layout : %s\n", c?c:"-unknown-");
			}
			if (__le32_to_cpu(sb->level) == 10) {
				printf("     New Layout :");
				print_r10_layout(__le32_to_cpu(sb->new_layout));
				printf("\n");
			}
		}
		if (__le32_to_cpu(sb->new_chunk) !=
		    __le32_to_cpu(sb->chunksize))
			printf("  New Chunksize : %dK\n",
			       __le32_to_cpu(sb->new_chunk)/2);
		printf("\n");
	}
	if (sb->devflags) {
		printf("          Flags :");
		if (sb->devflags & WriteMostly1)
			printf(" write-mostly");
		if (sb->devflags & FailFast1)
			printf(" failfast");
		printf("\n");
	}

	atime = __le64_to_cpu(sb->utime) & 0xFFFFFFFFFFULL;
	printf("    Update Time : %.24s\n", ctime(&atime));

	if (sb->bblog_size && sb->bblog_offset) {
		printf("  Bad Block Log : %d entries available at offset %ld sectors",
		       __le16_to_cpu(sb->bblog_size)*512/8,
		       (long)(int32_t)__le32_to_cpu(sb->bblog_offset));
		if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BAD_BLOCKS))
			printf(" - bad blocks present.");
		printf("\n");
	}

	if (expected_csum == sb->sb_csum)
		printf("       Checksum : %x - correct\n",
		       __le32_to_cpu(sb->sb_csum));
	else
		printf("       Checksum : %x - expected %x\n",
		       __le32_to_cpu(sb->sb_csum),
		       __le32_to_cpu(expected_csum));
	printf("         Events : %llu\n",
	       (unsigned long long)__le64_to_cpu(sb->events));
	printf("\n");
	if (__le32_to_cpu(sb->level) == 0 &&
	    (sb->feature_map & __cpu_to_le32(MD_FEATURE_RAID0_LAYOUT))) {
		c = map_num(r0layout, __le32_to_cpu(sb->layout));
		printf("         Layout : %s\n", c?c:"-unknown-");
	}
	if (__le32_to_cpu(sb->level) == 5) {
		c = map_num(r5layout, __le32_to_cpu(sb->layout));
		printf("         Layout : %s\n", c?c:"-unknown-");
	}
	if (__le32_to_cpu(sb->level) == 6) {
		c = map_num(r6layout, __le32_to_cpu(sb->layout));
		printf("         Layout : %s\n", c?c:"-unknown-");
	}
	if (__le32_to_cpu(sb->level) == 10) {
		int lo = __le32_to_cpu(sb->layout);
		printf("         Layout :");
		print_r10_layout(lo);
		printf("\n");
	}
	switch(__le32_to_cpu(sb->level)) {
	case 0:
	case 4:
	case 5:
	case 6:
	case 10:
		printf("     Chunk Size : %dK\n",
		       __le32_to_cpu(sb->chunksize)/2);
		break;
	case -1:
		printf("       Rounding : %dK\n",
		       __le32_to_cpu(sb->chunksize)/2);
		break;
	default:
		break;
	}
	printf("\n");
	printf("    Device Role : ");
	role = role_from_sb(sb);
	if (role >= MD_DISK_ROLE_FAULTY)
		printf("spare\n");
	else if (role == MD_DISK_ROLE_JOURNAL)
		printf("Journal\n");
	else if (sb->feature_map & __cpu_to_le32(MD_FEATURE_REPLACEMENT))
		printf("Replacement device %d\n", role);
	else
		printf("Active device %d\n", role);

	printf("    Array State : ");
	for (d = 0; d < __le32_to_cpu(sb->raid_disks) + delta_extra; d++) {
		int cnt = 0;

		for (i = 0; i < __le32_to_cpu(sb->max_dev); i++) {
			unsigned int role = __le16_to_cpu(sb->dev_roles[i]);

			if (role == d)
				cnt++;
		}
		if (cnt == 2 && __le32_to_cpu(sb->level) > 0)
			printf("R");
		else if (cnt == 1)
			printf("A");
		else if (cnt == 0)
			printf(".");
		else
			printf("?");
	}
	printf(" ('A' == active, '.' == missing, 'R' == replacing)");
	printf("\n");
	for (d = 0; d < __le32_to_cpu(sb->max_dev); d++) {
		unsigned int r = __le16_to_cpu(sb->dev_roles[d]);

		if (r <= MD_DISK_ROLE_MAX &&
		    r > __le32_to_cpu(sb->raid_disks) + delta_extra)
			inconsistent = 1;
	}
	if (inconsistent) {
		printf("WARNING Array state is inconsistent - each number should appear only once\n");
		for (d = 0; d < __le32_to_cpu(sb->max_dev); d++)
			if (__le16_to_cpu(sb->dev_roles[d]) >= MD_DISK_ROLE_FAULTY)
				printf(" %d:-", d);
			else
				printf(" %d:%d", d,
				       __le16_to_cpu(sb->dev_roles[d]));
		printf("\n");
	}
}
static void brief_examine_super1(struct supertype *st, int verbose)
{
	struct mdp_superblock_1 *sb = st->sb;
	int i;
	unsigned long long sb_offset;
	char *nm;
	char *c = map_num(pers, __le32_to_cpu(sb->level));

	nm = strchr(sb->set_name, ':');
	if (nm)
		nm++;
	else if (sb->set_name[0])
		nm = sb->set_name;
	else
		nm = NULL;

	printf("ARRAY ");
	if (nm)
		printf(DEV_MD_DIR "%s", nm);
	if (verbose && c)
		printf(" level=%s", c);
	sb_offset = __le64_to_cpu(sb->super_offset);
	if (sb_offset == 0)
		printf(" metadata=1.1 ");
	else if (sb_offset <= 8)
		printf(" metadata=1.2 ");
	else
		printf(" metadata=1.0 ");
	if (verbose)
		printf("num-devices=%d ", __le32_to_cpu(sb->raid_disks));
	printf("UUID=");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->set_uuid[i]);
	}
	printf("\n");
}
static void export_examine_super1(struct supertype *st)
{
	struct mdp_superblock_1 *sb = st->sb;
	int i;
	int len = 32;
	int layout;

	printf("MD_LEVEL=%s\n", map_num_s(pers, __le32_to_cpu(sb->level)));
	printf("MD_DEVICES=%d\n", __le32_to_cpu(sb->raid_disks));
	for (i = 0; i < 32; i++)
		if (sb->set_name[i] == '\n' || sb->set_name[i] == '\0') {
			len = i;
			break;
		}
	if (len)
		printf("MD_NAME=%.*s\n", len, sb->set_name);
	if (__le32_to_cpu(sb->level) > 0) {
		int ddsks = 0, ddsks_denom = 1;

		switch(__le32_to_cpu(sb->level)) {
		case 1:
			ddsks = 1;
			break;
		case 4:
		case 5:
			ddsks = __le32_to_cpu(sb->raid_disks)-1;
			break;
		case 6:
			ddsks = __le32_to_cpu(sb->raid_disks)-2;
			break;
		case 10:
			layout = __le32_to_cpu(sb->layout);
			ddsks = __le32_to_cpu(sb->raid_disks);
			ddsks_denom = (layout&255) * ((layout>>8)&255);
		}
		if (ddsks) {
			long long asize = __le64_to_cpu(sb->size);

			asize = (asize << 9) * ddsks / ddsks_denom;
			printf("MD_ARRAY_SIZE=%s\n",
			       human_size_brief(asize, JEDEC));
		}
	}
	printf("MD_UUID=");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->set_uuid[i]);
	}
	printf("\n");
	printf("MD_UPDATE_TIME=%llu\n",
	       __le64_to_cpu(sb->utime) & 0xFFFFFFFFFFULL);
	printf("MD_DEV_UUID=");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->device_uuid[i]);
	}
	printf("\n");
	printf("MD_EVENTS=%llu\n",
	       (unsigned long long)__le64_to_cpu(sb->events));
}
static int copy_metadata1(struct supertype *st, int from, int to)
{
	/* Read superblock.  If it looks good, write it out.
	 * Then if a bitmap is present, copy that.
	 * And if a bad-block-list is present, copy that too.
	 */
	void *buf;
	unsigned long long dsize, sb_offset;
	const int bufsize = 4*1024;
	struct mdp_superblock_1 super, *sb;

	if (posix_memalign(&buf, 4096, bufsize) != 0)
		return 1;

	if (!get_dev_size(from, NULL, &dsize))
		goto err;
	dsize >>= 9;

	switch(st->minor_version) {
	case 0:
		sb_offset = dsize;
		sb_offset -= 8*2;
		sb_offset &= ~(4*2-1);
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4*2;
		break;
	default:
		goto err;
	}

	if (lseek64(from, sb_offset << 9, 0) < 0LL)
		goto err;
	if (read(from, buf, bufsize) != bufsize)
		goto err;

	sb = buf;
	super = *sb; // save most of sb for when we reuse buf

	if (__le32_to_cpu(super.magic) != MD_SB_MAGIC ||
	    __le32_to_cpu(super.major_version) != 1 ||
	    __le64_to_cpu(super.super_offset) != sb_offset ||
	    calc_sb_1_csum(sb) != super.sb_csum)
		goto err;

	if (lseek64(to, sb_offset << 9, 0) < 0LL)
		goto err;
	if (write(to, buf, bufsize) != bufsize)
		goto err;

	if (super.feature_map & __le32_to_cpu(MD_FEATURE_BITMAP_OFFSET)) {
		unsigned long long bitmap_offset = sb_offset;
		int bytes = 4096; // just an estimate.
		int written = 0;
		struct align_fd afrom, ato;

		init_afd(&afrom, from);
		init_afd(&ato, to);

		bitmap_offset += (int32_t)__le32_to_cpu(super.bitmap_offset);

		if (lseek64(from, bitmap_offset<<9, 0) < 0)
			goto err;
		if (lseek64(to, bitmap_offset<<9, 0) < 0)
			goto err;

		for (written = 0; written < bytes; ) {
			int n = bytes - written;

			if (n > 4096)
				n = 4096;
			if (aread(&afrom, buf, n) != n)
				goto err;
			if (written == 0) {
				/* have the header, can calculate
				 * correct bitmap bytes */
				bitmap_super_t *bms = buf;

				bytes = calc_bitmap_size(bms, 512);
				if (n > bytes)
					n = bytes;
			}
			if (awrite(&ato, buf, n) != n)
				goto err;
			written += n;
		}
	}

	if (super.bblog_size != 0 &&
	    __le16_to_cpu(super.bblog_size) <= 100 &&
	    super.bblog_offset != 0 &&
	    (super.feature_map & __le32_to_cpu(MD_FEATURE_BAD_BLOCKS))) {
		/* There is a bad block log */
		unsigned long long bb_offset = sb_offset;
		int bytes = __le16_to_cpu(super.bblog_size) * 512;
		int written = 0;
		struct align_fd afrom, ato;

		init_afd(&afrom, from);
		init_afd(&ato, to);

		bb_offset += (int32_t)__le32_to_cpu(super.bblog_offset);

		if (lseek64(from, bb_offset<<9, 0) < 0)
			goto err;
		if (lseek64(to, bb_offset<<9, 0) < 0)
			goto err;

		for (written = 0; written < bytes; ) {
			int n = bytes - written;

			if (n > 4096)
				n = 4096;
			if (aread(&afrom, buf, n) != n)
				goto err;
			if (awrite(&ato, buf, n) != n)
				goto err;
			written += n;
		}
	}

	free(buf);
	return 0;

err:
	free(buf);
	return 1;
}
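/*
 * Usage note (assumption, inferred from the supertype method table rather
 * than stated here): copy_metadata1 backs "mdadm --dump"/"--restore" style
 * operations, duplicating the superblock, bitmap and bad-block list between
 * devices without ever touching the data area.
 */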
static void detail_super1(struct supertype *st, char *homehost, char *subarray)
{
	struct mdp_superblock_1 *sb = st->sb;
	bitmap_super_t *bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);
	int i;
	int l = homehost ? strlen(homehost) : 0;

	printf("              Name : %.32s", sb->set_name);
	if (l > 0 && l < 32 && sb->set_name[l] == ':' &&
	    strncmp(sb->set_name, homehost, l) == 0)
		printf("  (local to host %s)", homehost);
	if (bms->nodes > 0 &&
	    (__le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET))
		printf("\n      Cluster Name : %-64s", bms->cluster_name);
	printf("\n              UUID : ");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->set_uuid[i]);
	}
	printf("\n            Events : %llu\n\n",
	       (unsigned long long)__le64_to_cpu(sb->events));
}
static void brief_detail_super1(struct supertype *st, char *subarray)
{
	struct mdp_superblock_1 *sb = st->sb;
	int i;

	printf(" UUID=");
	for (i = 0; i < 16; i++) {
		if ((i & 3) == 0 && i != 0)
			printf(":");
		printf("%02x", sb->set_uuid[i]);
	}
}
static void export_detail_super1(struct supertype *st)
{
	struct mdp_superblock_1 *sb = st->sb;
	int i;
	int len = 32;

	for (i = 0; i < 32; i++)
		if (sb->set_name[i] == '\n' || sb->set_name[i] == '\0') {
			len = i;
			break;
		}
	if (len)
		printf("MD_NAME=%.*s\n", len, sb->set_name);
}
static int examine_badblocks_super1(struct supertype *st, int fd, char *devname)
{
	struct mdp_superblock_1 *sb = st->sb;
	unsigned long long offset;
	int size;
	__u64 *bbl, *bbp;
	int i;

	if (!sb->bblog_size || __le16_to_cpu(sb->bblog_size) > 100 ||
	    !sb->bblog_offset) {
		printf("No bad-blocks list configured on %s\n", devname);
		return 0;
	}
	if ((sb->feature_map & __cpu_to_le32(MD_FEATURE_BAD_BLOCKS)) == 0) {
		printf("Bad-blocks list is empty in %s\n", devname);
		return 0;
	}

	size = __le16_to_cpu(sb->bblog_size) * 512;
	if (posix_memalign((void **)&bbl, 4096, size) != 0) {
		pr_err("could not allocate badblocks list\n");
		return 0;
	}
	offset = __le64_to_cpu(sb->super_offset) +
		(int)__le32_to_cpu(sb->bblog_offset);
	offset <<= 9;
	if (lseek64(fd, offset, 0) < 0) {
		pr_err("Cannot seek to bad-blocks list\n");
		return 1;
	}
	if (read(fd, bbl, size) != size) {
		pr_err("Cannot read bad-blocks list\n");
		return 1;
	}
	/* 64bits per entry. 10 bits is block-count, 54 bits is block
	 * offset.  Blocks are sectors unless bblog->shift makes them bigger
	 */
	bbp = (__u64 *)bbl;
	printf("Bad-blocks on %s:\n", devname);
	for (i = 0; i < size/8; i++, bbp++) {
		__u64 bb = __le64_to_cpu(*bbp);
		int count = bb & 0x3ff;
		unsigned long long sector = bb >> 10;

		if (bb + 1 == 0)
			break;

		sector <<= sb->bblog_shift;
		count <<= sb->bblog_shift;

		printf("%20llu for %d sectors\n", sector, count);
	}
	return 0;
}
static int match_home1(struct supertype *st, char *homehost)
{
	struct mdp_superblock_1 *sb = st->sb;
	int l = homehost ? strlen(homehost) : 0;

	return (l > 0 && l < 32 && sb->set_name[l] == ':' &&
		strncmp(sb->set_name, homehost, l) == 0);
}
static void uuid_from_super1(struct supertype *st, int uuid[4])
{
	struct mdp_superblock_1 *super = st->sb;
	char *cuuid = (char *)uuid;
	int i;

	for (i = 0; i < 16; i++)
		cuuid[i] = super->set_uuid[i];
}
static void getinfo_super1(struct supertype *st, struct mdinfo *info, char *map)
{
	struct mdp_superblock_1 *sb = st->sb;
	struct bitmap_super_s *bsb = (void *)(((char *)sb) + MAX_SB_SIZE);
	struct misc_dev_info *misc =
		(void *)(((char *)sb) + MAX_SB_SIZE+BM_SUPER_SIZE);
	int working = 0;
	unsigned int i;
	unsigned int role;
	unsigned int map_disks = info->array.raid_disks;
	unsigned long long super_offset;
	unsigned long long data_size;

	memset(info, 0, sizeof(*info));
	info->array.major_version = 1;
	info->array.minor_version = st->minor_version;
	info->array.patch_version = 0;
	info->array.raid_disks = __le32_to_cpu(sb->raid_disks);
	info->array.level = __le32_to_cpu(sb->level);
	info->array.layout = __le32_to_cpu(sb->layout);
	info->array.md_minor = -1;
	info->array.ctime = __le64_to_cpu(sb->ctime);
	info->array.utime = __le64_to_cpu(sb->utime);
	info->array.chunk_size = __le32_to_cpu(sb->chunksize)*512;
	info->array.state =
		(__le64_to_cpu(sb->resync_offset) == MaxSector) ? 1 : 0;

	super_offset = __le64_to_cpu(sb->super_offset);
	info->data_offset = __le64_to_cpu(sb->data_offset);
	info->component_size = __le64_to_cpu(sb->size);
	if (sb->feature_map & __le32_to_cpu(MD_FEATURE_BITMAP_OFFSET)) {
		info->bitmap_offset = (int32_t)__le32_to_cpu(sb->bitmap_offset);
		if (__le32_to_cpu(bsb->nodes) > 1)
			info->array.state |= (1 << MD_SB_CLUSTERED);
	} else if (md_feature_any_ppl_on(sb->feature_map)) {
		info->ppl_offset = __le16_to_cpu(sb->ppl.offset);
		info->ppl_size = __le16_to_cpu(sb->ppl.size);
		info->ppl_sector = super_offset + info->ppl_offset;
	}

	info->disk.major = 0;
	info->disk.minor = 0;
	info->disk.number = __le32_to_cpu(sb->dev_number);
	if (__le32_to_cpu(sb->dev_number) >= __le32_to_cpu(sb->max_dev) ||
	    __le32_to_cpu(sb->dev_number) >= MAX_DEVS)
		role = MD_DISK_ROLE_FAULTY;
	else
		role = __le16_to_cpu(sb->dev_roles[__le32_to_cpu(sb->dev_number)]);
	if (info->array.level <= 0)
		data_size = __le64_to_cpu(sb->data_size);
	else
		data_size = __le64_to_cpu(sb->size);
	if (info->data_offset < super_offset) {
		unsigned long long end;

		info->space_before = info->data_offset;
		end = super_offset;

		if (sb->bblog_offset && sb->bblog_size) {
			unsigned long long bboffset = super_offset;

			bboffset += (int32_t)__le32_to_cpu(sb->bblog_offset);
			if (bboffset < end)
				end = bboffset;
		}

		if (super_offset + info->bitmap_offset + info->ppl_offset < end)
			end = super_offset + info->bitmap_offset +
				info->ppl_offset;

		if (info->data_offset + data_size < end)
			info->space_after = end - data_size - info->data_offset;
		else
			info->space_after = 0;
	} else {
		unsigned long long earliest;

		earliest = super_offset + (32+4)*2; /* match kernel */
		if (info->bitmap_offset > 0) {
			unsigned long long bmend = info->bitmap_offset;
			unsigned long long size = calc_bitmap_size(bsb, 4096);

			bmend += size / 512;
			if (bmend > earliest)
				earliest = bmend;
		} else if (info->ppl_offset > 0) {
			unsigned long long pplend;

			pplend = info->ppl_offset + info->ppl_size;
			if (pplend > earliest)
				earliest = pplend;
		}
		if (sb->bblog_offset && sb->bblog_size) {
			unsigned long long bbend = super_offset;

			bbend += (int32_t)__le32_to_cpu(sb->bblog_offset);
			bbend += __le16_to_cpu(sb->bblog_size);
			if (bbend > earliest)
				earliest = bbend;
		}
		if (earliest < info->data_offset)
			info->space_before = info->data_offset - earliest;
		else
			info->space_before = 0;
		info->space_after = misc->device_size - data_size -
			info->data_offset;
	}
	if (info->space_before == 0 && info->space_after == 0) {
		/* It will look like we don't support data_offset changes,
		 * but we do - it's just that there is no room.
		 * A change that reduced the number of devices should
		 * still be allowed, so set the otherwise useless value of '1'
		 */
		info->space_after = 1;
	}
	info->disk.raid_disk = -1;
	switch(role) {
	case MD_DISK_ROLE_SPARE:
		/* spare: not active, not sync, not faulty */
		info->disk.state = 0;
		break;
	case MD_DISK_ROLE_FAULTY:
		info->disk.state = (1 << MD_DISK_FAULTY); /* faulty */
		break;
	case MD_DISK_ROLE_JOURNAL:
		info->disk.state = (1 << MD_DISK_JOURNAL);
		info->disk.raid_disk = role;
		/* journal uses all 4kB blocks */
		info->space_after = (misc->device_size - info->data_offset) % 8;
		break;
	default:
		info->disk.state = 6; /* active and in sync */
		info->disk.raid_disk = role;
	}
	if (sb->devflags & WriteMostly1)
		info->disk.state |= (1 << MD_DISK_WRITEMOSTLY);
	if (sb->devflags & FailFast1)
		info->disk.state |= (1 << MD_DISK_FAILFAST);
	info->events = __le64_to_cpu(sb->events);
	sprintf(info->text_version, "1.%d", st->minor_version);
	info->safe_mode_delay = 200;

	memcpy(info->uuid, sb->set_uuid, 16);

	strncpy(info->name, sb->set_name, 32);
	info->name[32] = 0;

	if ((__le32_to_cpu(sb->feature_map)&MD_FEATURE_REPLACEMENT)) {
		info->disk.state &= ~(1 << MD_DISK_SYNC);
		info->disk.state |= 1 << MD_DISK_REPLACEMENT;
	}

	if (sb->feature_map & __le32_to_cpu(MD_FEATURE_RECOVERY_OFFSET))
		info->recovery_start = __le32_to_cpu(sb->recovery_offset);
	else
		info->recovery_start = MaxSector;

	if (sb->feature_map & __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE)) {
		info->reshape_active = 1;
		if ((sb->feature_map & __le32_to_cpu(MD_FEATURE_NEW_OFFSET)) &&
		    sb->new_offset != 0)
			info->reshape_active |= RESHAPE_NO_BACKUP;
		info->reshape_progress = __le64_to_cpu(sb->reshape_position);
		info->new_level = __le32_to_cpu(sb->new_level);
		info->delta_disks = __le32_to_cpu(sb->delta_disks);
		info->new_layout = __le32_to_cpu(sb->new_layout);
		info->new_chunk = __le32_to_cpu(sb->new_chunk)<<9;
		if (info->delta_disks < 0)
			info->array.raid_disks -= info->delta_disks;
	} else
		info->reshape_active = 0;

	info->recovery_blocked = info->reshape_active;

	if (map)
		for (i = 0; i < map_disks; i++)
			map[i] = 0;
	for (i = 0; i < __le32_to_cpu(sb->max_dev); i++) {
		role = __le16_to_cpu(sb->dev_roles[i]);
		if (/*role == MD_DISK_ROLE_SPARE || */role < (unsigned) info->array.raid_disks) {
			working++;
			if (map && role < map_disks)
				map[role] = 1;
		}
	}

	info->array.working_disks = working;

	if (sb->feature_map & __le32_to_cpu(MD_FEATURE_JOURNAL)) {
		info->journal_device_required = 1;
		info->consistency_policy = CONSISTENCY_POLICY_JOURNAL;
	} else if (md_feature_any_ppl_on(sb->feature_map)) {
		info->consistency_policy = CONSISTENCY_POLICY_PPL;
	} else if (sb->feature_map & __le32_to_cpu(MD_FEATURE_BITMAP_OFFSET)) {
		info->consistency_policy = CONSISTENCY_POLICY_BITMAP;
	} else if (info->array.level <= 0) {
		info->consistency_policy = CONSISTENCY_POLICY_NONE;
	} else {
		info->consistency_policy = CONSISTENCY_POLICY_RESYNC;
	}

	info->journal_clean = 0;
}
static struct mdinfo *container_content1(struct supertype *st, char *subarray)
{
	struct mdinfo *info;

	if (subarray)
		return NULL;

	info = xmalloc(sizeof(*info));
	getinfo_super1(st, info, NULL);
	return info;
}
static int update_super1(struct supertype *st, struct mdinfo *info,
			 enum update_opt update, char *devname, int verbose,
			 int uuid_set, char *homehost)
{
	/* NOTE: for 'assemble' and 'force' we need to return non-zero
	 * if any change was made.  For others, the return value is
	 * ignored.
	 */
	int rv = 0;
	struct mdp_superblock_1 *sb = st->sb;
	bitmap_super_t *bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);

	if (update == UOPT_HOMEHOST && homehost) {
		/*
		 * Note that 'homehost' is special as it is really
		 * a "name" update.
		 */
		char *c;

		update = UOPT_NAME;
		c = strchr(sb->set_name, ':');
		if (c)
			snprintf(info->name, sizeof(info->name), "%s", c + 1);
		else
			snprintf(info->name, sizeof(info->name), "%s",
				 sb->set_name);
	}

	switch (update) {
	case UOPT_NAME: {
		int namelen;

		if (!info->name[0])
			snprintf(info->name, sizeof(info->name), "%d", info->array.md_minor);
		memset(sb->set_name, 0, sizeof(sb->set_name));

		namelen = strnlen(homehost, MD_NAME_MAX) + 1 + strnlen(info->name, MD_NAME_MAX);
		if (homehost &&
		    strchr(info->name, ':') == NULL &&
		    namelen < MD_NAME_MAX) {
			strcpy(sb->set_name, homehost);
			strcat(sb->set_name, ":");
			strcat(sb->set_name, info->name);
		} else {
			namelen = min((int)strnlen(info->name, MD_NAME_MAX),
				      (int)sizeof(sb->set_name) - 1);
			memcpy(sb->set_name, info->name, namelen);
			memset(&sb->set_name[namelen], '\0',
			       sizeof(sb->set_name) - namelen);
		}
		break;
	}
	case UOPT_SPEC_FORCE_ONE:
		/* Not enough devices for a working array,
		 * so bring this one up-to-date
		 */
		if (sb->events != __cpu_to_le64(info->events))
			rv = 1;
		sb->events = __cpu_to_le64(info->events);
		break;
	case UOPT_SPEC_FORCE_ARRAY:
		/* Degraded array and 'force' requests to
		 * maybe need to mark it 'clean'.
		 */
		switch(__le32_to_cpu(sb->level)) {
		case 4:
		case 5:
		case 6:
			/* need to force clean */
			if (sb->resync_offset != MaxSector)
				rv = 1;
			sb->resync_offset = MaxSector;
		}
		break;
	case UOPT_SPEC_ASSEMBLE: {
		int d = info->disk.number;
		int want;

		if (info->disk.state & (1<<MD_DISK_ACTIVE))
			want = info->disk.raid_disk;
		else if (info->disk.state & (1<<MD_DISK_JOURNAL))
			want = MD_DISK_ROLE_JOURNAL;
		else
			want = MD_DISK_ROLE_SPARE;
		if (sb->dev_roles[d] != __cpu_to_le16(want)) {
			sb->dev_roles[d] = __cpu_to_le16(want);
			rv = 1;
		}
		if (info->reshape_active &&
		    sb->feature_map &
		    __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE) &&
		    info->delta_disks >= 0 &&
		    info->reshape_progress <
		    __le64_to_cpu(sb->reshape_position)) {
			sb->reshape_position =
				__cpu_to_le64(info->reshape_progress);
			rv = 1;
		}
		if (info->reshape_active &&
		    sb->feature_map &
		    __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE) &&
		    info->delta_disks < 0 &&
		    info->reshape_progress >
		    __le64_to_cpu(sb->reshape_position)) {
			sb->reshape_position =
				__cpu_to_le64(info->reshape_progress);
			rv = 1;
		}
		break;
	}
	case UOPT_SPEC_LINEAR_GROW_NEW: {
		int i;
		int fd;
		int max = __le32_to_cpu(sb->max_dev);

		if (max > MAX_DEVS)
			return -2;

		for (i = 0; i < max; i++)
			if (__le16_to_cpu(sb->dev_roles[i]) >=
			    MD_DISK_ROLE_FAULTY)
				break;
		if (i != info->disk.number)
			return -2;
		sb->dev_number = __cpu_to_le32(i);

		if (i == max)
			sb->max_dev = __cpu_to_le32(max + 1);

		random_uuid(sb->device_uuid);

		sb->dev_roles[i] = __cpu_to_le16(info->disk.raid_disk);

		fd = open(devname, O_RDONLY);
		if (fd >= 0) {
			unsigned long long ds;

			get_dev_size(fd, devname, &ds);
			close(fd);
			ds >>= 9;
			if (__le64_to_cpu(sb->super_offset) <
			    __le64_to_cpu(sb->data_offset)) {
				sb->data_size = __cpu_to_le64(
					ds - __le64_to_cpu(sb->data_offset));
			} else {
				ds -= 8*2;
				ds &= ~(unsigned long long)(4 * 2 - 1);
				sb->super_offset = __cpu_to_le64(ds);
				sb->data_size = __cpu_to_le64(
					ds - __le64_to_cpu(sb->data_offset));
			}
		}
		break;
	}
	case UOPT_SPEC_LINEAR_GROW_UPDATE: {
		int max = __le32_to_cpu(sb->max_dev);
		int i = info->disk.number;

		if (max > MAX_DEVS || i > MAX_DEVS)
			return -2;
		if (i == max)
			sb->max_dev = __cpu_to_le32(max + 1);
		sb->raid_disks = __cpu_to_le32(info->array.raid_disks);
		sb->dev_roles[info->disk.number] =
			__cpu_to_le16(info->disk.raid_disk);
		break;
	}
	case UOPT_RESYNC:
		/* make sure resync happens */
		sb->resync_offset = 0;
		break;
	case UOPT_UUID:
		copy_uuid(sb->set_uuid, info->uuid, super1.swapuuid);

		if (__le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)
			memcpy(bms->uuid, sb->set_uuid, 16);
		break;
	case UOPT_NO_BITMAP:
		sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
		if (bms->version == BITMAP_MAJOR_CLUSTERED && !IsBitmapDirty(devname))
			sb->resync_offset = MaxSector;
		break;
	case UOPT_BBL: {
		/* only possible if there is room after the bitmap, or if
		 * there is no bitmap
		 */
		unsigned long long sb_offset = __le64_to_cpu(sb->super_offset);
		unsigned long long data_offset = __le64_to_cpu(sb->data_offset);
		long bitmap_offset = 0;
		long bm_sectors = 0;
		long space;

		if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
			bitmap_offset = (long)__le32_to_cpu(sb->bitmap_offset);
			bm_sectors = calc_bitmap_size(bms, 4096) >> 9;
		} else if (md_feature_any_ppl_on(sb->feature_map)) {
			bitmap_offset = (long)__le16_to_cpu(sb->ppl.offset);
			bm_sectors = (long)__le16_to_cpu(sb->ppl.size);
		}

		if (sb_offset < data_offset) {
			/*
			 * 1.1 or 1.2.  Put bbl after bitmap leaving
			 * at least 32K
			 */
			long bb_offset;

			bb_offset = sb_offset + 8;
			if (bm_sectors && bitmap_offset > 0)
				bb_offset = bitmap_offset + bm_sectors;
			while (bb_offset < (long)sb_offset + 8 + 32*2 &&
			       bb_offset + 8+8 <= (long)data_offset)
				bb_offset += 8;
			if (bb_offset + 8 <= (long)data_offset) {
				sb->bblog_size = __cpu_to_le16(8);
				sb->bblog_offset = __cpu_to_le32(bb_offset);
			}
		} else {
			/* 1.0 - put bbl just before the superblock */
			if (bm_sectors && bitmap_offset < 0)
				space = -bitmap_offset - bm_sectors;
			else
				space = sb_offset - data_offset -
					__le64_to_cpu(sb->data_size);
			if (space >= 8) {
				sb->bblog_size = __cpu_to_le16(8);
				sb->bblog_offset = __cpu_to_le32((unsigned)-8);
			}
		}
		break;
	}
	case UOPT_NO_BBL:
		if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BAD_BLOCKS))
			pr_err("Cannot remove active bbl from %s\n",devname);
		else {
			sb->bblog_size = 0;
			sb->bblog_shift = 0;
			sb->bblog_offset = 0;
		}
		break;
	case UOPT_FORCE_NO_BBL:
		sb->feature_map &= ~ __cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
		sb->bblog_size = 0;
		sb->bblog_shift = 0;
		sb->bblog_offset = 0;
		break;
	case UOPT_PPL: {
		unsigned long long sb_offset = __le64_to_cpu(sb->super_offset);
		unsigned long long data_offset = __le64_to_cpu(sb->data_offset);
		unsigned long long data_size = __le64_to_cpu(sb->data_size);
		long bb_offset = __le32_to_cpu(sb->bblog_offset);
		int space;
		int offset;

		if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
			pr_err("Cannot add PPL to array with bitmap\n");
			return -2;
		}

		if (sb->feature_map & __cpu_to_le32(MD_FEATURE_JOURNAL)) {
			pr_err("Cannot add PPL to array with journal\n");
			return -2;
		}

		if (sb_offset < data_offset) {
			if (bb_offset)
				space = bb_offset - 8;
			else
				space = data_offset - sb_offset - 8;
			offset = 8;
		} else {
			offset = -(sb_offset - data_offset - data_size);
			if (offset < INT16_MIN)
				offset = INT16_MIN;
			space = -(offset - bb_offset);
		}

		if (space < (PPL_HEADER_SIZE >> 9) + 8) {
			pr_err("Not enough space to add ppl\n");
			return -2;
		}

		if (space >= (MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9)) {
			space = (MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9);
		} else {
			int optimal_space = choose_ppl_space(
				__le32_to_cpu(sb->chunksize));
			if (space > optimal_space)
				space = optimal_space;
		}

		sb->ppl.offset = __cpu_to_le16(offset);
		sb->ppl.size = __cpu_to_le16(space);
		sb->feature_map |= __cpu_to_le32(MD_FEATURE_PPL);
		break;
	}
	case UOPT_NO_PPL:
		sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_PPL |
						  MD_FEATURE_MULTIPLE_PPLS);
		break;
	case UOPT_DEVICESIZE:
		if (__le64_to_cpu(sb->super_offset) >=
		    __le64_to_cpu(sb->data_offset))
			break;
		/*
		 * set data_size to device size less data_offset
		 */
		struct misc_dev_info *misc = (struct misc_dev_info *)
			(st->sb + MAX_SB_SIZE + BM_SUPER_SIZE);
		sb->data_size = __cpu_to_le64(
			misc->device_size - __le64_to_cpu(sb->data_offset));
		break;
	case UOPT_SPEC_REVERT_RESHAPE_NOBACKUP:
	case UOPT_REVERT_RESHAPE:
		if (!(sb->feature_map &
		      __cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE)))
			pr_err("No active reshape to revert on %s\n",
			       devname);
		else {
			__u32 temp;
			unsigned long long reshape_sectors;
			long reshape_chunk;

			/* If the reshape hasn't started, just stop it.
			 * It is conceivable that a stripe was modified but
			 * the metadata not updated.  In that case the backup
			 * should have been used to get past the critical stage.
			 * If that couldn't happen, the "-nobackup" version
			 * of revert-reshape can be used.
			 */
			if (update == UOPT_SPEC_REVERT_RESHAPE_NOBACKUP &&
			    sb->reshape_position == 0 &&
			    (__le32_to_cpu(sb->delta_disks) > 0 ||
			     (__le32_to_cpu(sb->delta_disks) == 0 &&
			      !(sb->feature_map & __cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS))))) {
				sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
				sb->raid_disks = __cpu_to_le32(__le32_to_cpu(sb->raid_disks) -
							       __le32_to_cpu(sb->delta_disks));
				sb->delta_disks = 0;
				break;
			}
			/* reshape_position is a little messy.
			 * Its value must be a multiple of the larger
			 * chunk size, and of the "after" data disks.
			 * So when reverting we need to change it to
			 * be a multiple of the new "after" data disks,
			 * which is the old "before".
			 * If it isn't already a multiple of 'before',
			 * the only thing we could do would be
			 * copy some block around on the disks, which
			 * is easy to get wrong.
			 * So we reject a revert-reshape unless the
			 * alignment is good.
			 */
			if (is_level456(__le32_to_cpu(sb->level))) {
				reshape_sectors =
					__le64_to_cpu(sb->reshape_position);
				reshape_chunk = __le32_to_cpu(sb->new_chunk);
				reshape_chunk *= __le32_to_cpu(sb->raid_disks) -
					__le32_to_cpu(sb->delta_disks) -
					(__le32_to_cpu(sb->level)==6 ? 2 : 1);
				if (reshape_sectors % reshape_chunk) {
					pr_err("Reshape position is not suitably aligned.\n");
					pr_err("Try normal assembly and stop again\n");
					return -2;
				}
			}
			sb->raid_disks =
				__cpu_to_le32(__le32_to_cpu(sb->raid_disks) -
					      __le32_to_cpu(sb->delta_disks));
			if (sb->delta_disks == 0)
				sb->feature_map ^= __cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
			else
				sb->delta_disks = __cpu_to_le32(-__le32_to_cpu(sb->delta_disks));

			temp = sb->new_layout;
			sb->new_layout = sb->layout;
			sb->layout = temp;

			temp = sb->new_chunk;
			sb->new_chunk = sb->chunksize;
			sb->chunksize = temp;

			if (sb->feature_map &
			    __cpu_to_le32(MD_FEATURE_NEW_OFFSET)) {
				long offset_delta =
					(int32_t)__le32_to_cpu(sb->new_offset);
				sb->data_offset = __cpu_to_le64(__le64_to_cpu(sb->data_offset) + offset_delta);
				sb->new_offset = __cpu_to_le32(-offset_delta);
				sb->data_size = __cpu_to_le64(__le64_to_cpu(sb->data_size) - offset_delta);
			}
		}
		break;
	case UOPT_SPEC__RESHAPE_PROGRESS:
		sb->reshape_position = __cpu_to_le64(info->reshape_progress);
		break;
	case UOPT_SPEC_WRITEMOSTLY:
		sb->devflags |= WriteMostly1;
		break;
	case UOPT_SPEC_READWRITE:
		sb->devflags &= ~WriteMostly1;
		break;
	case UOPT_SPEC_FAILFAST:
		sb->devflags |= FailFast1;
		break;
	case UOPT_SPEC_NOFAILFAST:
		sb->devflags &= ~FailFast1;
		break;
	case UOPT_LAYOUT_ORIGINAL:
	case UOPT_LAYOUT_ALTERNATE:
	case UOPT_LAYOUT_UNSPECIFIED:
		if (__le32_to_cpu(sb->level) != 0) {
			pr_err("%s: %s only supported for RAID0\n",
			       devname ?: "", map_num(update_options, update));
			rv = -1;
		} else if (update == UOPT_LAYOUT_UNSPECIFIED) {
			sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_RAID0_LAYOUT);
			sb->layout = 0;
		} else {
			sb->feature_map |= __cpu_to_le32(MD_FEATURE_RAID0_LAYOUT);
			sb->layout = __cpu_to_le32(update == UOPT_LAYOUT_ORIGINAL ? 1 : 2);
		}
		break;
	default:
		rv = -1;
	}

	sb->sb_csum = calc_sb_1_csum(sb);

	return rv;
}
static int init_super1(struct supertype *st, mdu_array_info_t *info,
		       struct shape *s, char *name, char *homehost,
		       int *uuid, unsigned long long data_offset)
{
	struct mdp_superblock_1 *sb;
	int spares;
	char defname[10];
	int sbsize;

	if (posix_memalign((void **)&sb, 4096, SUPER1_SIZE) != 0) {
		pr_err("could not allocate superblock\n");
		return 0;
	}
	memset(sb, 0, SUPER1_SIZE);
	st->sb = sb;
	if (info == NULL) {
		/* zeroing superblock */
		return 0;
	}

	spares = info->working_disks - info->active_disks;
	if (info->raid_disks + spares > MAX_DEVS) {
		pr_err("too many devices requested: %d+%d > %d\n",
		       info->raid_disks, spares, MAX_DEVS);
		return 0;
	}

	sb->magic = __cpu_to_le32(MD_SB_MAGIC);
	sb->major_version = __cpu_to_le32(1);
	sb->feature_map = 0;

	if (uuid)
		copy_uuid(sb->set_uuid, uuid, super1.swapuuid);
	else
		random_uuid(sb->set_uuid);

	if (name == NULL || *name == 0) {
		sprintf(defname, "%d", info->md_minor);
		name = defname;
	}
	if (homehost &&
	    strchr(name, ':') == NULL &&
	    strlen(homehost) + 1 + strlen(name) < 32) {
		strcpy(sb->set_name, homehost);
		strcat(sb->set_name, ":");
		strcat(sb->set_name, name);
	} else {
		int namelen;

		namelen = min((int)strlen(name),
			      (int)sizeof(sb->set_name) - 1);
		memcpy(sb->set_name, name, namelen);
		memset(&sb->set_name[namelen], '\0',
		       sizeof(sb->set_name) - namelen);
	}

	if (info->level == 0 && info->layout == UnSet)
		/* Metadata chooses default layout for RAID0 */
		info->layout = RAID0_ORIG_LAYOUT;

	sb->ctime = __cpu_to_le64((unsigned long long)time(0));
	sb->level = __cpu_to_le32(info->level);
	sb->layout = __cpu_to_le32(info->layout);
	sb->size = __cpu_to_le64(s->size*2ULL);
	sb->chunksize = __cpu_to_le32(info->chunk_size>>9);
	sb->raid_disks = __cpu_to_le32(info->raid_disks);

	sb->data_offset = __cpu_to_le64(data_offset);
	sb->data_size = __cpu_to_le64(0);
	sb->super_offset = __cpu_to_le64(0);
	sb->recovery_offset = __cpu_to_le64(0);

	sb->utime = sb->ctime;
	sb->events = __cpu_to_le64(1);
	if (info->state & (1<<MD_SB_CLEAN))
		sb->resync_offset = MaxSector;
	else
		sb->resync_offset = 0;
	sbsize = sizeof(struct mdp_superblock_1) +
		2 * (info->raid_disks + spares);
	sbsize = ROUND_UP(sbsize, 512);
	sb->max_dev =
		__cpu_to_le32((sbsize - sizeof(struct mdp_superblock_1)) / 2);

	memset(sb->dev_roles, 0xff,
	       MAX_SB_SIZE - sizeof(struct mdp_superblock_1));

	if (s->consistency_policy == CONSISTENCY_POLICY_PPL)
		sb->feature_map |= __cpu_to_le32(MD_FEATURE_PPL);

	return 1;
}
struct devinfo {
	int fd;
	char *devname;
	long long data_offset;
	unsigned long long dev_size;
	mdu_disk_info_t disk;
	struct devinfo *next;
};
/* Add a device to the superblock being created */
static int add_to_super1(struct supertype *st, mdu_disk_info_t *dk,
			 int fd, char *devname, unsigned long long data_offset)
{
	struct mdp_superblock_1 *sb = st->sb;
	__u16 *rp = sb->dev_roles + dk->number;
	struct devinfo *di, **dip;
	int dk_state;

	dk_state = dk->state & ~(1<<MD_DISK_FAILFAST);
	if ((dk_state & (1<<MD_DISK_ACTIVE)) &&
	    (dk_state & (1<<MD_DISK_SYNC)))/* active, sync */
		*rp = __cpu_to_le16(dk->raid_disk);
	else if (dk_state & (1<<MD_DISK_JOURNAL))
		*rp = MD_DISK_ROLE_JOURNAL;
	else if ((dk_state & ~(1<<MD_DISK_ACTIVE)) == 0)
		/* active or idle -> spare */
		*rp = MD_DISK_ROLE_SPARE;
	else
		*rp = MD_DISK_ROLE_FAULTY;

	if (dk->number >= (int)__le32_to_cpu(sb->max_dev) &&
	    __le32_to_cpu(sb->max_dev) < MAX_DEVS)
		sb->max_dev = __cpu_to_le32(dk->number + 1);

	sb->dev_number = __cpu_to_le32(dk->number);
	sb->devflags = 0; /* don't copy another disks flags */
	sb->sb_csum = calc_sb_1_csum(sb);

	dip = (struct devinfo **)&st->info;
	while (*dip)
		dip = &(*dip)->next;
	di = xmalloc(sizeof(struct devinfo));
	di->fd = fd;
	di->devname = devname;
	di->disk = *dk;
	di->data_offset = data_offset;

	if (is_fd_valid(fd))
		get_dev_size(fd, NULL, &di->dev_size);

	di->next = NULL;
	*dip = di;

	return 0;
}
static int locate_bitmap1(struct supertype *st, int fd, int node_num);
static int store_super1(struct supertype *st, int fd)
{
	struct mdp_superblock_1 *sb = st->sb;
	unsigned long long sb_offset;
	struct align_fd afd;
	int sbsize;
	unsigned long long dsize;

	if (!get_dev_size(fd, NULL, &dsize))
		return 1;

	dsize >>= 9;

	init_afd(&afd, fd);

	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(st->minor_version) {
	case 0:
		sb_offset = dsize;
		sb_offset -= 8*2;
		sb_offset &= ~(4*2-1);
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4*2;
		break;
	default:
		return -EINVAL;
	}

	if (sb_offset != __le64_to_cpu(sb->super_offset) &&
	    0 != __le64_to_cpu(sb->super_offset)) {
		pr_err("internal error - sb_offset is wrong\n");
		abort();
	}

	if (lseek64(fd, sb_offset << 9, 0)< 0LL)
		return 3;

	sbsize = ROUND_UP(sizeof(*sb) + 2 * __le32_to_cpu(sb->max_dev), 512);

	if (awrite(&afd, sb, sbsize) != sbsize)
		return 4;

	if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
		struct bitmap_super_s *bm;

		bm = (struct bitmap_super_s *)(((char *)sb) + MAX_SB_SIZE);
		if (__le32_to_cpu(bm->magic) == BITMAP_MAGIC) {
			locate_bitmap1(st, fd, 0);
			if (awrite(&afd, bm, sizeof(*bm)) != sizeof(*bm))
				return 5;
		}
	}
	fsync(fd);

	return 0;
}
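/*
 * Example (illustrative): on a 1953525168-sector device, minor_version 0
 * places the superblock at (1953525168 - 16) rounded down to an 8-sector
 * boundary = 1953525152, i.e. 8K from the end; minor_version 1 uses
 * sector 0 and minor_version 2 uses sector 8 (4K from the start).
 */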
static int load_super1(struct supertype *st, int fd, char *devname);
static unsigned long choose_bm_space(unsigned long devsize)
{
	/* if the device is bigger than 8Gig, save 64k for bitmap usage,
	 * if bigger than 200Gig, save 128k
	 * NOTE: result must be multiple of 4K else bad things happen
	 * on 4K-sector devices.
	 */
	if (devsize < 64*2)
		return 0;
	if (devsize - 64*2 >= 200*1024*1024*2)
		return 128*2;
	if (devsize - 4*2 > 8*1024*1024*2)
		return 64*2;
	return 4*2;
}
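/*
 * Example values (illustrative, devsize is in 512-byte sectors): a 4 GiB
 * device reserves 8 sectors (4K), a 100 GiB device reserves 128 sectors
 * (64K), and a 1 TiB device reserves 256 sectors (128K).
 */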
static void free_super1(struct supertype *st);
__u32 crc32c_le(__u32 crc, unsigned char const *p, size_t len);
static int write_init_ppl1(struct supertype *st, struct mdinfo *info, int fd)
{
	struct mdp_superblock_1 *sb = st->sb;
	void *buf;
	struct ppl_header *ppl_hdr;
	int ret;

	/* first clear entire ppl space */
	ret = zero_disk_range(fd, info->ppl_sector, info->ppl_size);
	if (ret)
		return ret;

	ret = posix_memalign(&buf, 4096, PPL_HEADER_SIZE);
	if (ret) {
		pr_err("Failed to allocate PPL header buffer\n");
		return -ret;
	}

	memset(buf, 0, PPL_HEADER_SIZE);
	ppl_hdr = buf;
	memset(ppl_hdr->reserved, 0xff, PPL_HDR_RESERVED);
	ppl_hdr->signature = __cpu_to_le32(~crc32c_le(~0, sb->set_uuid,
						      sizeof(sb->set_uuid)));
	ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));

	if (lseek64(fd, info->ppl_sector * 512, SEEK_SET) < 0) {
		ret = -errno;
		perror("Failed to seek to PPL header location");
	}

	if (!ret && write(fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
		ret = -EIO;
		perror("Write PPL header failed");
	}

	if (!ret)
		fsync(fd);

	free(buf);
	return ret;
}
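/*
 * Note (explanatory, not in the original source): both CRCs above use the
 * crc32c polynomial with an inverted (~0) seed and an inverted result, and
 * the signature is derived from the array UUID, so a stale PPL header left
 * over from a different array will not validate.
 */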
#define META_BLOCK_SIZE 4096
static int write_empty_r5l_meta_block(struct supertype *st, int fd)
{
	struct r5l_meta_block *mb;
	struct mdp_superblock_1 *sb = st->sb;
	struct align_fd afd;
	__u32 crc;

	init_afd(&afd, fd);

	if (posix_memalign((void **)&mb, 4096, META_BLOCK_SIZE) != 0) {
		pr_err("Could not allocate memory for the meta block.\n");
		return 1;
	}

	memset(mb, 0, META_BLOCK_SIZE);

	mb->magic = __cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = __cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = __cpu_to_le64(random32());
	mb->position = __cpu_to_le64(0);

	crc = crc32c_le(0xffffffff, sb->set_uuid, sizeof(sb->set_uuid));
	crc = crc32c_le(crc, (void *)mb, META_BLOCK_SIZE);
	mb->checksum = crc;

	if (lseek64(fd, __le64_to_cpu(sb->data_offset) * 512, 0) < 0LL) {
		pr_err("cannot seek to offset of the meta block\n");
		goto fail_to_write;
	}

	if (awrite(&afd, mb, META_BLOCK_SIZE) != META_BLOCK_SIZE) {
		pr_err("failed to store the meta block\n");
		goto fail_to_write;
	}
	fsync(fd);

	free(mb);
	return 0;

fail_to_write:
	free(mb);
	return 1;
}
static bool has_raid0_layout(struct mdp_superblock_1 *sb)
{
	if (sb->level == 0 && sb->layout != 0)
		return true;
	else
		return false;
}
static int write_init_super1(struct supertype *st)
{
	struct mdp_superblock_1 *sb = st->sb;
	struct supertype *refst;
	int rv = 0;
	unsigned long long bm_space;
	struct devinfo *di;
	unsigned long long dsize, array_size;
	unsigned long long sb_offset;
	unsigned long long data_offset;
	long bm_offset;
	bool raid0_need_layout = false;

	/* Clear extra flags */
	sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_BAD_BLOCKS | MD_FEATURE_REPLACEMENT);

	/* Since linux kernel v5.4, raid0 always has a layout */
	if (has_raid0_layout(sb) && get_linux_version() >= 5004000)
		raid0_need_layout = true;

	for (di = st->info; di; di = di->next) {
		if (di->disk.state & (1 << MD_DISK_JOURNAL))
			sb->feature_map |= __cpu_to_le32(MD_FEATURE_JOURNAL);
		if (has_raid0_layout(sb) && !raid0_need_layout) {
			struct devinfo *di2 = st->info;
			unsigned long long s1, s2;

			s1 = di->dev_size;
			if (di->data_offset != INVALID_SECTORS)
				s1 -= di->data_offset;
			s1 /= __le32_to_cpu(sb->chunksize);
			s2 = di2->dev_size;
			if (di2->data_offset != INVALID_SECTORS)
				s2 -= di2->data_offset;
			s2 /= __le32_to_cpu(sb->chunksize);

			if (s1 != s2)
				raid0_need_layout = true;
		}
	}
= st
->info
; di
; di
= di
->next
) {
1886 if (di
->disk
.state
& (1 << MD_DISK_FAULTY
))
1891 while (Kill(di
->devname
, NULL
, 0, -1, 1) == 0)
1894 sb
->dev_number
= __cpu_to_le32(di
->disk
.number
);
1895 if (di
->disk
.state
& (1<<MD_DISK_WRITEMOSTLY
))
1896 sb
->devflags
|= WriteMostly1
;
1898 sb
->devflags
&= ~WriteMostly1
;
1899 if (di
->disk
.state
& (1<<MD_DISK_FAILFAST
))
1900 sb
->devflags
|= FailFast1
;
1902 sb
->devflags
&= ~FailFast1
;
1904 random_uuid(sb
->device_uuid
);
1906 if (!(di
->disk
.state
& (1<<MD_DISK_JOURNAL
)))
1909 refst
= dup_super(st
);
1910 if (load_super1(refst
, di
->fd
, NULL
)==0) {
1911 struct mdp_superblock_1
*refsb
= refst
->sb
;
1913 memcpy(sb
->device_uuid
, refsb
->device_uuid
, 16);
1914 if (memcmp(sb
->set_uuid
, refsb
->set_uuid
, 16)==0) {
1915 /* same array, so preserve events and
1917 sb
->events
= refsb
->events
;
1923 if (!get_dev_size(di
->fd
, NULL
, &dsize
)) {
		/*
		 * Calculate the position of the superblock.
		 * It is always aligned to a 4K boundary and
		 * depending on minor_version, it can be:
		 * 0: At least 8K, but less than 12K, from end of device
		 * 1: At start of device
		 * 2: 4K from start of device.
		 * data_offset has already been set.
		 */
		array_size = __le64_to_cpu(sb->size);

		/* work out how much space we left for a bitmap */
		if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
			bitmap_super_t *bms = (bitmap_super_t *)
					(((char *)sb) + MAX_SB_SIZE);
			bm_space = calc_bitmap_size(bms, 4096) >> 9;
			bm_offset = (long)__le32_to_cpu(sb->bitmap_offset);
		} else if (md_feature_any_ppl_on(sb->feature_map)) {
			bm_space = MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9;
			if (st->minor_version == 0)
				bm_offset = -bm_space - 8;
			else
				bm_offset = 4*2;
			sb->ppl.offset = __cpu_to_le16(bm_offset);
			sb->ppl.size = __cpu_to_le16(bm_space);
		} else {
			bm_space = choose_bm_space(array_size);
			bm_offset = 4*2;
		}
		data_offset = di->data_offset;
		if (data_offset == INVALID_SECTORS)
			data_offset = st->data_offset;
		switch(st->minor_version) {
		case 0:
			/* Add 8 sectors for bad block log */
			bm_space += 8;
			if (data_offset == INVALID_SECTORS)
				data_offset = 0;
			sb_offset = dsize;
			sb_offset -= 8*2;
			sb_offset &= ~(4*2-1);
			sb->data_offset = __cpu_to_le64(data_offset);
			sb->super_offset = __cpu_to_le64(sb_offset);
			if (sb_offset < array_size + bm_space)
				bm_space = sb_offset - array_size;
			sb->data_size = __cpu_to_le64(sb_offset - bm_space);
			if (bm_space >= 8) {
				sb->bblog_size = __cpu_to_le16(8);
				sb->bblog_offset = __cpu_to_le32((unsigned)-8);
			}
			break;
		case 1:
		case 2:
			sb_offset = st->minor_version == 2 ? 8 : 0;
			sb->super_offset = __cpu_to_le64(sb_offset);
			if (data_offset == INVALID_SECTORS)
				data_offset = sb_offset + 16;

			sb->data_offset = __cpu_to_le64(data_offset);
			sb->data_size = __cpu_to_le64(dsize - data_offset);
			if (data_offset >= sb_offset+bm_offset+bm_space+8) {
				sb->bblog_size = __cpu_to_le16(8);
				sb->bblog_offset = __cpu_to_le32(bm_offset +
								 bm_space);
			} else if (data_offset >= sb_offset + 16) {
				sb->bblog_size = __cpu_to_le16(8);
				/* '8' sectors for the bblog, and 'sb_offset'
				 * because we want offset from superblock, not
				 * start of device.
				 */
				sb->bblog_offset = __cpu_to_le32(data_offset -
								 8 - sb_offset);
			}
			break;
		default:
			pr_err("Failed to write invalid metadata format 1.%i to %s\n",
			       st->minor_version, di->devname);
			rv = -EINVAL;
			goto out;
		}

		/*
		 * Disable badblock log on clusters, or when
		 * explicitly requested
		 */
		if (st->nodes > 0 || conf_get_create_info()->bblist == 0) {
			sb->bblog_size = 0;
			sb->bblog_offset = 0;
		}

		/* RAID0 needs a layout if devices aren't all the same size */
		if (raid0_need_layout)
			sb->feature_map |= __cpu_to_le32(MD_FEATURE_RAID0_LAYOUT);

		sb->sb_csum = calc_sb_1_csum(sb);
		rv = store_super1(st, di->fd);

		if (rv == 0 && (di->disk.state & (1 << MD_DISK_JOURNAL))) {
			rv = write_empty_r5l_meta_block(st, di->fd);
			if (rv)
				goto error_out;
		}

		if (rv == 0 &&
		    (__le32_to_cpu(sb->feature_map) &
		     MD_FEATURE_BITMAP_OFFSET)) {
			rv = st->ss->write_bitmap(st, di->fd, NodeNumUpdate);
		} else if (rv == 0 &&
		    md_feature_any_ppl_on(sb->feature_map)) {
			struct mdinfo info;

			st->ss->getinfo_super(st, &info, NULL);
			rv = st->ss->write_init_ppl(st, &info, di->fd);
		}

		if (rv)
			goto error_out;
	}
error_out:
	if (rv)
		pr_err("Failed to write metadata to %s\n", di->devname);
out:
	return rv;
}
static int compare_super1(struct supertype *st, struct supertype *tst,
			  int verbose, char *devname)
{
	/*
	 * return:
	 *  0 same, or first was empty, and second was copied
	 *  1 second had wrong number
	 *  2 wrong uuid
	 *  3 wrong other info
	 */
	struct mdp_superblock_1 *first = st->sb;
	struct mdp_superblock_1 *second = tst->sb;

	if (second->magic != __cpu_to_le32(MD_SB_MAGIC))
		return 1;
	if (second->major_version != __cpu_to_le32(1))
		return 1;

	if (!first) {
		if (posix_memalign((void **)&first, 4096, SUPER1_SIZE) != 0) {
			pr_err("could not allocate superblock\n");
			return 1;
		}
		memcpy(first, second, SUPER1_SIZE);
		st->sb = first;
		return 0;
	}
	if (memcmp(first->set_uuid, second->set_uuid, 16) != 0)
		return 2;

	if (first->ctime      != second->ctime     ||
	    first->level      != second->level     ||
	    first->layout     != second->layout    ||
	    first->size       != second->size      ||
	    first->chunksize  != second->chunksize ||
	    first->raid_disks != second->raid_disks)
		return 3;
	return 0;
}
static int load_super1(struct supertype *st, int fd, char *devname)
{
	unsigned long long dsize;
	unsigned long long sb_offset;
	struct mdp_superblock_1 *super;
	int uuid[4];
	struct bitmap_super_s *bsb;
	struct misc_dev_info *misc;
	struct align_fd afd;

	free_super1(st);

	init_afd(&afd, fd);

	if (st->ss == NULL || st->minor_version == -1) {
		int bestvers = -1;
		struct supertype tst;
		__u64 bestctime = 0;

		/* guess... choose latest ctime */
		memset(&tst, 0, sizeof(tst));
		tst.ss = &super1;
		for (tst.minor_version = 0; tst.minor_version <= 2;
		     tst.minor_version++) {
			tst.ignore_hw_compat = st->ignore_hw_compat;
			switch(load_super1(&tst, fd, devname)) {
			case 0: super = tst.sb;
				if (bestvers == -1 ||
				    bestctime < __le64_to_cpu(super->ctime)) {
					bestvers = tst.minor_version;
					bestctime = __le64_to_cpu(super->ctime);
				}
				free(super);
				tst.sb = NULL;
				break;
			case 1: return 1; /*bad device */
			case 2: break; /* bad, try next */
			}
		}
		if (bestvers != -1) {
			int rv;

			tst.minor_version = bestvers;
			tst.ss = &super1;
			tst.max_devs = MAX_DEVS;
			rv = load_super1(&tst, fd, devname);
			if (rv == 0)
				*st = tst;
			return rv;
		}
		return 2;
	}
, devname
, &dsize
))
2158 pr_err("%s is too small for md: size is %llu sectors.\n",
2164 * Calculate the position of the superblock.
2165 * It is always aligned to a 4K boundary and
2166 * depending on minor_version, it can be:
2167 * 0: At least 8K, but less than 12K, from end of device
2168 * 1: At start of device
2169 * 2: 4K from start of device.
2171 switch(st
->minor_version
) {
2175 sb_offset
&= ~(4*2-1);
2187 if (lseek64(fd
, sb_offset
<< 9, 0)< 0LL) {
2189 pr_err("Cannot seek to superblock on %s: %s\n",
2190 devname
, strerror(errno
));
2194 if (posix_memalign((void **)&super
, 4096, SUPER1_SIZE
) != 0) {
2195 pr_err("could not allocate superblock\n");
2199 memset(super
, 0, SUPER1_SIZE
);
2201 if (aread(&afd
, super
, MAX_SB_SIZE
) != MAX_SB_SIZE
) {
2203 pr_err("Cannot read superblock on %s\n",
2209 if (__le32_to_cpu(super
->magic
) != MD_SB_MAGIC
) {
2211 pr_err("No super block found on %s (Expected magic %08x, got %08x)\n",
2212 devname
, MD_SB_MAGIC
,
2213 __le32_to_cpu(super
->magic
));
2218 if (__le32_to_cpu(super
->major_version
) != 1) {
2220 pr_err("Cannot interpret superblock on %s - version is %d\n",
2221 devname
, __le32_to_cpu(super
->major_version
));
2225 if (__le64_to_cpu(super
->super_offset
) != sb_offset
) {
2227 pr_err("No superblock found on %s (super_offset is wrong)\n",
2233 bsb
= (struct bitmap_super_s
*)(((char *)super
) + MAX_SB_SIZE
);
2235 misc
= (struct misc_dev_info
*)
2236 (((char *)super
) + MAX_SB_SIZE
+BM_SUPER_SIZE
);
2237 misc
->device_size
= dsize
;
2238 if (st
->data_offset
== INVALID_SECTORS
)
2239 st
->data_offset
= __le64_to_cpu(super
->data_offset
);
2241 if (st
->minor_version
>= 1 &&
2242 st
->ignore_hw_compat
== 0 &&
2243 ((role_from_sb(super
) != MD_DISK_ROLE_JOURNAL
&&
2244 dsize
< (__le64_to_cpu(super
->data_offset
) +
2245 __le64_to_cpu(super
->size
))) ||
2246 dsize
< (__le64_to_cpu(super
->data_offset
) +
2247 __le64_to_cpu(super
->data_size
)))) {
2249 pr_err("Device %s is not large enough for data described in superblock\n",
2256 /* Now check on the bitmap superblock */
2257 if ((__le32_to_cpu(super
->feature_map
)&MD_FEATURE_BITMAP_OFFSET
) == 0)
2259 /* Read the bitmap superblock and make sure it looks
2260 * valid. If it doesn't clear the bit. An --assemble --force
2261 * should get that written out.
2263 locate_bitmap1(st
, fd
, 0);
2264 if (aread(&afd
, bsb
, 512) != 512)
2267 uuid_from_super1(st
, uuid
);
2268 if (__le32_to_cpu(bsb
->magic
) != BITMAP_MAGIC
||
2269 memcmp(bsb
->uuid
, uuid
, 16) != 0)
2274 super
->feature_map
= __cpu_to_le32(__le32_to_cpu(super
->feature_map
) &
2275 ~MD_FEATURE_BITMAP_OFFSET
);
static struct supertype *match_metadata_desc1(char *arg)
{
	struct supertype *st = xcalloc(1, sizeof(*st));

	st->container_devnm[0] = 0;
	st->ss = &super1;
	st->max_devs = MAX_DEVS;
	st->sb = NULL;
	st->data_offset = INVALID_SECTORS;
	/* leading zeros can be safely ignored.  --detail generates them. */
	while (*arg == '0')
		arg++;
	if (strcmp(arg, "1.0") == 0 || strcmp(arg, "1.00") == 0) {
		st->minor_version = 0;
		return st;
	}
	if (strcmp(arg, "1.1") == 0 || strcmp(arg, "1.01") == 0) {
		st->minor_version = 1;
		return st;
	}
	if (strcmp(arg, "1.2") == 0 ||
#ifndef DEFAULT_OLD_METADATA /* ifdef in super0.c */
	    strcmp(arg, "default") == 0 ||
#endif /* DEFAULT_OLD_METADATA */
	    strcmp(arg, "1.02") == 0) {
		st->minor_version = 2;
		return st;
	}
	if (strcmp(arg, "1") == 0 || strcmp(arg, "default") == 0) {
		st->minor_version = -1;
		return st;
	}

	free(st);
	return NULL;
}
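/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	struct supertype *st = match_metadata_desc1("1.2");
 *	// on success: st->minor_version == 2, st->max_devs == MAX_DEVS,
 *	// st->data_offset == INVALID_SECTORS until a device is loaded
 *
 * "1" (or, when old metadata is not the default, "default") selects
 * minor_version -1, which load_super1() resolves by probing versions 0-2
 * and keeping the one with the newest ctime.
 */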
/* find available size on device with this devsize, using
 * superblock type st, and reserving enough space for a
 * possible bitmap
 */
static __u64 avail_size1(struct supertype *st, __u64 devsize,
			 unsigned long long data_offset)
{
	struct mdp_superblock_1 *super = st->sb;
	int bmspace = 0;
	int bbspace = 0;

	if (devsize < 24)
		return 0;

	if (__le32_to_cpu(super->feature_map) & MD_FEATURE_BITMAP_OFFSET) {
		/* hot-add. allow for actual size of bitmap */
		struct bitmap_super_s *bsb;
		bsb = (struct bitmap_super_s *)(((char *)super) + MAX_SB_SIZE);
		bmspace = calc_bitmap_size(bsb, 4096) >> 9;
	} else if (md_feature_any_ppl_on(super->feature_map)) {
		bmspace = __le16_to_cpu(super->ppl.size);
	}

	/* Allow space for bad block log */
	if (super->bblog_size)
		bbspace = __le16_to_cpu(super->bblog_size);

	if (st->minor_version < 0)
		/* not specified, so time to set default */
		st->minor_version = 2;

	if (data_offset == INVALID_SECTORS)
		data_offset = st->data_offset;

	if (data_offset != INVALID_SECTORS)
		switch(st->minor_version) {
		case 0:
			return devsize - data_offset - 8*2 - bbspace;
		case 1:
		case 2:
			return devsize - data_offset;
		default:
			return 0;
		}

	devsize -= bmspace;

	switch(st->minor_version) {
	case 0:
		/* at end */
		return ((devsize - 8*2 - bbspace) & ~(4*2-1));
	case 1:
		/* at start, 4K for superblock and possible bitmap */
		return devsize - 4*2 - bbspace;
	case 2:
		/* 4k from start, 4K for superblock and possible bitmap */
		return devsize - (4+4)*2 - bbspace;
	}
	return 0;
}
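/*
 * Illustrative sketch (not part of mdadm): the v1.2 arithmetic above in
 * isolation, assuming any bitmap/PPL space has already been subtracted.
 * With the superblock 4K from the start and another 4K reserved after it,
 * a 1 GiB device (2097152 sectors) with an 8-sector bad block log leaves
 * 2097152 - 16 - 8 = 2097128 sectors for data.  The helper name is
 * hypothetical.
 */
static inline __u64 example_avail_1_2(__u64 devsize, int bbspace)
{
	/* (4+4)*2 sectors = 4K offset + 4K superblock area */
	return devsize - (4+4)*2 - bbspace;
}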
static int
add_internal_bitmap1(struct supertype *st,
		     int *chunkp, int delay, int write_behind,
		     unsigned long long size,
		     int may_change, int major)
{
	/*
	 * If not may_change, then this is a 'Grow' without sysfs support for
	 * bitmaps, and the bitmap must fit after the superblock at 1K offset.
	 * If may_change, then this is create or a Grow with sysfs support,
	 * and we can put the bitmap wherever we like.
	 *
	 * size is in sectors, chunk is in bytes !!!
	 */
	unsigned long long bits;
	unsigned long long max_bits;
	unsigned long long min_chunk;
	long offset;
	long bbl_offset, bbl_size;
	unsigned long long chunk = *chunkp;
	int room = 0;
	int creating = 0;
	int len;
	struct mdp_superblock_1 *sb = st->sb;
	bitmap_super_t *bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);
	int uuid[4];

	if (__le64_to_cpu(sb->data_size) == 0)
		/*
		 * Must be creating the array, else data_size
		 * would be non-zero
		 */
		creating = 1;

	switch(st->minor_version) {
	case 0:
		/*
		 * either 3K after the superblock (when hot-add),
		 * or some amount of space before.
		 */
		if (creating) {
			/*
			 * We are creating array, so we *know* how much room has
			 * been left.
			 */
			offset = 0;
			bbl_size = 8;
			room =
			  choose_bm_space(__le64_to_cpu(sb->size)) + bbl_size;
		} else {
			room = __le64_to_cpu(sb->super_offset)
				- __le64_to_cpu(sb->data_offset)
				- __le64_to_cpu(sb->data_size);
			bbl_size = __le16_to_cpu(sb->bblog_size);
			if (bbl_size < 8)
				bbl_size = 8;
			bbl_offset = (__s32)__le32_to_cpu(sb->bblog_offset);
			if (bbl_size < -bbl_offset)
				bbl_size = -bbl_offset;

			if (!may_change ||
			    (room < 3*2 && __le32_to_cpu(sb->max_dev) <= 384)) {
				room = 3*2;
				offset = 1*2;
				bbl_size = 0;
			} else {
				offset = 0; /* means movable offset */
			}
		}
		break;
	case 1:
	case 2: /* between superblock and data */
		if (creating) {
			offset = 4*2;
			bbl_size = 8;
			room =
			  choose_bm_space(__le64_to_cpu(sb->size)) + bbl_size;
		} else {
			room = __le64_to_cpu(sb->data_offset)
				- __le64_to_cpu(sb->super_offset);
			bbl_size = __le16_to_cpu(sb->bblog_size);
			if (bbl_size)
				room -=
				  __le32_to_cpu(sb->bblog_offset) + bbl_size;
			else
				bbl_size = 8;

			if (!may_change) {
				room -= 2; /* Leave 1K for superblock */
				offset = 2;
			} else {
				room -= 4*2; /* leave 4K for superblock */
				offset = 4*2;
			}
		}
		break;
	default:
		return -ENOSPC;
	}

	if (chunk == UnSet && room > 128*2)
		/* Limit to 128K of bitmap when chunk size not requested */
		room = 128*2;

	if (room <= 1)
		/* No room for a bitmap */
		return -ENOSPC;

	max_bits = (room * 512 - sizeof(bitmap_super_t)) * 8;

	min_chunk = 4096; /* sub-page chunks don't work yet.. */
	bits = (size * 512) / min_chunk + 1;
	while (bits > max_bits) {
		min_chunk *= 2;
		bits = (bits + 1) / 2;
	}
	if (chunk == UnSet) {
		/* For practical purposes, 64Meg is a good
		 * default chunk size for internal bitmaps.
		 */
		chunk = min_chunk;
		if (chunk < 64*1024*1024)
			chunk = 64*1024*1024;
	} else if (chunk < min_chunk)
		return -EINVAL; /* chunk size too small */
	if (chunk == 0) /* rounding problem */
		return -EINVAL;

	if (offset == 0) {
		/* start bitmap on a 4K boundary with enough space for
		 * the bitmap
		 */
		bits = (size * 512) / chunk + 1;
		room = ((bits + 7) / 8 + sizeof(bitmap_super_t) + 4095) / 4096;
		room *= 8; /* convert 4K blocks to sectors */
		offset = -room - bbl_size;
	}

	sb->bitmap_offset = (int32_t)__cpu_to_le32(offset);

	sb->feature_map = __cpu_to_le32(__le32_to_cpu(sb->feature_map) |
					MD_FEATURE_BITMAP_OFFSET);
	memset(bms, 0, sizeof(*bms));
	bms->magic = __cpu_to_le32(BITMAP_MAGIC);
	bms->version = __cpu_to_le32(major);
	uuid_from_super1(st, uuid);
	memcpy(bms->uuid, uuid, 16);
	bms->chunksize = __cpu_to_le32(chunk);
	bms->daemon_sleep = __cpu_to_le32(delay);
	bms->sync_size = __cpu_to_le64(size);
	bms->write_behind = __cpu_to_le32(write_behind);
	bms->nodes = __cpu_to_le32(st->nodes);
	if (st->nodes)
		sb->feature_map = __cpu_to_le32(__le32_to_cpu(sb->feature_map) |
						MD_FEATURE_CLUSTERED);
	if (st->cluster_name) {
		len = sizeof(bms->cluster_name);
		strncpy((char *)bms->cluster_name, st->cluster_name, len);
		bms->cluster_name[len - 1] = '\0';
	}

	*chunkp = chunk;
	return 0;
}
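/*
 * Illustrative sketch (not part of mdadm): the sizing math above, in
 * isolation.  For a 1 GiB member (size = 2097152 sectors) at the default
 * 64 MiB chunk, bits = (2097152*512)/(64*1024*1024) + 1 = 17, so
 * ((17+7)/8 + 256 + 4095)/4096 = 1 four-K block = 8 sectors, and the
 * movable bitmap lands at offset = -8 - bbl_size.  The helper name is
 * hypothetical.
 */
static inline unsigned long long
example_bitmap_sectors(unsigned long long size, unsigned long long chunk)
{
	unsigned long long bits = (size * 512) / chunk + 1;
	unsigned long long blocks =
		((bits + 7) / 8 + sizeof(bitmap_super_t) + 4095) / 4096;
	return blocks * 8;	/* 4K blocks -> 512-byte sectors */
}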
static int locate_bitmap1(struct supertype *st, int fd, int node_num)
{
	unsigned long long offset, bm_sectors_per_node;
	struct mdp_superblock_1 *sb;
	bitmap_super_t *bms;
	int mustfree = 0;
	int ret;

	if (!st->sb) {
		if (st->ss->load_super(st, fd, NULL))
			return -1; /* no error I hope... */
		mustfree = 1;
	}
	sb = st->sb;

	if ((__le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET))
		ret = 0;
	else
		ret = -1;

	offset = __le64_to_cpu(sb->super_offset) + (int32_t)__le32_to_cpu(sb->bitmap_offset);
	if (node_num) {
		bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);
		bm_sectors_per_node = calc_bitmap_size(bms, 4096) >> 9;
		offset += bm_sectors_per_node * node_num;
	}
	if (mustfree)
		free(sb);

	if (lseek64(fd, offset << 9, 0) < 0) {
		pr_err("lseek fails\n");
		ret = -1;
	}
	return ret;
}
static int write_bitmap1(struct supertype *st, int fd, enum bitmap_update update)
{
	struct mdp_superblock_1 *sb = st->sb;
	bitmap_super_t *bms = (bitmap_super_t *)(((char *)sb) + MAX_SB_SIZE);
	int rv = 0;
	void *buf;
	int towrite, n, len;
	struct align_fd afd;
	unsigned int i = 0;
	unsigned long long total_bm_space, bm_space_per_node;

	switch (update) {
	case NameUpdate:
		/* update cluster name */
		if (st->cluster_name) {
			len = sizeof(bms->cluster_name);
			memset((char *)bms->cluster_name, 0, len);
			strncpy((char *)bms->cluster_name,
				st->cluster_name, len);
			bms->cluster_name[len - 1] = '\0';
		}
		break;
	case NodeNumUpdate:
		/* cluster md only supports superblock 1.2 now */
		if (st->minor_version != 2 &&
		    bms->version == BITMAP_MAJOR_CLUSTERED) {
			pr_err("Warning: cluster md only works with superblock 1.2\n");
			return -EINVAL;
		}

		if (bms->version == BITMAP_MAJOR_CLUSTERED) {
			if (st->nodes == 1) {
				/* the parameter for nodes is not valid */
				pr_err("Warning: cluster-md at least needs two nodes\n");
				return -EINVAL;
			} else if (st->nodes == 0) {
				/*
				 * the "--nodes" parameter was not specified
				 * (eg, adding a disk to a clustered raid)
				 */
				break;
			} else if (__cpu_to_le32(st->nodes) < bms->nodes) {
				/*
				 * Since the nodes num is not increased, no
				 * need to check whether the space is enough,
				 * just update bms->nodes
				 */
				bms->nodes = __cpu_to_le32(st->nodes);
				break;
			}
		} else {
			/*
			 * no need to change bms->nodes for other
			 * bitmap types
			 */
			if (st->nodes)
				pr_err("Warning: --nodes option is only suitable for clustered bitmap\n");
			break;
		}

		/*
		 * Each node has an independent bitmap, so it is necessary to
		 * calculate whether the space is enough; first get how many
		 * bytes the total bitmap needs
		 */
		bm_space_per_node = calc_bitmap_size(bms, 4096);

		total_bm_space = 512 * (__le64_to_cpu(sb->data_offset) -
					__le64_to_cpu(sb->super_offset));
		/* leave another 4k for superblock */
		total_bm_space = total_bm_space - 4096;

		if (bm_space_per_node * st->nodes > total_bm_space) {
			pr_err("Warning: The max num of nodes can't exceed %llu\n",
			       total_bm_space / bm_space_per_node);
			return -ENOMEM;
		}

		bms->nodes = __cpu_to_le32(st->nodes);
		break;
	case NoUpdate:
	default:
		break;
	}

	init_afd(&afd, fd);

	if (locate_bitmap1(st, fd, 0) < 0) {
		pr_err("Error: Invalid bitmap\n");
		return -EINVAL;
	}

	if (posix_memalign(&buf, 4096, 4096))
		return -ENOMEM;

	do {
		/* Only the bitmap[0] should resync
		 * whole device on initial assembly
		 */
		if (i)
			memset(buf, 0x00, 4096);
		else
			memset(buf, 0xff, 4096);
		memcpy(buf, (char *)bms, sizeof(bitmap_super_t));

		/*
		 * use the 4096 boundary if bitmap_offset is aligned
		 * with 8 sectors; then it should be compatible with
		 * older mdadm.
		 */
		if (__le32_to_cpu(sb->bitmap_offset) & 7)
			towrite = calc_bitmap_size(bms, 512);
		else
			towrite = calc_bitmap_size(bms, 4096);
		while (towrite > 0) {
			n = towrite;
			if (n > 4096)
				n = 4096;
			n = awrite(&afd, buf, n);
			if (n > 0)
				towrite -= n;
			else
				break;
			if (i)
				memset(buf, 0x00, 4096);
			else
				memset(buf, 0xff, 4096);
		}
		fsync(fd);
		if (towrite) {
			rv = -2;
			break;
		}
	} while (++i < __le32_to_cpu(bms->nodes));

	free(buf);
	return rv;
}
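/*
 * Worked example (illustrative, not in the original source) of the space
 * check above: with a v1.2 layout where super_offset = 8 and
 * data_offset = 2048, the bitmap area is 512 * (2048 - 8) - 4096 =
 * 1040384 bytes.  If each node's bitmap needs 4096 bytes, at most
 * 1040384 / 4096 = 254 nodes fit.
 */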
static void free_super1(struct supertype *st)
{
	if (st->sb)
		free(st->sb);
	while (st->info) {
		struct devinfo *di = st->info;
		st->info = di->next;
		if (di->fd >= 0)
			close(di->fd);
		free(di);
	}
	st->sb = NULL;
}
static int validate_geometry1(struct supertype *st, int level,
			      int layout, int raiddisks,
			      int *chunk, unsigned long long size,
			      unsigned long long data_offset,
			      char *subdev, unsigned long long *freesize,
			      int consistency_policy, int verbose)
{
	unsigned long long ldsize, devsize;
	int bmspace;
	unsigned long long headroom;
	unsigned long long overhead;
	int fd;

	if (is_container(level)) {
		if (verbose)
			pr_err("1.x metadata does not support containers\n");
		return 0;
	}
	if (*chunk == UnSet)
		*chunk = DEFAULT_CHUNK;

	if (!subdev)
		return 1;

	if (st->minor_version < 0)
		/* not specified, so time to set default */
		st->minor_version = 2;

	fd = open(subdev, O_RDONLY|O_EXCL, 0);
	if (fd < 0) {
		if (verbose)
			pr_err("super1.x cannot open %s: %s\n",
			       subdev, strerror(errno));
		return 0;
	}

	if (!get_dev_size(fd, subdev, &ldsize)) {
		close(fd);
		return 0;
	}
	close(fd);

	devsize = ldsize >> 9;

	/* creating: allow suitable space for bitmap or PPL */
	if (consistency_policy == CONSISTENCY_POLICY_PPL)
		bmspace = MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9;
	else
		bmspace = choose_bm_space(devsize);

	if (data_offset == INVALID_SECTORS)
		data_offset = st->data_offset;
	if (data_offset == INVALID_SECTORS)
		switch (st->minor_version) {
		case 0:
			data_offset = 0;
			break;
		case 1:
		case 2:
			/* Choose data offset appropriate for this device
			 * and use as default for whole array.
			 * The data_offset must allow for bitmap space
			 * and base metadata, should allow for some headroom
			 * for reshape, and should be rounded to a multiple
			 * of 1M.
			 * Headroom is limited to 128M, but aim for about 0.1%
			 */
			headroom = 128*1024*2;
			while ((headroom << 10) > devsize &&
			       (*chunk == 0 ||
				headroom / 2 >= ((unsigned)(*chunk)*2)*2))
				headroom >>= 1;
			data_offset = 12*2 + bmspace + headroom;
#define ONE_MEG (2*1024)
			data_offset = ROUND_UP(data_offset, ONE_MEG);
			break;
		}
	if (st->data_offset == INVALID_SECTORS)
		st->data_offset = data_offset;
	switch(st->minor_version) {
	case 0: /* metadata at end.  Round down and subtract space to reserve */
		devsize = (devsize & ~(4ULL*2-1));
		/* space for metadata, bblog, bitmap/ppl */
		overhead = 8*2 + 8 + bmspace;
		if (devsize < overhead) /* detect underflow */
			goto dev_too_small_err;
		devsize -= overhead;
		break;
	case 1:
	case 2:
		if (devsize < data_offset) /* detect underflow */
			goto dev_too_small_err;
		devsize -= data_offset;
		break;
	}
	*freesize = devsize;
	return 1;

/* Error condition, device cannot even hold the overhead. */
dev_too_small_err:
	fprintf(stderr, "device %s is too small (%lluK) for "
			"required metadata!\n", subdev, devsize>>1);
	*freesize = 0;
	return 0;
}
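/*
 * Worked example (illustrative, not in the original source) of the
 * headroom loop above: headroom starts at 128M (128*1024*2 = 262144
 * sectors) and is halved while it exceeds devsize/1024 (and stays at
 * least a couple of chunks).  For a 1 GiB device (2097152 sectors) it
 * settles at 2048 sectors (1 MiB), i.e. roughly 0.1% of the device, and
 * data_offset = 12*2 + bmspace + headroom is then rounded up to the next
 * 1 MiB multiple (ONE_MEG = 2048 sectors).
 */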
void *super1_make_v0(struct supertype *st, struct mdinfo *info, mdp_super_t *sb0)
{
	/* Create a v1.0 superblock based on 'info' */
	void *ret;
	struct mdp_superblock_1 *sb;
	int i;
	unsigned long long offset;

	if (posix_memalign(&ret, 4096, 1024) != 0)
		return NULL;
	sb = ret;

	memset(ret, 0, 1024);
	sb->magic = __cpu_to_le32(MD_SB_MAGIC);
	sb->major_version = __cpu_to_le32(1);

	copy_uuid(sb->set_uuid, info->uuid, super1.swapuuid);
	sprintf(sb->set_name, "%d", sb0->md_minor);
	sb->ctime = __cpu_to_le32(info->array.ctime + 1);
	sb->level = __cpu_to_le32(info->array.level);
	sb->layout = __cpu_to_le32(info->array.layout);
	sb->size = __cpu_to_le64(info->component_size);
	sb->chunksize = __cpu_to_le32(info->array.chunk_size / 512);
	sb->raid_disks = __cpu_to_le32(info->array.raid_disks);
	if (info->array.level > 0)
		sb->data_size = sb->size;
	else
		sb->data_size = st->ss->avail_size(st, st->devsize / 512, 0);
	sb->resync_offset = MaxSector;
	sb->max_dev = __cpu_to_le32(MD_SB_DISKS);
	sb->dev_number = __cpu_to_le32(info->disk.number);
	sb->utime = __cpu_to_le64(info->array.utime);

	offset = st->devsize/512 - 8*2;
	offset &= ~(4*2-1);
	sb->super_offset = __cpu_to_le64(offset);
	//*(__u64*)(st->other + 128 + 8 + 8) = __cpu_to_le64(offset);

	random_uuid(sb->device_uuid);

	for (i = 0; i < MD_SB_DISKS; i++) {
		int state = sb0->disks[i].state;
		sb->dev_roles[i] = MD_DISK_ROLE_SPARE;
		if ((state & (1<<MD_DISK_SYNC)) &&
		    !(state & (1<<MD_DISK_FAULTY)))
			sb->dev_roles[i] = __cpu_to_le16(sb0->disks[i].raid_disk);
	}
	sb->sb_csum = calc_sb_1_csum(sb);
	return ret;
}
struct superswitch super1 = {
	.examine_super = examine_super1,
	.brief_examine_super = brief_examine_super1,
	.export_examine_super = export_examine_super1,
	.detail_super = detail_super1,
	.brief_detail_super = brief_detail_super1,
	.export_detail_super = export_detail_super1,
	.write_init_super = write_init_super1,
	.validate_geometry = validate_geometry1,
	.add_to_super = add_to_super1,
	.examine_badblocks = examine_badblocks_super1,
	.copy_metadata = copy_metadata1,
	.write_init_ppl = write_init_ppl1,
	.match_home = match_home1,
	.uuid_from_super = uuid_from_super1,
	.getinfo_super = getinfo_super1,
	.container_content = container_content1,
	.update_super = update_super1,
	.init_super = init_super1,
	.store_super = store_super1,
	.compare_super = compare_super1,
	.load_super = load_super1,
	.match_metadata_desc = match_metadata_desc1,
	.avail_size = avail_size1,
	.add_internal_bitmap = add_internal_bitmap1,
	.locate_bitmap = locate_bitmap1,
	.write_bitmap = write_bitmap1,
	.free_super = free_super1,
#if __BYTE_ORDER == BIG_ENDIAN