__u32 delta_disks; /* change in number of raid_disks */
__u32 new_layout; /* new layout */
__u32 new_chunk; /* new chunk size (bytes) */
- __u8 pad1[128-124]; /* set to 0 when written */
+ __u32 new_offset; /* signed number to add to data_offset in new
+ * layout. 0 == no-change. This can be
+ * different on each device in the array.
+ */
/* constant this-device information - 64 bytes */
__u64 data_offset; /* sector start of data, often 0 */
__u32 dev_number; /* permanent identifier of this device - not role in raid */
__u32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */
	__u8	device_uuid[16]; /* user-space settable, ignored by kernel */
- __u8 devflags; /* per-device flags. Only one defined...*/
+ __u8 devflags; /* per-device flags. Only one defined...*/
#define WriteMostly1 1 /* mask for writemostly flag in above */
- __u8 pad2[64-57]; /* set to 0 when writing */
+ /* bad block log. If there are any bad blocks the feature flag is set.
+ * if offset and size are non-zero, that space is reserved and available.
+ */
+ __u8 bblog_shift; /* shift from sectors to block size for badblocklist */
+ __u16 bblog_size; /* number of sectors reserved for badblocklist */
+ __u32 bblog_offset; /* sector offset from superblock to bblog, signed */
/* array state information - 64 bytes */
	__u64	utime;		/* 40 bits second, 24 bits microseconds */
* must be honoured
*/
#define MD_FEATURE_RESHAPE_ACTIVE 4
-
-#define MD_FEATURE_ALL (1|2|4)
+#define MD_FEATURE_BAD_BLOCKS 8 /* badblock list is not empty */
+#define MD_FEATURE_REPLACEMENT 16 /* This device is replacing an
+ * active device with same 'role'.
+ * 'recovery_offset' is also set.
+ */
+#define MD_FEATURE_RESHAPE_BACKWARDS 32 /* Reshape doesn't change number
+ * of devices, but is going
+ * backwards anyway.
+ */
+#define MD_FEATURE_NEW_OFFSET 64 /* new_offset must be honoured */
+#define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \
+ |MD_FEATURE_RECOVERY_OFFSET \
+ |MD_FEATURE_RESHAPE_ACTIVE \
+ |MD_FEATURE_BAD_BLOCKS \
+ |MD_FEATURE_REPLACEMENT \
+ |MD_FEATURE_RESHAPE_BACKWARDS \
+ |MD_FEATURE_NEW_OFFSET \
+ )
#ifndef offsetof
#define offsetof(t,f) ((size_t)&(((t*)0)->f))
if (sb->data_offset)
printf(" Data Offset : %llu sectors\n",
(unsigned long long)__le64_to_cpu(sb->data_offset));
+ if (sb->new_offset) {
+ unsigned long long offset = __le64_to_cpu(sb->data_offset);
+ offset += (signed)(int32_t)__le32_to_cpu(sb->new_offset);
+ printf(" New Offset : %llu sectors\n", offset);
+ }
printf(" Super Offset : %llu sectors\n",
(unsigned long long)__le64_to_cpu(sb->super_offset));
if (__le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET)
printf("Internal Bitmap : %ld sectors from superblock\n",
(long)(int32_t)__le32_to_cpu(sb->bitmap_offset));
}
- if (sb->feature_map & __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE)) {
+ if (sb->feature_map & __cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE)) {
printf(" Reshape pos'n : %llu%s\n", (unsigned long long)__le64_to_cpu(sb->reshape_position)/2,
human_size(__le64_to_cpu(sb->reshape_position)<<9));
if (__le32_to_cpu(sb->delta_disks)) {
atime = __le64_to_cpu(sb->utime) & 0xFFFFFFFFFFULL;
printf(" Update Time : %.24s\n", ctime(&atime));
+ if (sb->bblog_size && sb->bblog_offset) {
+ printf(" Bad Block Log : %d entries available at offset %ld sectors",
+ __le16_to_cpu(sb->bblog_size)*512/8,
+ (long)__le32_to_cpu(sb->bblog_offset));
+ if (sb->feature_map &
+ __cpu_to_le32(MD_FEATURE_BAD_BLOCKS))
+ printf(" - bad blocks present.");
+ printf("\n");
+ }
+
+
if (calc_sb_1_csum(sb) == sb->sb_csum)
printf(" Checksum : %x - correct\n", __le32_to_cpu(sb->sb_csum));
else
else
nm = NULL;
- printf("ARRAY%s%s", nm ? " /dev/md/":"", nm);
+ printf("ARRAY ");
+ if (nm) {
+ printf("/dev/md/");
+ print_escape(nm);
+ putchar(' ');
+ }
if (verbose && c)
printf(" level=%s", c);
sb_offset = __le64_to_cpu(sb->super_offset);
if ((i&3)==0 && i != 0) printf(":");
printf("%02x", sb->set_uuid[i]);
}
- if (sb->set_name[0])
- printf(" name=%.32s", sb->set_name);
+ if (sb->set_name[0]) {
+ printf(" name=");
+ print_quoted(sb->set_name);
+ }
printf("\n");
}
struct mdp_superblock_1 *sb = st->sb;
int i;
int len = 32;
+ int layout;
printf("MD_LEVEL=%s\n", map_num(pers, __le32_to_cpu(sb->level)));
printf("MD_DEVICES=%d\n", __le32_to_cpu(sb->raid_disks));
}
if (len)
printf("MD_NAME=%.*s\n", len, sb->set_name);
+ if (__le32_to_cpu(sb->level) > 0) {
+ int ddsks = 0, ddsks_denom = 1;
+ switch(__le32_to_cpu(sb->level)) {
+ case 1: ddsks=1;break;
+ case 4:
+ case 5: ddsks = __le32_to_cpu(sb->raid_disks)-1; break;
+ case 6: ddsks = __le32_to_cpu(sb->raid_disks)-2; break;
+ case 10:
+ layout = __le32_to_cpu(sb->layout);
+ ddsks = __le32_to_cpu(sb->raid_disks);
+ ddsks_denom = (layout&255) * ((layout>>8)&255);
+ }
+ if (ddsks) {
+ long long asize = __le64_to_cpu(sb->size);
+ asize = (asize << 9) * ddsks / ddsks_denom;
+ printf("MD_ARRAY_SIZE=%s\n",human_size_brief(asize,JEDEC));
+ }
+ }
printf("MD_UUID=");
for (i=0; i<16; i++) {
if ((i&3)==0 && i != 0) printf(":");
struct mdp_superblock_1 *sb = st->sb;
int i;
- if (sb->set_name[0])
- printf(" name=%.32s", sb->set_name);
+ if (sb->set_name[0]) {
+ printf(" name=");
+ print_quoted(sb->set_name);
+ }
printf(" UUID=");
for (i=0; i<16; i++) {
if ((i&3)==0 && i != 0) printf(":");
static void getinfo_super1(struct supertype *st, struct mdinfo *info, char *map)
{
struct mdp_superblock_1 *sb = st->sb;
+ struct bitmap_super_s *bsb = (void*)(((char*)sb)+MAX_SB_SIZE);
+ struct misc_dev_info *misc = (void*)(((char*)sb)+MAX_SB_SIZE+BM_SUPER_SIZE);
int working = 0;
unsigned int i;
unsigned int role;
unsigned int map_disks = info->array.raid_disks;
+ unsigned long long super_offset;
+ unsigned long long data_size;
memset(info, 0, sizeof(*info));
info->array.major_version = 1;
else
role = __le16_to_cpu(sb->dev_roles[__le32_to_cpu(sb->dev_number)]);
+ super_offset = __le64_to_cpu(sb->super_offset);
+ data_size = __le64_to_cpu(sb->size);
+ if (info->data_offset < super_offset) {
+ unsigned long long end;
+ info->space_before = info->data_offset;
+ end = super_offset;
+ if (info->bitmap_offset < 0)
+ end += info->bitmap_offset;
+ if (info->data_offset + data_size < end)
+ info->space_after = end - data_size - info->data_offset;
+ else
+ info->space_after = 0;
+ } else {
+ info->space_before = (info->data_offset -
+ super_offset);
+ if (info->bitmap_offset > 0) {
+ unsigned long long bmend = info->bitmap_offset;
+ unsigned long long size = __le64_to_cpu(bsb->sync_size);
+ size /= __le32_to_cpu(bsb->chunksize) >> 9;
+ size = (size + 7) >> 3;
+ size += sizeof(bitmap_super_t);
+ size = ROUND_UP(size, 4096);
+ size /= 512;
+ size += bmend;
+ if (size < info->space_before)
+ info->space_before -= size;
+ else
+ info->space_before = 0;
+ } else
+ info->space_before -= 8; /* superblock */
+ info->space_after = misc->device_size - data_size - info->data_offset;
+ }
+
info->disk.raid_disk = -1;
switch(role) {
case 0xFFFF:
if (sb->feature_map & __le32_to_cpu(MD_FEATURE_RESHAPE_ACTIVE)) {
info->reshape_active = 1;
+ if (info->array.level == 10)
+ info->reshape_active |= RESHAPE_NO_BACKUP;
info->reshape_progress = __le64_to_cpu(sb->reshape_position);
info->new_level = __le32_to_cpu(sb->new_level);
info->delta_disks = __le32_to_cpu(sb->delta_disks);
if (subarray)
return NULL;
- info = malloc(sizeof(*info));
+ info = xmalloc(sizeof(*info));
getinfo_super1(st, info, NULL);
return info;
}
}
} else if (strcmp(update, "no-bitmap") == 0) {
sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
+ } else if (strcmp(update, "bbl") == 0) {
+ /* only possible if there is room after the bitmap, or if
+ * there is no bitmap
+ */
+ unsigned long long sb_offset = __le64_to_cpu(sb->super_offset);
+ unsigned long long data_offset = __le64_to_cpu(sb->data_offset);
+ long bitmap_offset = (long)__le64_to_cpu(sb->bitmap_offset);
+ long bm_sectors = 0;
+ long space;
+
+ if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
+ struct bitmap_super_s *bsb;
+ bsb = (struct bitmap_super_s *)(((char*)sb)+MAX_SB_SIZE);
+ bm_sectors = bitmap_sectors(bsb);
+ }
+
+ if (sb_offset < data_offset) {
+ /* 1.1 or 1.2. Put bbl just before data
+ */
+ long bb_offset;
+ space = data_offset - sb_offset;
+ bb_offset = space - 8;
+ if (bm_sectors && bitmap_offset > 0)
+ space -= (bitmap_offset + bm_sectors);
+ else
+ space -= 8; /* The superblock */
+ if (space >= 8) {
+ sb->bblog_size = __cpu_to_le16(8);
+ sb->bblog_offset = __cpu_to_le32(bb_offset);
+ }
+ } else {
+ /* 1.0 - Put bbl just before super block */
+ if (bm_sectors && bitmap_offset < 0)
+ space = -bitmap_offset - bm_sectors;
+ else
+ space = sb_offset - data_offset -
+ __le64_to_cpu(sb->data_size);
+ if (space >= 8) {
+ sb->bblog_size = __cpu_to_le16(8);
+ sb->bblog_offset = __cpu_to_le32((unsigned)-8);
+ }
+ }
+ } else if (strcmp(update, "no-bbl") == 0) {
+ if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BAD_BLOCKS))
+ pr_err("Cannot remove active bbl from %s\n",devname);
+ else {
+ sb->bblog_size = 0;
+ sb->bblog_shift = 0;
+ sb->bblog_offset = 0;
+ }
} else if (strcmp(update, "homehost") == 0 &&
homehost) {
char *c;
}
static int init_super1(struct supertype *st, mdu_array_info_t *info,
- unsigned long long size, char *name, char *homehost, int *uuid)
+ unsigned long long size, char *name, char *homehost,
+ int *uuid, unsigned long long data_offset)
{
struct mdp_superblock_1 *sb;
int spares;
int sbsize;
if (posix_memalign((void**)&sb, 4096, SUPER1_SIZE) != 0) {
- fprintf(stderr, Name
- ": %s could not allocate superblock\n", __func__);
+ pr_err("%s could not allocate superblock\n", __func__);
return 0;
}
memset(sb, 0, SUPER1_SIZE);
spares = info->working_disks - info->active_disks;
if (info->raid_disks + spares > MAX_DEVS) {
- fprintf(stderr, Name ": too many devices requested: %d+%d > %d\n",
+ pr_err("too many devices requested: %d+%d > %d\n",
info->raid_disks , spares, MAX_DEVS);
return 0;
}
sb->chunksize = __cpu_to_le32(info->chunk_size>>9);
sb->raid_disks = __cpu_to_le32(info->raid_disks);
- sb->data_offset = __cpu_to_le64(0);
+ sb->data_offset = __cpu_to_le64(data_offset);
sb->data_size = __cpu_to_le64(0);
sb->super_offset = __cpu_to_le64(0);
sb->recovery_offset = __cpu_to_le64(0);
struct devinfo {
int fd;
char *devname;
+ long long data_offset;
mdu_disk_info_t disk;
struct devinfo *next;
};
#ifndef MDASSEMBLE
/* Add a device to the superblock being created */
static int add_to_super1(struct supertype *st, mdu_disk_info_t *dk,
- int fd, char *devname)
+ int fd, char *devname, unsigned long long data_offset)
{
struct mdp_superblock_1 *sb = st->sb;
__u16 *rp = sb->dev_roles + dk->number;
dip = (struct devinfo **)&st->info;
while (*dip)
dip = &(*dip)->next;
- di = malloc(sizeof(struct devinfo));
+ di = xmalloc(sizeof(struct devinfo));
di->fd = fd;
di->devname = devname;
di->disk = *dk;
+ di->data_offset = data_offset;
di->next = NULL;
*dip = di;
if (sb_offset != __le64_to_cpu(sb->super_offset) &&
0 != __le64_to_cpu(sb->super_offset)
) {
- fprintf(stderr, Name ": internal error - sb_offset is wrong\n");
+ pr_err("internal error - sb_offset is wrong\n");
abort();
}
struct devinfo *di;
unsigned long long dsize, array_size;
unsigned long long sb_offset, headroom;
+ unsigned long long data_offset;
- for (di = st->info; di && ! rv ; di = di->next) {
+ for (di = st->info; di; di = di->next) {
if (di->disk.state == 1)
continue;
if (di->fd < 0)
continue;
- while (Kill(di->devname, NULL, 0, 1, 1) == 0)
+ while (Kill(di->devname, NULL, 0, -1, 1) == 0)
;
sb->dev_number = __cpu_to_le32(di->disk.number);
* 2: 4K from start of device.
* Depending on the array size, we might leave extra space
* for a bitmap.
+ * Also leave 4K for bad-block log.
*/
array_size = __le64_to_cpu(sb->size);
- /* work out how much space we left for a bitmap */
- bm_space = choose_bm_space(array_size);
+ /* work out how much space we left for a bitmap,
+ * Add 8 sectors for bad block log */
+ bm_space = choose_bm_space(array_size) + 8;
/* We try to leave 0.1% at the start for reshape
* operations, but limit this to 128Meg (0.1% of 10Gig)
* which is plenty for efficient reshapes
+ * However we make it at least 2 chunks as one chunk
+ * is minimum needed for reshape.
*/
headroom = 128 * 1024 * 2;
- while (headroom << 10 > array_size)
+ while (headroom << 10 > array_size &&
+ headroom/2 >= __le32_to_cpu(sb->chunksize) * 2)
headroom >>= 1;
+ data_offset = di->data_offset;
switch(st->minor_version) {
case 0:
sb_offset = dsize;
sb_offset -= 8*2;
sb_offset &= ~(4*2-1);
sb->super_offset = __cpu_to_le64(sb_offset);
- sb->data_offset = __cpu_to_le64(0);
+ if (data_offset == INVALID_SECTORS)
+ sb->data_offset = 0;
if (sb_offset < array_size + bm_space)
bm_space = sb_offset - array_size;
sb->data_size = __cpu_to_le64(sb_offset - bm_space);
+ if (bm_space >= 8) {
+ sb->bblog_size = __cpu_to_le16(8);
+ sb->bblog_offset = __cpu_to_le32((unsigned)-8);
+ }
break;
case 1:
sb->super_offset = __cpu_to_le64(0);
- reserved = bm_space + 4*2;
- /* Try for multiple of 1Meg so it is nicely aligned */
- #define ONE_MEG (2*1024)
- reserved = ((reserved + ONE_MEG-1)/ONE_MEG) * ONE_MEG;
- if (reserved + __le64_to_cpu(sb->size) > dsize)
- reserved = dsize - __le64_to_cpu(sb->size);
- /* force 4K alignment */
- reserved &= ~7ULL;
-
- if (reserved < headroom)
- reserved = headroom;
+ if (data_offset == INVALID_SECTORS) {
+ reserved = bm_space + 4*2;
+ if (reserved < headroom)
+ reserved = headroom;
+ if (reserved + array_size > dsize)
+ reserved = dsize - array_size;
+ /* Try for multiple of 1Meg so it is nicely aligned */
+ #define ONE_MEG (2*1024)
+ if (reserved > ONE_MEG)
+ reserved = (reserved/ONE_MEG) * ONE_MEG;
+
+ /* force 4K alignment */
+ reserved &= ~7ULL;
+
+ } else
+ reserved = data_offset;
sb->data_offset = __cpu_to_le64(reserved);
sb->data_size = __cpu_to_le64(dsize - reserved);
+ if (reserved >= 16) {
+ sb->bblog_size = __cpu_to_le16(8);
+ sb->bblog_offset = __cpu_to_le32(reserved-8);
+ }
break;
case 2:
sb_offset = 4*2;
sb->super_offset = __cpu_to_le64(4*2);
- if (4*2 + 4*2 + bm_space + __le64_to_cpu(sb->size)
- > dsize)
- bm_space = dsize - __le64_to_cpu(sb->size)
- - 4*2 - 4*2;
-
- reserved = bm_space + 4*2 + 4*2;
- /* Try for multiple of 1Meg so it is nicely aligned */
- #define ONE_MEG (2*1024)
- reserved = ((reserved + ONE_MEG-1)/ONE_MEG) * ONE_MEG;
- if (reserved + __le64_to_cpu(sb->size) > dsize)
- reserved = dsize - __le64_to_cpu(sb->size);
- /* force 4K alignment */
- reserved &= ~7ULL;
-
- if (reserved < headroom)
- reserved = headroom;
+ if (data_offset == INVALID_SECTORS) {
+ if (4*2 + 4*2 + bm_space + array_size
+ > dsize)
+ bm_space = dsize - array_size
+ - 4*2 - 4*2;
+
+ reserved = bm_space + 4*2 + 4*2;
+ if (reserved < headroom)
+ reserved = headroom;
+ if (reserved + array_size > dsize)
+ reserved = dsize - array_size;
+ /* Try for multiple of 1Meg so it is nicely aligned */
+ #define ONE_MEG (2*1024)
+ if (reserved > ONE_MEG)
+ reserved = (reserved/ONE_MEG) * ONE_MEG;
+
+ /* force 4K alignment */
+ reserved &= ~7ULL;
+
+ } else
+ reserved = data_offset;
sb->data_offset = __cpu_to_le64(reserved);
sb->data_size = __cpu_to_le64(dsize - reserved);
+ if (reserved >= 16+16) {
+ sb->bblog_size = __cpu_to_le16(8);
+ /* '8' sectors for the bblog, and another '8'
+ * because we want offset from superblock, not
+ * start of device.
+ */
+ sb->bblog_offset = __cpu_to_le32(reserved-8-8);
+ }
break;
default:
- fprintf(stderr, Name ": Failed to write invalid "
- "metadata format 1.%i to %s\n",
- st->minor_version, di->devname);
+ pr_err("Failed to write invalid "
+ "metadata format 1.%i to %s\n",
+ st->minor_version, di->devname);
rv = -EINVAL;
goto out;
}
-
sb->sb_csum = calc_sb_1_csum(sb);
rv = store_super1(st, di->fd);
if (rv == 0 && (__le32_to_cpu(sb->feature_map) & 1))
rv = st->ss->write_bitmap(st, di->fd);
close(di->fd);
di->fd = -1;
+ if (rv)
+ goto error_out;
}
error_out:
if (rv)
- fprintf(stderr, Name ": Failed to write metadata to %s\n",
- di->devname);
+ pr_err("Failed to write metadata to %s\n",
+ di->devname);
out:
return rv;
}
if (!first) {
if (posix_memalign((void**)&first, 4096, SUPER1_SIZE) != 0) {
- fprintf(stderr, Name
- ": %s could not allocate superblock\n", __func__);
+ pr_err("%s could not allocate superblock\n", __func__);
return 1;
}
memcpy(first, second, SUPER1_SIZE);
if (dsize < 24) {
if (devname)
- fprintf(stderr, Name ": %s is too small for md: size is %llu sectors.\n",
+ pr_err("%s is too small for md: size is %llu sectors.\n",
devname, dsize);
return 1;
}
if (lseek64(fd, sb_offset << 9, 0)< 0LL) {
if (devname)
- fprintf(stderr, Name ": Cannot seek to superblock on %s: %s\n",
+ pr_err("Cannot seek to superblock on %s: %s\n",
devname, strerror(errno));
return 1;
}
if (posix_memalign((void**)&super, 4096, SUPER1_SIZE) != 0) {
- fprintf(stderr, Name ": %s could not allocate superblock\n",
+ pr_err("%s could not allocate superblock\n",
__func__);
return 1;
}
if (aread(&afd, super, MAX_SB_SIZE) != MAX_SB_SIZE) {
if (devname)
- fprintf(stderr, Name ": Cannot read superblock on %s\n",
+ pr_err("Cannot read superblock on %s\n",
devname);
free(super);
return 1;
if (__le32_to_cpu(super->magic) != MD_SB_MAGIC) {
if (devname)
- fprintf(stderr, Name ": No super block found on %s (Expected magic %08x, got %08x)\n",
+ pr_err("No super block found on %s (Expected magic %08x, got %08x)\n",
devname, MD_SB_MAGIC, __le32_to_cpu(super->magic));
free(super);
return 2;
if (__le32_to_cpu(super->major_version) != 1) {
if (devname)
- fprintf(stderr, Name ": Cannot interpret superblock on %s - version is %d\n",
+ pr_err("Cannot interpret superblock on %s - version is %d\n",
devname, __le32_to_cpu(super->major_version));
free(super);
return 2;
}
if (__le64_to_cpu(super->super_offset) != sb_offset) {
if (devname)
- fprintf(stderr, Name ": No superblock found on %s (super_offset is wrong)\n",
+ pr_err("No superblock found on %s (super_offset is wrong)\n",
devname);
free(super);
return 2;
static struct supertype *match_metadata_desc1(char *arg)
{
- struct supertype *st = calloc(1, sizeof(*st));
- if (!st)
- return st;
+ struct supertype *st = xcalloc(1, sizeof(*st));
st->container_dev = NoMdDev;
st->ss = &super1;
* superblock type st, and reserving 'reserve' sectors for
* a possible bitmap
*/
-static __u64 avail_size1(struct supertype *st, __u64 devsize)
+static __u64 _avail_size1(struct supertype *st, __u64 devsize,
+ unsigned long long data_offset, int chunksize)
{
struct mdp_superblock_1 *super = st->sb;
+ int bmspace = 0;
if (devsize < 24)
return 0;
if (super == NULL)
/* creating: allow suitable space for bitmap */
- devsize -= choose_bm_space(devsize);
+ bmspace = choose_bm_space(devsize);
#ifndef MDASSEMBLE
else if (__le32_to_cpu(super->feature_map)&MD_FEATURE_BITMAP_OFFSET) {
/* hot-add. allow for actual size of bitmap */
struct bitmap_super_s *bsb;
bsb = (struct bitmap_super_s *)(((char*)super)+MAX_SB_SIZE);
- devsize -= bitmap_sectors(bsb);
+ bmspace = bitmap_sectors(bsb);
}
#endif
+ /* Allow space for bad block log */
+ if (super && super->bblog_size)
+ devsize -= __le16_to_cpu(super->bblog_size);
+ else
+ devsize -= 8;
+
if (st->minor_version < 0)
/* not specified, so time to set default */
st->minor_version = 2;
+
+ if (data_offset != INVALID_SECTORS)
+ switch(st->minor_version) {
+ case 0:
+ return devsize - data_offset - 8*2;
+ case 1:
+ case 2:
+ return devsize - data_offset;
+ default:
+ return 0;
+ }
+
+ devsize -= bmspace;
+
if (super == NULL && st->minor_version > 0) {
/* haven't committed to a size yet, so allow some
* slack for space for reshape.
* Limit slack to 128M, but aim for about 0.1%
*/
unsigned long long headroom = 128*1024*2;
- while ((headroom << 10) > devsize)
+ while ((headroom << 10) > devsize &&
+ (chunksize == 0 ||
+ headroom / 2 >= ((unsigned)chunksize*2)*2))
headroom >>= 1;
devsize -= headroom;
}
}
return 0;
}
+/* Report the number of sectors available for data on a device of
+ * 'devsize' sectors. Thin wrapper around _avail_size1 with no
+ * chunk-size constraint (chunksize == 0), for callers that have not
+ * chosen a chunk size yet.
+ */
+static __u64 avail_size1(struct supertype *st, __u64 devsize,
+			 unsigned long long data_offset)
+{
+	return _avail_size1(st, devsize, data_offset, 0);
+}
static int
add_internal_bitmap1(struct supertype *st,
unsigned long long max_bits;
unsigned long long min_chunk;
long offset;
+ long bbl_offset, bbl_size;
unsigned long long chunk = *chunkp;
int room = 0;
int creating = 0;
bitmap_super_t *bms = (bitmap_super_t*)(((char*)sb) + MAX_SB_SIZE);
int uuid[4];
+
if (__le64_to_cpu(sb->data_size) == 0)
/* Must be creating the array, else data_size would be non-zero */
creating = 1;
*/
offset = 0;
room = choose_bm_space(__le64_to_cpu(sb->size));
+ bbl_size = 8;
} else {
room = __le64_to_cpu(sb->super_offset)
- __le64_to_cpu(sb->data_offset)
- __le64_to_cpu(sb->data_size);
+ bbl_size = __le16_to_cpu(sb->bblog_size);
+ if (bbl_size < 8)
+ bbl_size = 8;
+ bbl_offset = (__s32)__le32_to_cpu(sb->bblog_offset);
+ if (bbl_size < -bbl_offset)
+ bbl_size = -bbl_offset;
if (!may_change || (room < 3*2 &&
__le32_to_cpu(sb->max_dev) <= 384)) {
room = 3*2;
offset = 1*2;
+ bbl_size = 0;
} else {
offset = 0; /* means movable offset */
}
if (creating) {
offset = 4*2;
room = choose_bm_space(__le64_to_cpu(sb->size));
+ bbl_size = 8;
} else {
room = __le64_to_cpu(sb->data_offset)
- __le64_to_cpu(sb->super_offset);
+ bbl_size = __le16_to_cpu(sb->bblog_size);
+ if (bbl_size)
+ room = __le32_to_cpu(sb->bblog_offset) + bbl_size;
+ else
+ bbl_size = 8;
+
if (!may_change) {
room -= 2; /* Leave 1K for superblock */
offset = 2;
+ bbl_size = 0;
} else {
room -= 4*2; /* leave 4K for superblock */
offset = 4*2;
return 0;
}
+ room -= bbl_size;
if (chunk == UnSet && room > 128*2)
/* Limit to 128K of bitmap when chunk size not requested */
room = 128*2;
bits = (size*512) / chunk + 1;
room = ((bits+7)/8 + sizeof(bitmap_super_t) +4095)/4096;
room *= 8; /* convert 4K blocks to sectors */
- offset = -room;
+ offset = -room - bbl_size;
}
sb->bitmap_offset = (int32_t)__cpu_to_le32(offset);
static int validate_geometry1(struct supertype *st, int level,
int layout, int raiddisks,
int *chunk, unsigned long long size,
+ unsigned long long data_offset,
char *subdev, unsigned long long *freesize,
int verbose)
{
if (level == LEVEL_CONTAINER) {
if (verbose)
- fprintf(stderr, Name ": 1.x metadata does not support containers\n");
+ pr_err("1.x metadata does not support containers\n");
return 0;
}
if (chunk && *chunk == UnSet)
fd = open(subdev, O_RDONLY|O_EXCL, 0);
if (fd < 0) {
if (verbose)
- fprintf(stderr, Name ": super1.x cannot open %s: %s\n",
+ pr_err("super1.x cannot open %s: %s\n",
subdev, strerror(errno));
return 0;
}
}
close(fd);
- *freesize = avail_size1(st, ldsize >> 9);
+ *freesize = _avail_size1(st, ldsize >> 9, data_offset, *chunk);
return 1;
}
#endif /* MDASSEMBLE */