__u64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/
__u32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */
- __u32 layout; /* only for raid5 currently */
+ __u32 layout; /* used for raid5, raid6, raid10, and raid0 */
__u64 size; /* used size of component devices, in 512byte sectors */
__u32 chunksize; /* in 512byte sectors */
/* bad block log. If there are any bad blocks the feature flag is set.
* if offset and size are non-zero, that space is reserved and available.
*/
- __u8 bblog_shift; /* shift from sectors to block size for badblocklist */
- __u16 bblog_size; /* number of sectors reserved for badblocklist */
+ __u8 bblog_shift; /* shift from sectors to block size for badblock list */
+ __u16 bblog_size; /* number of sectors reserved for badblock list */
__u32 bblog_offset; /* sector offset from superblock to bblog, signed */
/* array state information - 64 bytes */
- __u64 utime; /* 40 bits second, 24 btes microseconds */
+ __u64 utime; /* 40 bits second, 24 bits microseconds */
__u64 events; /* incremented when superblock updated */
__u64 resync_offset; /* data before this offset (from data_offset) known to be in sync */
__u32 sb_csum; /* checksum upto dev_roles[max_dev] */
__u64 device_size;
};
+#define MULTIPLE_PPL_AREA_SIZE_SUPER1 (1024 * 1024) /* Size of the whole
+ * multiple PPL area
+ */
/* feature_map bits */
#define MD_FEATURE_BITMAP_OFFSET 1
#define MD_FEATURE_RECOVERY_OFFSET 2 /* recovery_offset is present and
#define MD_FEATURE_BITMAP_VERSIONED 256 /* bitmap version number checked properly */
#define MD_FEATURE_JOURNAL 512 /* support write journal */
#define MD_FEATURE_PPL 1024 /* support PPL */
+#define MD_FEATURE_MUTLIPLE_PPLS 2048 /* support for multiple PPLs */
+#define MD_FEATURE_RAID0_LAYOUT 4096 /* layout is meaningful in RAID0 */
#define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \
|MD_FEATURE_RECOVERY_OFFSET \
|MD_FEATURE_RESHAPE_ACTIVE \
|MD_FEATURE_BITMAP_VERSIONED \
|MD_FEATURE_JOURNAL \
|MD_FEATURE_PPL \
+ |MD_FEATURE_MUTLIPLE_PPLS \
+ |MD_FEATURE_RAID0_LAYOUT \
)
static int role_from_sb(struct mdp_superblock_1 *sb)
return len;
}
+static inline unsigned int md_feature_any_ppl_on(__u32 feature_map)
+{
+ return ((__cpu_to_le32(feature_map) &
+ (MD_FEATURE_PPL | MD_FEATURE_MUTLIPLE_PPLS)));
+}
+
static inline unsigned int choose_ppl_space(int chunk)
{
return (PPL_HEADER_SIZE >> 9) + (chunk > 128*2 ? chunk : 128*2);
int layout;
unsigned long long sb_offset;
struct mdinfo info;
+ int inconsistent = 0;
printf(" Magic : %08x\n", __le32_to_cpu(sb->magic));
printf(" Version : 1");
printf(" Raid Level : %s\n", c?c:"-unknown-");
printf(" Raid Devices : %d\n", __le32_to_cpu(sb->raid_disks));
printf("\n");
- printf(" Avail Dev Size : %llu%s\n",
+ printf(" Avail Dev Size : %llu sectors%s\n",
(unsigned long long)__le64_to_cpu(sb->data_size),
human_size(__le64_to_cpu(sb->data_size)<<9));
if (__le32_to_cpu(sb->level) > 0) {
if (ddsks) {
long long asize = __le64_to_cpu(sb->size);
asize = (asize << 9) * ddsks / ddsks_denom;
- printf(" Array Size : %llu%s\n",
+ printf(" Array Size : %llu KiB%s\n",
asize >> 10, human_size(asize));
}
if (sb->size != sb->data_size)
- printf(" Used Dev Size : %llu%s\n",
+ printf(" Used Dev Size : %llu sectors%s\n",
(unsigned long long)__le64_to_cpu(sb->size),
human_size(__le64_to_cpu(sb->size)<<9));
}
if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
printf("Internal Bitmap : %ld sectors from superblock\n",
(long)(int32_t)__le32_to_cpu(sb->bitmap_offset));
- } else if (sb->feature_map & __cpu_to_le32(MD_FEATURE_PPL)) {
+ } else if (md_feature_any_ppl_on(sb->feature_map)) {
printf(" PPL : %u sectors at offset %d sectors from superblock\n",
__le16_to_cpu(sb->ppl.size),
__le16_to_cpu(sb->ppl.offset));
printf(" Events : %llu\n",
(unsigned long long)__le64_to_cpu(sb->events));
printf("\n");
+ if (__le32_to_cpu(sb->level) == 0 &&
+ (sb->feature_map & __cpu_to_le32(MD_FEATURE_RAID0_LAYOUT))) {
+ c = map_num(r0layout, __le32_to_cpu(sb->layout));
+ printf(" Layout : %s\n", c?c:"-unknown-");
+ }
if (__le32_to_cpu(sb->level) == 5) {
c = map_num(r5layout, __le32_to_cpu(sb->layout));
printf(" Layout : %s\n", c?c:"-unknown-");
if (role == d)
cnt++;
}
- if (cnt == 2)
+ if (cnt == 2 && __le32_to_cpu(sb->level) > 0)
printf("R");
else if (cnt == 1)
printf("A");
else if (cnt == 0)
printf(".");
- else
+ else {
printf("?");
+ inconsistent = 1;
+ }
}
#if 0
/* This is confusing too */
#endif
printf(" ('A' == active, '.' == missing, 'R' == replacing)");
printf("\n");
+ for (d = 0; d < __le32_to_cpu(sb->max_dev); d++) {
+ unsigned int r = __le16_to_cpu(sb->dev_roles[d]);
+ if (r <= MD_DISK_ROLE_MAX &&
+ r > __le32_to_cpu(sb->raid_disks) + delta_extra)
+ inconsistent = 1;
+ }
+ if (inconsistent) {
+ printf("WARNING Array state is inconsistent - each number should appear only once\n");
+ for (d = 0; d < __le32_to_cpu(sb->max_dev); d++)
+ if (__le16_to_cpu(sb->dev_roles[d]) >= MD_DISK_ROLE_FAULTY)
+ printf(" %d:-", d);
+ else
+ printf(" %d:%d", d, __le16_to_cpu(sb->dev_roles[d]));
+ printf("\n");
+ }
}
static void brief_examine_super1(struct supertype *st, int verbose)
return 1;
}
-static void detail_super1(struct supertype *st, char *homehost)
+static void detail_super1(struct supertype *st, char *homehost, char *subarray)
{
struct mdp_superblock_1 *sb = st->sb;
bitmap_super_t *bms = (bitmap_super_t*)(((char*)sb) + MAX_SB_SIZE);
(unsigned long long)__le64_to_cpu(sb->events));
}
-static void brief_detail_super1(struct supertype *st)
+static void brief_detail_super1(struct supertype *st, char *subarray)
{
struct mdp_superblock_1 *sb = st->sb;
int i;
info->array.chunk_size = __le32_to_cpu(sb->chunksize)*512;
info->array.state =
(__le64_to_cpu(sb->resync_offset) == MaxSector) ? 1 : 0;
- if (__le32_to_cpu(bsb->nodes) > 1)
- info->array.state |= (1 << MD_SB_CLUSTERED);
super_offset = __le64_to_cpu(sb->super_offset);
info->data_offset = __le64_to_cpu(sb->data_offset);
info->component_size = __le64_to_cpu(sb->size);
if (sb->feature_map & __le32_to_cpu(MD_FEATURE_BITMAP_OFFSET)) {
info->bitmap_offset = (int32_t)__le32_to_cpu(sb->bitmap_offset);
- } else if (sb->feature_map & __le32_to_cpu(MD_FEATURE_PPL)) {
+ if (__le32_to_cpu(bsb->nodes) > 1)
+ info->array.state |= (1 << MD_SB_CLUSTERED);
+ } else if (md_feature_any_ppl_on(sb->feature_map)) {
info->ppl_offset = __le16_to_cpu(sb->ppl.offset);
info->ppl_size = __le16_to_cpu(sb->ppl.size);
info->ppl_sector = super_offset + info->ppl_offset;
if (sb->feature_map & __le32_to_cpu(MD_FEATURE_JOURNAL)) {
info->journal_device_required = 1;
info->consistency_policy = CONSISTENCY_POLICY_JOURNAL;
- } else if (sb->feature_map & __le32_to_cpu(MD_FEATURE_PPL)) {
+ } else if (md_feature_any_ppl_on(sb->feature_map)) {
info->consistency_policy = CONSISTENCY_POLICY_PPL;
} else if (sb->feature_map & __le32_to_cpu(MD_FEATURE_BITMAP_OFFSET)) {
info->consistency_policy = CONSISTENCY_POLICY_BITMAP;
* ignored.
*/
int rv = 0;
- int lockid;
struct mdp_superblock_1 *sb = st->sb;
bitmap_super_t *bms = (bitmap_super_t*)(((char*)sb) + MAX_SB_SIZE);
- if (bms->version == BITMAP_MAJOR_CLUSTERED && dlm_funs_ready()) {
- rv = cluster_get_dlmlock(&lockid);
- if (rv) {
- pr_err("Cannot get dlmlock in %s return %d\n",
- __func__, rv);
- cluster_release_dlmlock(lockid);
- return rv;
- }
- }
-
if (strcmp(update, "homehost") == 0 &&
homehost) {
/* Note that 'homehost' is special as it is really
rv = 1;
}
} else if (strcmp(update, "linear-grow-new") == 0) {
- unsigned int i;
+ int i;
int fd;
- unsigned int max = __le32_to_cpu(sb->max_dev);
+ int max = __le32_to_cpu(sb->max_dev);
+
+ if (max > MAX_DEVS)
+ return -2;
for (i = 0; i < max; i++)
if (__le16_to_cpu(sb->dev_roles[i]) >=
MD_DISK_ROLE_FAULTY)
break;
+ if (i != info->disk.number)
+ return -2;
sb->dev_number = __cpu_to_le32(i);
- info->disk.number = i;
- if (i >= max) {
+
+ if (i == max)
sb->max_dev = __cpu_to_le32(max+1);
- }
+ if (i > max)
+ return -2;
random_uuid(sb->device_uuid);
}
} else if (strcmp(update, "linear-grow-update") == 0) {
int max = __le32_to_cpu(sb->max_dev);
- sb->raid_disks = __cpu_to_le32(info->array.raid_disks);
- if (info->array.raid_disks > max) {
+ int i = info->disk.number;
+ if (max > MAX_DEVS || i > MAX_DEVS)
+ return -2;
+ if (i > max)
+ return -2;
+ if (i == max)
sb->max_dev = __cpu_to_le32(max+1);
- }
+ sb->raid_disks = __cpu_to_le32(info->array.raid_disks);
sb->dev_roles[info->disk.number] =
__cpu_to_le16(info->disk.raid_disk);
} else if (strcmp(update, "resync") == 0) {
memcpy(bms->uuid, sb->set_uuid, 16);
} else if (strcmp(update, "no-bitmap") == 0) {
sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
+ if (bms->version == BITMAP_MAJOR_CLUSTERED && !IsBitmapDirty(devname))
+ sb->resync_offset = MaxSector;
} else if (strcmp(update, "bbl") == 0) {
/* only possible if there is room after the bitmap, or if
* there is no bitmap
if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
bitmap_offset = (long)__le32_to_cpu(sb->bitmap_offset);
bm_sectors = calc_bitmap_size(bms, 4096) >> 9;
- } else if (sb->feature_map & __cpu_to_le32(MD_FEATURE_PPL)) {
+ } else if (md_feature_any_ppl_on(sb->feature_map)) {
bitmap_offset = (long)__le16_to_cpu(sb->ppl.offset);
bm_sectors = (long)__le16_to_cpu(sb->ppl.size);
}
unsigned long long data_size = __le64_to_cpu(sb->data_size);
long bb_offset = __le32_to_cpu(sb->bblog_offset);
int space;
- int optimal_space;
int offset;
if (sb->feature_map & __cpu_to_le32(MD_FEATURE_BITMAP_OFFSET)) {
return -2;
}
- optimal_space = choose_ppl_space(__le32_to_cpu(sb->chunksize));
-
- if (space > optimal_space)
- space = optimal_space;
- if (space > UINT16_MAX)
- space = UINT16_MAX;
+ if (space >= (MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9)) {
+ space = (MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9);
+ } else {
+ int optimal_space = choose_ppl_space(
+ __le32_to_cpu(sb->chunksize));
+ if (space > optimal_space)
+ space = optimal_space;
+ if (space > UINT16_MAX)
+ space = UINT16_MAX;
+ }
sb->ppl.offset = __cpu_to_le16(offset);
sb->ppl.size = __cpu_to_le16(space);
sb->feature_map |= __cpu_to_le32(MD_FEATURE_PPL);
} else if (strcmp(update, "no-ppl") == 0) {
- sb->feature_map &= ~ __cpu_to_le32(MD_FEATURE_PPL);
+ sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_PPL |
+ MD_FEATURE_MUTLIPLE_PPLS);
} else if (strcmp(update, "name") == 0) {
if (info->name[0] == 0)
sprintf(info->name, "%d", info->array.md_minor);
strcpy(sb->set_name, homehost);
strcat(sb->set_name, ":");
strcat(sb->set_name, info->name);
- } else
- strncpy(sb->set_name, info->name, sizeof(sb->set_name));
+ } else {
+ int namelen;
+
+ namelen = min((int)strlen(info->name),
+ (int)sizeof(sb->set_name) - 1);
+ memcpy(sb->set_name, info->name, namelen);
+ memset(&sb->set_name[namelen], '\0',
+ sizeof(sb->set_name) - namelen);
+ }
} else if (strcmp(update, "devicesize") == 0 &&
__le64_to_cpu(sb->super_offset) <
__le64_to_cpu(sb->data_offset)) {
sb->devflags |= FailFast1;
else if (strcmp(update, "nofailfast") == 0)
sb->devflags &= ~FailFast1;
- else
+ else if (strcmp(update, "layout-original") == 0 ||
+ strcmp(update, "layout-alternate") == 0 ||
+ strcmp(update, "layout-unspecified") == 0) {
+ if (__le32_to_cpu(sb->level) != 0) {
+ pr_err("%s: %s only supported for RAID0\n",
+ devname?:"", update);
+ rv = -1;
+ } else if (strcmp(update, "layout-unspecified") == 0) {
+ sb->feature_map &= ~__cpu_to_le32(MD_FEATURE_RAID0_LAYOUT);
+ sb->layout = 0;
+ } else {
+ sb->feature_map |= __cpu_to_le32(MD_FEATURE_RAID0_LAYOUT);
+ sb->layout = __cpu_to_le32(update[7] == 'o' ? 1 : 2);
+ }
+ } else
rv = -1;
sb->sb_csum = calc_sb_1_csum(sb);
- if (bms->version == BITMAP_MAJOR_CLUSTERED && dlm_funs_ready())
- cluster_release_dlmlock(lockid);
return rv;
}
strcpy(sb->set_name, homehost);
strcat(sb->set_name, ":");
strcat(sb->set_name, name);
- } else
- strncpy(sb->set_name, name, sizeof(sb->set_name));
+ } else {
+ int namelen;
+
+ namelen = min((int)strlen(name),
+ (int)sizeof(sb->set_name) - 1);
+ memcpy(sb->set_name, name, namelen);
+ memset(&sb->set_name[namelen], '\0',
+ sizeof(sb->set_name) - namelen);
+ }
sb->ctime = __cpu_to_le64((unsigned long long)time(0));
sb->level = __cpu_to_le32(info->level);
int fd;
char *devname;
long long data_offset;
+ unsigned long long dev_size;
mdu_disk_info_t disk;
struct devinfo *next;
};
struct mdp_superblock_1 *sb = st->sb;
__u16 *rp = sb->dev_roles + dk->number;
struct devinfo *di, **dip;
- bitmap_super_t *bms = (bitmap_super_t*)(((char*)sb) + MAX_SB_SIZE);
- int rv, lockid;
int dk_state;
- if (bms->version == BITMAP_MAJOR_CLUSTERED && dlm_funs_ready()) {
- rv = cluster_get_dlmlock(&lockid);
- if (rv) {
- pr_err("Cannot get dlmlock in %s return %d\n",
- __func__, rv);
- cluster_release_dlmlock(lockid);
- return rv;
- }
- }
-
dk_state = dk->state & ~(1<<MD_DISK_FAILFAST);
if ((dk_state & (1<<MD_DISK_ACTIVE)) &&
(dk_state & (1<<MD_DISK_SYNC)))/* active, sync */
di->devname = devname;
di->disk = *dk;
di->data_offset = data_offset;
+ get_dev_size(fd, NULL, &di->dev_size);
di->next = NULL;
*dip = di;
- if (bms->version == BITMAP_MAJOR_CLUSTERED && dlm_funs_ready())
- cluster_release_dlmlock(lockid);
-
return 0;
}
struct align_fd afd;
int sbsize;
unsigned long long dsize;
- bitmap_super_t *bms = (bitmap_super_t*)(((char*)sb) + MAX_SB_SIZE);
- int rv, lockid;
-
- if (bms->version == BITMAP_MAJOR_CLUSTERED && dlm_funs_ready()) {
- rv = cluster_get_dlmlock(&lockid);
- if (rv) {
- pr_err("Cannot get dlmlock in %s return %d\n",
- __func__, rv);
- cluster_release_dlmlock(lockid);
- return rv;
- }
- }
if (!get_dev_size(fd, NULL, &dsize))
return 1;
}
}
fsync(fd);
- if (bms->version == BITMAP_MAJOR_CLUSTERED && dlm_funs_ready())
- cluster_release_dlmlock(lockid);
return 0;
}
struct ppl_header *ppl_hdr;
int ret;
+ /* first clear entire ppl space */
+ ret = zero_disk_range(fd, info->ppl_sector, info->ppl_size);
+ if (ret)
+ return ret;
+
ret = posix_memalign(&buf, 4096, PPL_HEADER_SIZE);
if (ret) {
pr_err("Failed to allocate PPL header buffer\n");
unsigned long long sb_offset;
unsigned long long data_offset;
long bm_offset;
+ int raid0_need_layout = 0;
for (di = st->info; di; di = di->next) {
if (di->disk.state & (1 << MD_DISK_JOURNAL))
sb->feature_map |= __cpu_to_le32(MD_FEATURE_JOURNAL);
+ if (sb->level == 0 && sb->layout != 0) {
+ struct devinfo *di2 = st->info;
+ unsigned long long s1, s2;
+ s1 = di->dev_size;
+ if (di->data_offset != INVALID_SECTORS)
+ s1 -= di->data_offset;
+ s1 /= __le32_to_cpu(sb->chunksize);
+ s2 = di2->dev_size;
+ if (di2->data_offset != INVALID_SECTORS)
+ s2 -= di2->data_offset;
+ s2 /= __le32_to_cpu(sb->chunksize);
+ if (s1 != s2)
+ raid0_need_layout = 1;
+ }
}
for (di = st->info; di; di = di->next) {
(((char *)sb) + MAX_SB_SIZE);
bm_space = calc_bitmap_size(bms, 4096) >> 9;
bm_offset = (long)__le32_to_cpu(sb->bitmap_offset);
- } else if (sb->feature_map & __cpu_to_le32(MD_FEATURE_PPL)) {
- bm_space =
- choose_ppl_space(__le32_to_cpu(sb->chunksize));
- if (bm_space > UINT16_MAX)
- bm_space = UINT16_MAX;
- if (st->minor_version == 0) {
+ } else if (md_feature_any_ppl_on(sb->feature_map)) {
+ bm_space = MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9;
+ if (st->minor_version == 0)
bm_offset = -bm_space - 8;
- if (bm_offset < INT16_MIN) {
- bm_offset = INT16_MIN;
- bm_space = -bm_offset - 8;
- }
- } else {
+ else
bm_offset = 8;
- }
sb->ppl.offset = __cpu_to_le16(bm_offset);
sb->ppl.size = __cpu_to_le16(bm_space);
} else {
sb->bblog_offset = 0;
}
+ /* RAID0 needs a layout if devices aren't all the same size */
+ if (raid0_need_layout)
+ sb->feature_map |= __cpu_to_le32(MD_FEATURE_RAID0_LAYOUT);
+
sb->sb_csum = calc_sb_1_csum(sb);
rv = store_super1(st, di->fd);
MD_FEATURE_BITMAP_OFFSET)) {
rv = st->ss->write_bitmap(st, di->fd, NodeNumUpdate);
} else if (rv == 0 &&
- (__le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL)) {
+ md_feature_any_ppl_on(sb->feature_map)) {
struct mdinfo info;
st->ss->getinfo_super(st, &info, NULL);
return rv;
}
-static int compare_super1(struct supertype *st, struct supertype *tst)
+static int compare_super1(struct supertype *st, struct supertype *tst,
+ int verbose)
{
/*
* return:
struct bitmap_super_s *bsb;
bsb = (struct bitmap_super_s *)(((char*)super)+MAX_SB_SIZE);
bmspace = calc_bitmap_size(bsb, 4096) >> 9;
- } else if (__le32_to_cpu(super->feature_map) & MD_FEATURE_PPL) {
+ } else if (md_feature_any_ppl_on(super->feature_map)) {
bmspace = __le16_to_cpu(super->ppl.size);
}
/*
* If not may_change, then this is a 'Grow' without sysfs support for
* bitmaps, and the bitmap must fit after the superblock at 1K offset.
- * If may_change, then this is create or a Grow with sysfs syupport,
+ * If may_change, then this is create or a Grow with sysfs support,
* and we can put the bitmap wherever we like.
*
* size is in sectors, chunk is in bytes !!!
static int locate_bitmap1(struct supertype *st, int fd, int node_num)
{
- unsigned long long offset;
+ unsigned long long offset, bm_sectors_per_node;
struct mdp_superblock_1 *sb;
+ bitmap_super_t *bms;
int mustfree = 0;
int ret;
ret = 0;
else
ret = -1;
- offset = __le64_to_cpu(sb->super_offset);
- offset += (int32_t) __le32_to_cpu(sb->bitmap_offset) * (node_num + 1);
+
+ offset = __le64_to_cpu(sb->super_offset) + __le32_to_cpu(sb->bitmap_offset);
+ if (node_num) {
+ bms = (bitmap_super_t*)(((char*)sb)+MAX_SB_SIZE);
+ bm_sectors_per_node = calc_bitmap_size(bms, 4096) >> 9;
+ offset += bm_sectors_per_node * node_num;
+ }
if (mustfree)
free(sb);
lseek64(fd, offset<<9, 0);
init_afd(&afd, fd);
- locate_bitmap1(st, fd, 0);
+ if (locate_bitmap1(st, fd, 0) < 0) {
+ pr_err("Error: Invalid bitmap\n");
+ return -EINVAL;
+ }
if (posix_memalign(&buf, 4096, 4096))
return -ENOMEM;
unsigned long long ldsize, devsize;
int bmspace;
unsigned long long headroom;
+ unsigned long long overhead;
int fd;
if (level == LEVEL_CONTAINER) {
close(fd);
devsize = ldsize >> 9;
- if (devsize < 24) {
- *freesize = 0;
- return 0;
- }
/* creating: allow suitable space for bitmap or PPL */
- bmspace = consistency_policy == CONSISTENCY_POLICY_PPL ?
- choose_ppl_space((*chunk)*2) : choose_bm_space(devsize);
+ if (consistency_policy == CONSISTENCY_POLICY_PPL)
+ bmspace = MULTIPLE_PPL_AREA_SIZE_SUPER1 >> 9;
+ else
+ bmspace = choose_bm_space(devsize);
if (data_offset == INVALID_SECTORS)
data_offset = st->data_offset;
headroom >>= 1;
data_offset = 12*2 + bmspace + headroom;
#define ONE_MEG (2*1024)
- if (data_offset > ONE_MEG)
- data_offset = (data_offset / ONE_MEG) * ONE_MEG;
+ data_offset = ROUND_UP(data_offset, ONE_MEG);
break;
}
if (st->data_offset == INVALID_SECTORS)
case 0: /* metadata at end. Round down and subtract space to reserve */
devsize = (devsize & ~(4ULL*2-1));
/* space for metadata, bblog, bitmap/ppl */
- devsize -= 8*2 + 8 + bmspace;
+ overhead = 8*2 + 8 + bmspace;
+ if (devsize < overhead) /* detect underflow */
+ goto dev_too_small_err;
+ devsize -= overhead;
break;
case 1:
case 2:
+ if (devsize < data_offset) /* detect underflow */
+ goto dev_too_small_err;
devsize -= data_offset;
break;
}
*freesize = devsize;
return 1;
+
+/* Error condition, device cannot even hold the overhead. */
+dev_too_small_err:
+ fprintf(stderr, "device %s is too small (%lluK) for "
+ "required metadata!\n", subdev, devsize>>1);
+ *freesize = 0;
+ return 0;
}
void *super1_make_v0(struct supertype *st, struct mdinfo *info, mdp_super_t *sb0)