printf(" Chunk Size : %u KiB\n",
__le16_to_cpu(map->blocks_per_strip) / 2);
printf(" Reserved : %d\n", __le32_to_cpu(dev->reserved_blocks));
- printf(" Migrate State : %s", dev->vol.migr_state ? "migrating" : "idle\n");
+ printf(" Migrate State : ");
if (dev->vol.migr_state) {
if (migr_type(dev) == MIGR_INIT)
- printf(": initializing\n");
+ printf("initialize\n");
else if (migr_type(dev) == MIGR_REBUILD)
- printf(": rebuilding\n");
+ printf("rebuild\n");
else if (migr_type(dev) == MIGR_VERIFY)
- printf(": check\n");
+ printf("check\n");
else if (migr_type(dev) == MIGR_GEN_MIGR)
- printf(": general migration\n");
+ printf("general migration\n");
else if (migr_type(dev) == MIGR_STATE_CHANGE)
- printf(": state change\n");
+ printf("state change\n");
else if (migr_type(dev) == MIGR_REPAIR)
- printf(": repair\n");
+ printf("repair\n");
else
- printf(": <unknown:%d>\n", migr_type(dev));
- }
+ printf("<unknown:%d>\n", migr_type(dev));
+ } else
+ printf("idle\n");
printf(" Map State : %s", map_state_str[map->map_state]);
if (dev->vol.migr_state) {
struct imsm_map *map = get_imsm_map(dev, 1);
/* FIXME add curr_migr_unit to resync_start conversion */
info->resync_start = 0;
else
- info->resync_start = ~0ULL;
+ info->resync_start = MaxSector;
strncpy(info->name, (char *) dev->volume, MAX_RAID_SERIAL_LEN);
info->name[MAX_RAID_SERIAL_LEN] = 0;
first->anchor->num_raid_devs = sec->anchor->num_raid_devs;
first->anchor->orig_family_num = sec->anchor->orig_family_num;
first->anchor->family_num = sec->anchor->family_num;
+ memcpy(first->anchor->sig, sec->anchor->sig, MAX_SIGNATURE_LENGTH);
for (i = 0; i < sec->anchor->num_raid_devs; i++)
imsm_copy_dev(get_imsm_dev(first, i), get_imsm_dev(sec, i));
}
struct intel_disk *idisk;
idisk = disk_list_get(dl->serial, disk_list);
- if (is_spare(&idisk->disk) &&
+ if (idisk && is_spare(&idisk->disk) &&
!is_failed(&idisk->disk) && !is_configured(&idisk->disk))
dl->index = -1;
else {
unsigned long reserve;
if (!e)
- return ~0ULL; /* error */
+ return 0;
/* coalesce and sort all extents. also, check to see if we need to
* reserve space between member arrays
} while (e[i-1].size);
free(e);
+ if (maxsize == 0)
+ return 0;
+
+ /* FIXME assumes volume at offset 0 is the first volume in a
+ * container
+ */
if (start_extent > 0)
reserve = IMSM_RESERVED_SECTORS; /* gap between raid regions */
else
reserve = 0;
if (maxsize < reserve)
- return ~0ULL;
+ return 0;
super->create_offset = ~((__u32) 0);
if (start + reserve > super->create_offset)
- return ~0ULL; /* start overflows create_offset */
+ return 0; /* start overflows create_offset */
super->create_offset = start + reserve;
return maxsize - reserve;
}
#define pr_vrb(fmt, arg...) (void) (verbose && fprintf(stderr, Name fmt, ##arg))
+/* validate_geometry_imsm_orom - check a proposed array geometry against the
+ * platform option-ROM (orom) constraints recorded in @super.
+ *
+ * Returns 1 when the (level, layout, raiddisks, chunk) combination is
+ * acceptable, 0 otherwise (emitting a diagnostic via pr_vrb when @verbose).
+ *
+ * NOTE(review): chunk-size validation is skipped for raid1 (level == 1),
+ * presumably because mirrors have no stripe/chunk — confirm against the
+ * IMSM orom capability definitions.
+ */
+static int
+validate_geometry_imsm_orom(struct intel_super *super, int level, int layout,
+			    int raiddisks, int chunk, int verbose)
+{
+	/* the platform must support this raid level at this disk count */
+	if (!is_raid_level_supported(super->orom, level, raiddisks)) {
+		pr_vrb(": platform does not support raid%d with %d disk%s\n",
+			level, raiddisks, raiddisks > 1 ? "s" : "");
+		return 0;
+	}
+	/* chunk size is only constrained when an orom is present and the
+	 * level actually stripes (not raid1)
+	 */
+	if (super->orom && level != 1 &&
+	    !imsm_orom_has_chunk(super->orom, chunk)) {
+		pr_vrb(": platform does not support a chunk size of: %d\n", chunk);
+		return 0;
+	}
+	/* IMSM supports exactly one layout per raid level */
+	if (layout != imsm_level_to_layout(level)) {
+		if (level == 5)
+			pr_vrb(": imsm raid 5 only supports the left-asymmetric layout\n");
+		else if (level == 10)
+			pr_vrb(": imsm raid 10 only supports the n2 layout\n");
+		else
+			pr_vrb(": imsm unknown layout %#x for this raid level %d\n",
+				layout, level);
+		return 0;
+	}
+
+	return 1;
+}
+
/* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd
* FIX ME add ahci details
*/
if (!super)
return 0;
- if (!is_raid_level_supported(super->orom, level, raiddisks)) {
- pr_vrb(": platform does not support raid%d with %d disk%s\n",
- level, raiddisks, raiddisks > 1 ? "s" : "");
+ if (!validate_geometry_imsm_orom(super, level, layout, raiddisks, chunk, verbose))
return 0;
- }
- if (super->orom && level != 1 &&
- !imsm_orom_has_chunk(super->orom, chunk)) {
- pr_vrb(": platform does not support a chunk size of: %d\n", chunk);
- return 0;
- }
- if (layout != imsm_level_to_layout(level)) {
- if (level == 5)
- pr_vrb(": imsm raid 5 only supports the left-asymmetric layout\n");
- else if (level == 10)
- pr_vrb(": imsm raid 10 only supports the n2 layout\n");
- else
- pr_vrb(": imsm unknown layout %#x for this raid level %d\n",
- layout, level);
- return 0;
- }
if (!dev) {
/* General test: make sure there is space for
* offset
*/
unsigned long long minsize = size;
- unsigned long long start_offset = ~0ULL;
+ unsigned long long start_offset = MaxSector;
int dcnt = 0;
if (minsize == 0)
minsize = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
esize = e[i].start - pos;
if (esize >= minsize)
found = 1;
- if (found && start_offset == ~0ULL) {
+ if (found && start_offset == MaxSector) {
start_offset = pos;
break;
} else if (found && pos != start_offset) {
i += dl->extent_cnt;
maxsize = merge_extents(super, i);
- if (maxsize < size) {
+ if (maxsize < size || maxsize == 0) {
if (verbose)
fprintf(stderr, Name ": not enough space after merge (%llu < %llu)\n",
maxsize, size);
return 0;
- } else if (maxsize == ~0ULL) {
- if (verbose)
- fprintf(stderr, Name ": failed to merge %d extents\n", i);
- return 0;
}
*freesize = maxsize;
if (cnt < raiddisks ||
(super->orom && used && used != raiddisks) ||
- maxsize < minsize) {
+ maxsize < minsize ||
+ maxsize == 0) {
fprintf(stderr, Name ": not enough devices with space to create array.\n");
return 0; /* No enough free spaces large enough */
}
* created. add_to_super and getinfo_super
* detect when autolayout is in progress.
*/
+ if (!validate_geometry_imsm_orom(st->sb, level, layout,
+ raiddisks, chunk,
+ verbose))
+ return 0;
return reserve_space(st, raiddisks, size, chunk, freesize);
}
return 1;
case 1:
case 10:
case 5:
- break;
+ return 0;
default:
+ if (verbose)
+ fprintf(stderr, Name
+ ": IMSM only supports levels 0,1,5,10\n");
return 1;
}
* FIXME handle dirty degraded
*/
if (skip && !dev->vol.dirty)
- this->resync_start = ~0ULL;
+ this->resync_start = MaxSector;
if (skip)
continue;
info_d->disk.major = d->major;
info_d->disk.minor = d->minor;
info_d->disk.raid_disk = slot;
+ info_d->recovery_start = MaxSector;
this->array.working_disks++;
}
if (consistent == 2 &&
- (!is_resync_complete(a) ||
+ (!is_resync_complete(&a->info) ||
map_state != IMSM_T_STATE_NORMAL ||
dev->vol.migr_state))
consistent = 0;
- if (is_resync_complete(a)) {
+ if (is_resync_complete(&a->info)) {
/* complete intialization / resync,
* recovery and interrupted recovery is completed in
* ->set_disk
}
} else if (!is_resyncing(dev) && !failed) {
/* mark the start of the init process if nothing is failed */
- dprintf("imsm: mark resync start (%llu)\n", a->resync_start);
+ dprintf("imsm: mark resync start\n");
if (map->map_state == IMSM_T_STATE_UNINITIALIZED)
migrate(dev, IMSM_T_STATE_NORMAL, MIGR_INIT);
else
/* mark dirty / clean */
if (dev->vol.dirty != !consistent) {
- dprintf("imsm: mark '%s' (%llu)\n",
- consistent ? "clean" : "dirty", a->resync_start);
+ dprintf("imsm: mark '%s'\n", consistent ? "clean" : "dirty");
if (consistent)
dev->vol.dirty = 0;
else
di->disk.major = dl->major;
di->disk.minor = dl->minor;
di->disk.state = 0;
+ di->recovery_start = 0;
di->data_offset = __le32_to_cpu(map->pba_of_lba0);
di->component_size = a->info.component_size;
di->container_member = inst;