struct bbm_log *bbm_log;
const char *hba; /* device path of the raid controller for this metadata */
const struct imsm_orom *orom; /* platform firmware support */
+ struct intel_super *next; /* (temp) list for disambiguating family_num */
+};
+
+/* node in the merged, cross-mpb list of member disks built while
+ * disambiguating family_num conflicts (consumed via disk_list_get())
+ */
+struct intel_disk {
+ struct imsm_disk disk; /* copy of the on-disk record; statuses are OR'd in as mpbs merge */
+ #define IMSM_UNKNOWN_OWNER (-1)
+ int owner; /* index of the owning candidate in the thunderdome table, or IMSM_UNKNOWN_OWNER */
+ struct intel_disk *next;
};
struct extent {
return NULL;
st = malloc(sizeof(*st));
+ if (!st)
+ return NULL;
memset(st, 0, sizeof(*st));
st->ss = &super_imsm;
st->max_devs = IMSM_MAX_DEVICES;
return rv;
}
-#ifndef MDASSEMBLE
static int is_spare(struct imsm_disk *disk)
{
return (disk->status & SPARE_DISK) == SPARE_DISK;
return (disk->status & FAILED_DISK) == FAILED_DISK;
}
+#ifndef MDASSEMBLE
+static __u64 blocks_per_migr_unit(struct imsm_dev *dev);
+
static void print_imsm_dev(struct imsm_dev *dev, char *uuid, int disk_idx)
{
__u64 sz;
printf(" Chunk Size : %u KiB\n",
__le16_to_cpu(map->blocks_per_strip) / 2);
printf(" Reserved : %d\n", __le32_to_cpu(dev->reserved_blocks));
- printf(" Migrate State : %s", dev->vol.migr_state ? "migrating" : "idle\n");
+ printf(" Migrate State : ");
if (dev->vol.migr_state) {
if (migr_type(dev) == MIGR_INIT)
- printf(": initializing\n");
+ printf("initialize\n");
else if (migr_type(dev) == MIGR_REBUILD)
- printf(": rebuilding\n");
+ printf("rebuild\n");
else if (migr_type(dev) == MIGR_VERIFY)
- printf(": check\n");
+ printf("check\n");
else if (migr_type(dev) == MIGR_GEN_MIGR)
- printf(": general migration\n");
+ printf("general migration\n");
else if (migr_type(dev) == MIGR_STATE_CHANGE)
- printf(": state change\n");
+ printf("state change\n");
else if (migr_type(dev) == MIGR_REPAIR)
- printf(": repair\n");
+ printf("repair\n");
else
- printf(": <unknown:%d>\n", migr_type(dev));
- }
+ printf("<unknown:%d>\n", migr_type(dev));
+ } else
+ printf("idle\n");
printf(" Map State : %s", map_state_str[map->map_state]);
if (dev->vol.migr_state) {
struct imsm_map *map = get_imsm_map(dev, 1);
+
printf(" <-- %s", map_state_str[map->map_state]);
+ printf("\n Checkpoint : %u (%llu)",
+ __le32_to_cpu(dev->vol.curr_migr_unit),
+ blocks_per_migr_unit(dev));
}
printf("\n");
printf(" Dirty State : %s\n", dev->vol.dirty ? "dirty" : "clean");
char str[MAX_RAID_SERIAL_LEN + 1];
__u64 sz;
- if (index < 0)
+ if (index < 0 || !disk)
return;
printf("\n");
/* chop device path to 'host%d' and calculate the port number */
c = strchr(&path[hba_len], '/');
+ if (!c) {
+ if (verbose)
+ fprintf(stderr, Name ": %s - invalid path name\n", path + hba_len);
+ err = 2;
+ break;
+ }
*c = '\0';
if (sscanf(&path[hba_len], "host%d", &port) == 1)
port -= host_base;
}
#endif
+/* sectors (512-byte units) per migration strip for a 'resync' style
+ * migration (repair / initialize / check)
+ */
+static __u32 migr_strip_blocks_resync(struct imsm_dev *dev)
+{
+ /* migr_strip_size when repairing or initializing parity */
+ struct imsm_map *map = get_imsm_map(dev, 0);
+ __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
+
+ switch (get_imsm_raid_level(map)) {
+ case 5:
+ case 10:
+ return chunk;
+ default:
+ /* 128KiB expressed in sectors */
+ return 128*1024 >> 9;
+ }
+}
+
+static __u32 migr_strip_blocks_rebuild(struct imsm_dev *dev)
+{
+ /* migr_strip_size when rebuilding a degraded disk, no idea why
+ * this is different than migr_strip_size_resync(), but it's good
+ * to be compatible
+ */
+ struct imsm_map *map = get_imsm_map(dev, 1);
+ __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
+
+ switch (get_imsm_raid_level(map)) {
+ case 1:
+ case 10:
+ /* mirrors: 128KiB (in sectors) when members divide evenly
+ * into domains, otherwise fall back to one chunk
+ */
+ if (map->num_members % map->num_domains == 0)
+ return 128*1024 >> 9;
+ else
+ return chunk;
+ case 5:
+ /* at least 64KiB, expressed in sectors */
+ return max((__u32) 64*1024 >> 9, chunk);
+ default:
+ return 128*1024 >> 9;
+ }
+}
+
+/* source-map stripes per migration unit during resync: the ratio of the
+ * destination (map 1) strip size to the source (map 0) strip size,
+ * clamped to a minimum of one stripe
+ */
+static __u32 num_stripes_per_unit_resync(struct imsm_dev *dev)
+{
+ struct imsm_map *lo = get_imsm_map(dev, 0);
+ struct imsm_map *hi = get_imsm_map(dev, 1);
+ __u32 lo_chunk = __le32_to_cpu(lo->blocks_per_strip);
+ __u32 hi_chunk = __le32_to_cpu(hi->blocks_per_strip);
+
+ return max((__u32) 1, hi_chunk / lo_chunk);
+}
+
+/* stripes per migration unit when rebuilding a degraded disk */
+static __u32 num_stripes_per_unit_rebuild(struct imsm_dev *dev)
+{
+ struct imsm_map *lo = get_imsm_map(dev, 0);
+ int level = get_imsm_raid_level(lo);
+
+ if (level == 1 || level == 10) {
+ struct imsm_map *hi = get_imsm_map(dev, 1);
+
+ /* mirrored levels: one stripe per mirror domain */
+ return hi->num_domains;
+ } else
+ return num_stripes_per_unit_resync(dev);
+}
+
+static __u8 imsm_num_data_members(struct imsm_dev *dev)
+{
+ /* named 'imsm_' because raid0, raid1 and raid10
+ * counter-intuitively have the same number of data disks
+ */
+ struct imsm_map *map = get_imsm_map(dev, 0);
+
+ switch (get_imsm_raid_level(map)) {
+ case 0:
+ case 1:
+ case 10:
+ return map->num_members;
+ case 5:
+ /* one member's worth of capacity goes to parity */
+ return map->num_members - 1;
+ default:
+ dprintf("%s: unsupported raid level\n", __func__);
+ return 0;
+ }
+}
+
+/* volume sectors consumed by one full segment: chunk scaled by the
+ * number of domains (mirrors) or members (raid5); plain chunk otherwise
+ */
+static __u32 parity_segment_depth(struct imsm_dev *dev)
+{
+ struct imsm_map *map = get_imsm_map(dev, 0);
+ __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
+
+ switch(get_imsm_raid_level(map)) {
+ case 1:
+ case 10:
+ return chunk * map->num_domains;
+ case 5:
+ return chunk * map->num_members;
+ default:
+ return chunk;
+ }
+}
+
+/* translate a migration-relative block offset through the destination
+ * map's striping (raid1/10 vs raid5 differ); 0 for unsupported levels
+ */
+static __u32 map_migr_block(struct imsm_dev *dev, __u32 block)
+{
+ struct imsm_map *map = get_imsm_map(dev, 1);
+ __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
+ __u32 strip = block / chunk;
+
+ switch (get_imsm_raid_level(map)) {
+ case 1:
+ case 10: {
+ __u32 vol_strip = (strip * map->num_domains) + 1;
+ __u32 vol_stripe = vol_strip / map->num_members;
+
+ return vol_stripe * chunk + block % chunk;
+ } case 5: {
+ /* raid5: num_members - 1 data strips per stripe */
+ __u32 stripe = strip / (map->num_members - 1);
+
+ return stripe * chunk + block % chunk;
+ }
+ default:
+ return 0;
+ }
+}
+
+static __u64 blocks_per_migr_unit(struct imsm_dev *dev)
+{
+ /* calculate the conversion factor between per member 'blocks'
+ * (md/{resync,rebuild}_start) and imsm migration units, return
+ * 0 for the 'not migrating' and 'unsupported migration' cases
+ */
+ if (!dev->vol.migr_state)
+ return 0;
+
+ switch (migr_type(dev)) {
+ case MIGR_VERIFY:
+ case MIGR_REPAIR:
+ case MIGR_INIT: {
+ struct imsm_map *map = get_imsm_map(dev, 0);
+ __u32 stripes_per_unit;
+ __u32 blocks_per_unit;
+ __u32 parity_depth;
+ __u32 migr_chunk;
+ __u32 block_map;
+ __u32 block_rel;
+ __u32 segment;
+ __u32 stripe;
+ __u8 disks;
+
+ /* yes, this is really the translation of migr_units to
+ * per-member blocks in the 'resync' case
+ */
+ stripes_per_unit = num_stripes_per_unit_resync(dev);
+ migr_chunk = migr_strip_blocks_resync(dev);
+ disks = imsm_num_data_members(dev);
+ blocks_per_unit = stripes_per_unit * migr_chunk * disks;
+ stripe = __le32_to_cpu(map->blocks_per_strip) * disks;
+ /* split the unit into whole stripes plus a remainder,
+ * then map the remainder through the destination layout
+ */
+ segment = blocks_per_unit / stripe;
+ block_rel = blocks_per_unit - segment * stripe;
+ parity_depth = parity_segment_depth(dev);
+ block_map = map_migr_block(dev, block_rel);
+ return block_map + parity_depth * segment;
+ }
+ case MIGR_REBUILD: {
+ __u32 stripes_per_unit;
+ __u32 migr_chunk;
+
+ /* rebuild units are simply strip-size * stripe count */
+ stripes_per_unit = num_stripes_per_unit_rebuild(dev);
+ migr_chunk = migr_strip_blocks_rebuild(dev);
+ return migr_chunk * stripes_per_unit;
+ }
+ case MIGR_GEN_MIGR:
+ case MIGR_STATE_CHANGE:
+ default:
+ return 0;
+ }
+}
+
static int imsm_level_to_layout(int level)
{
switch (level) {
struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
struct imsm_map *map = get_imsm_map(dev, 0);
struct dl *dl;
+ char *devname;
for (dl = super->disks; dl; dl = dl->next)
if (dl->raiddisk == info->disk.raid_disk)
info->component_size = __le32_to_cpu(map->blocks_per_member);
memset(info->uuid, 0, sizeof(info->uuid));
- if (map->map_state == IMSM_T_STATE_UNINITIALIZED || dev->vol.dirty)
+ if (map->map_state == IMSM_T_STATE_UNINITIALIZED || dev->vol.dirty) {
info->resync_start = 0;
- else if (dev->vol.migr_state)
- /* FIXME add curr_migr_unit to resync_start conversion */
- info->resync_start = 0;
- else
- info->resync_start = ~0ULL;
+ } else if (dev->vol.migr_state) {
+ switch (migr_type(dev)) {
+ case MIGR_REPAIR:
+ case MIGR_INIT: {
+ __u64 blocks_per_unit = blocks_per_migr_unit(dev);
+ __u64 units = __le32_to_cpu(dev->vol.curr_migr_unit);
+
+ info->resync_start = blocks_per_unit * units;
+ break;
+ }
+ case MIGR_VERIFY:
+ /* we could emulate the checkpointing of
+ * 'sync_action=check' migrations, but for now
+ * we just immediately complete them
+ */
+ case MIGR_REBUILD:
+ /* this is handled by container_content_imsm() */
+ case MIGR_GEN_MIGR:
+ case MIGR_STATE_CHANGE:
+ /* FIXME handle other migrations */
+ default:
+ /* we are not dirty, so... */
+ info->resync_start = MaxSector;
+ }
+ } else
+ info->resync_start = MaxSector;
strncpy(info->name, (char *) dev->volume, MAX_RAID_SERIAL_LEN);
info->name[MAX_RAID_SERIAL_LEN] = 0;
info->array.major_version = -1;
info->array.minor_version = -2;
- sprintf(info->text_version, "/%s/%d",
- devnum2devname(st->container_dev),
- info->container_member);
+ devname = devnum2devname(st->container_dev);
+ *info->text_version = '\0';
+ if (devname)
+ sprintf(info->text_version, "/%s/%d", devname, info->container_member);
+ free(devname);
info->safe_mode_delay = 4000; /* 4 secs like the Matrix driver */
uuid_from_super_imsm(st, info->uuid);
}
char *update, char *devname, int verbose,
int uuid_set, char *homehost)
{
- /* FIXME */
-
/* For 'assemble' and 'force' we need to return non-zero if any
* change was made. For others, the return value is ignored.
* Update options are:
* linear only
* resync: mark as dirty so a resync will happen.
* name: update the name - preserving the homehost
+ * uuid: Change the uuid of the array to match watch is given
*
* Following are not relevant for this imsm:
* sparc2.2 : update from old dodgey metadata
* super-minor: change the preferred_minor number
* summaries: update redundant counters.
- * uuid: Change the uuid of the array to match watch is given
* homehost: update the recorded homehost
* _reshape_progress: record new reshape_progress position.
*/
- int rv = 0;
- //struct intel_super *super = st->sb;
- //struct imsm_super *mpb = super->mpb;
+ int rv = 1;
+ struct intel_super *super = st->sb;
+ struct imsm_super *mpb;
- if (strcmp(update, "grow") == 0) {
- }
- if (strcmp(update, "resync") == 0) {
- /* dev->vol.dirty = 1; */
- }
+ /* we can only update container info */
+ if (!super || super->current_vol >= 0 || !super->anchor)
+ return 1;
+
+ mpb = super->anchor;
- /* IMSM has no concept of UUID or homehost */
+ if (strcmp(update, "uuid") == 0 && uuid_set && !info->update_private)
+ fprintf(stderr,
+ Name ": '--uuid' not supported for imsm metadata\n");
+ else if (strcmp(update, "uuid") == 0 && uuid_set && info->update_private) {
+ mpb->orig_family_num = *((__u32 *) info->update_private);
+ rv = 0;
+ } else if (strcmp(update, "uuid") == 0) {
+ __u32 *new_family = malloc(sizeof(*new_family));
+
+ /* update orig_family_number with the incoming random
+ * data, report the new effective uuid, and store the
+ * new orig_family_num for future updates.
+ */
+ if (new_family) {
+ memcpy(&mpb->orig_family_num, info->uuid, sizeof(__u32));
+ uuid_from_super_imsm(st, info->uuid);
+ *new_family = mpb->orig_family_num;
+ info->update_private = new_family;
+ rv = 0;
+ }
+ } else if (strcmp(update, "assemble") == 0)
+ rv = 0;
+ else
+ fprintf(stderr,
+ Name ": '--update=%s' not supported for imsm metadata\n",
+ update);
+
+ /* successful update? recompute checksum */
+ if (rv == 0)
+ mpb->check_sum = __le32_to_cpu(__gen_imsm_checksum(mpb));
return rv;
}
return 0;
}
- if (memcmp(first->anchor->sig, sec->anchor->sig, MAX_SIGNATURE_LENGTH) != 0)
- return 3;
-
/* if an anchor does not have num_raid_devs set then it is a free
* floating spare
*/
if (first->anchor->num_raid_devs > 0 &&
sec->anchor->num_raid_devs > 0) {
- if (first->anchor->orig_family_num != sec->anchor->orig_family_num ||
- first->anchor->family_num != sec->anchor->family_num)
+ /* Determine if these disks might ever have been
+ * related. Further disambiguation can only take place
+ * in load_super_imsm_all
+ */
+ __u32 first_family = first->anchor->orig_family_num;
+ __u32 sec_family = sec->anchor->orig_family_num;
+
+ if (memcmp(first->anchor->sig, sec->anchor->sig,
+ MAX_SIGNATURE_LENGTH) != 0)
return 3;
+
+ if (first_family == 0)
+ first_family = first->anchor->family_num;
+ if (sec_family == 0)
+ sec_family = sec->anchor->family_num;
+
+ if (first_family != sec_family)
+ return 3;
+
}
+
/* if 'first' is a spare promote it to a populated mpb with sec's
* family number
*/
first->anchor->num_raid_devs = sec->anchor->num_raid_devs;
first->anchor->orig_family_num = sec->anchor->orig_family_num;
first->anchor->family_num = sec->anchor->family_num;
+ memcpy(first->anchor->sig, sec->anchor->sig, MAX_SIGNATURE_LENGTH);
for (i = 0; i < sec->anchor->num_raid_devs; i++)
imsm_copy_dev(get_imsm_dev(first, i), get_imsm_dev(sec, i));
}
{
struct stat st;
char path[256];
- char dname[100];
+ char dname[PATH_MAX];
char *nm;
int rv;
snprintf(name, MAX_RAID_SERIAL_LEN, "/dev/%s", nm);
}
-
extern int scsi_get_serial(int fd, void *buf, size_t buf_len);
static int imsm_read_serial(int fd, char *devname,
strncpy((char *) dest, (char *) src, MAX_RAID_SERIAL_LEN);
}
+#ifndef MDASSEMBLE
static struct dl *serial_to_dl(__u8 *serial, struct intel_super *super)
{
struct dl *dl;
return dl;
}
+#endif
+
+/* find the imsm_disk record in 'mpb' whose serial matches 'serial';
+ * on success optionally report its index via 'idx' ('idx' is left
+ * untouched on a miss); returns NULL when no record matches
+ */
+static struct imsm_disk *
+__serial_to_disk(__u8 *serial, struct imsm_super *mpb, int *idx)
+{
+ int i;
+
+ for (i = 0; i < mpb->num_disks; i++) {
+ struct imsm_disk *disk = __get_imsm_disk(mpb, i);
+
+ if (serialcmp(disk->serial, serial) == 0) {
+ if (idx)
+ *idx = i;
+ return disk;
+ }
+ }
+
+ return NULL;
+}
static int
load_imsm_disk(int fd, struct intel_super *super, char *devname, int keep_fd)
{
+ struct imsm_disk *disk;
struct dl *dl;
struct stat stb;
int rv;
- int i;
- int alloc = 1;
+ char name[40];
__u8 serial[MAX_RAID_SERIAL_LEN];
rv = imsm_read_serial(fd, devname, serial);
if (rv != 0)
return 2;
- /* check if this is a disk we have seen before. it may be a spare in
- * super->disks while the current anchor believes it is a raid member,
- * check if we need to update dl->index
- */
- dl = serial_to_dl(serial, super);
- if (!dl)
- dl = malloc(sizeof(*dl));
- else
- alloc = 0;
-
+ dl = calloc(1, sizeof(*dl));
if (!dl) {
if (devname)
fprintf(stderr,
return 2;
}
- if (alloc) {
- fstat(fd, &stb);
- dl->major = major(stb.st_rdev);
- dl->minor = minor(stb.st_rdev);
- dl->next = super->disks;
- dl->fd = keep_fd ? fd : -1;
- dl->devname = devname ? strdup(devname) : NULL;
- serialcpy(dl->serial, serial);
- dl->index = -2;
- dl->e = NULL;
- } else if (keep_fd)
- dl->fd = fd;
+ fstat(fd, &stb);
+ dl->major = major(stb.st_rdev);
+ dl->minor = minor(stb.st_rdev);
+ dl->next = super->disks;
+ dl->fd = keep_fd ? fd : -1;
+ assert(super->disks == NULL);
+ super->disks = dl;
+ serialcpy(dl->serial, serial);
+ dl->index = -2;
+ dl->e = NULL;
+ fd2devname(fd, name);
+ if (devname)
+ dl->devname = strdup(devname);
+ else
+ dl->devname = strdup(name);
/* look up this disk's index in the current anchor */
- for (i = 0; i < super->anchor->num_disks; i++) {
- struct imsm_disk *disk_iter;
-
- disk_iter = __get_imsm_disk(super->anchor, i);
-
- if (serialcmp(disk_iter->serial, dl->serial) == 0) {
- dl->disk = *disk_iter;
- /* only set index on disks that are a member of a
- * populated contianer, i.e. one with raid_devs
- */
- if (dl->disk.status & FAILED_DISK)
- dl->index = -2;
- else if (dl->disk.status & SPARE_DISK)
- dl->index = -1;
- else
- dl->index = i;
-
- break;
- }
- }
-
- /* no match, maybe a stale failed drive */
- if (i == super->anchor->num_disks && dl->index >= 0) {
- dl->disk = *__get_imsm_disk(super->anchor, dl->index);
- if (dl->disk.status & FAILED_DISK)
+ disk = __serial_to_disk(dl->serial, super->anchor, &dl->index);
+ if (disk) {
+ dl->disk = *disk;
+ /* only set index on disks that are a member of a
+ * populated contianer, i.e. one with raid_devs
+ */
+ if (is_failed(&dl->disk))
dl->index = -2;
+ else if (is_spare(&dl->disk))
+ dl->index = -1;
}
- if (alloc)
- super->disks = dl;
-
return 0;
}
struct stat;
struct imsm_super *anchor;
__u32 check_sum;
- int rc;
get_dev_size(fd, NULL, &dsize);
return 2;
}
- rc = load_imsm_disk(fd, super, devname, 0);
- if (rc == 0)
- rc = parse_raid_devices(super);
- return rc;
+ return 0;
}
/* read the extended mpb */
*/
super->bbm_log = __get_imsm_bbm_log(super->anchor);
- rc = load_imsm_disk(fd, super, devname, 0);
- if (rc == 0)
- rc = parse_raid_devices(super);
+ return 0;
+}
+
+/* read the anchor mpb from 'fd', record the disk itself, then parse the
+ * raid devices; stops and returns the first non-zero error code
+ */
+static int
+load_and_parse_mpb(int fd, struct intel_super *super, char *devname, int keep_fd)
+{
+ int err;
+
+ err = load_imsm_mpb(fd, super, devname);
+ if (err)
+ return err;
+ err = load_imsm_disk(fd, super, devname, keep_fd);
+ if (err)
+ return err;
+ err = parse_raid_devices(super);
- return rv;
+ return err;
+}
static void __free_imsm_disk(struct dl *d)
return 0;
}
+/* linear scan of the merged disk list by serial; NULL when not found */
+static struct intel_disk *disk_list_get(__u8 *serial, struct intel_disk *disk_list)
+{
+ struct intel_disk *idisk = disk_list;
+
+ while (idisk) {
+ if (serialcmp(idisk->disk.serial, serial) == 0)
+ break;
+ idisk = idisk->next;
+ }
+
+ return idisk;
+}
+
+/* Weigh 'super' (one candidate mpb; exactly one local disk) against the
+ * table of per-family candidates collected so far:
+ * - identical checksum: duplicate of the table entry, nothing to do
+ * - same family but 'super' has a newer generation (without losing
+ * configured status): 'super' replaces the table entry
+ * - otherwise the table entry wins and 'super' is dropped
+ * In all cases this mpb's disk records are folded into the merged
+ * *disk_list. Returns the (possibly grown) table size, or -1 when a
+ * disk-list allocation fails.
+ */
+static int __prep_thunderdome(struct intel_super **table, int tbl_size,
+ struct intel_super *super,
+ struct intel_disk **disk_list)
+{
+ struct imsm_disk *d = &super->disks->disk;
+ struct imsm_super *mpb = super->anchor;
+ int i, j;
+
+ for (i = 0; i < tbl_size; i++) {
+ struct imsm_super *tbl_mpb = table[i]->anchor;
+ struct imsm_disk *tbl_d = &table[i]->disks->disk;
+
+ if (tbl_mpb->family_num == mpb->family_num) {
+ if (tbl_mpb->check_sum == mpb->check_sum) {
+ dprintf("%s: mpb from %d:%d matches %d:%d\n",
+ __func__, super->disks->major,
+ super->disks->minor,
+ table[i]->disks->major,
+ table[i]->disks->minor);
+ break;
+ }
+
+ if (((is_configured(d) && !is_configured(tbl_d)) ||
+ is_configured(d) == is_configured(tbl_d)) &&
+ tbl_mpb->generation_num < mpb->generation_num) {
+ /* current version of the mpb is a
+ * better candidate than the one in
+ * super_table, but copy over "cross
+ * generational" status
+ */
+ struct intel_disk *idisk;
+
+ dprintf("%s: mpb from %d:%d replaces %d:%d\n",
+ __func__, super->disks->major,
+ super->disks->minor,
+ table[i]->disks->major,
+ table[i]->disks->minor);
+
+ idisk = disk_list_get(tbl_d->serial, *disk_list);
+ if (idisk && is_failed(&idisk->disk))
+ tbl_d->status |= FAILED_DISK;
+ break;
+ } else {
+ struct intel_disk *idisk;
+ struct imsm_disk *disk;
+
+ /* tbl_mpb is more up to date, but copy
+ * over cross generational status before
+ * returning
+ */
+ disk = __serial_to_disk(d->serial, mpb, NULL);
+ if (disk && is_failed(disk))
+ d->status |= FAILED_DISK;
+
+ idisk = disk_list_get(d->serial, *disk_list);
+ if (idisk) {
+ idisk->owner = i;
+ if (disk && is_configured(disk))
+ idisk->disk.status |= CONFIGURED_DISK;
+ }
+
+ dprintf("%s: mpb from %d:%d prefer %d:%d\n",
+ __func__, super->disks->major,
+ super->disks->minor,
+ table[i]->disks->major,
+ table[i]->disks->minor);
+
+ return tbl_size;
+ }
+ }
+ }
+
+ /* new family, or 'super' won the slot for its family */
+ if (i >= tbl_size)
+ table[tbl_size++] = super;
+ else
+ table[i] = super;
+
+ /* update/extend the merged list of imsm_disk records */
+ for (j = 0; j < mpb->num_disks; j++) {
+ struct imsm_disk *disk = __get_imsm_disk(mpb, j);
+ struct intel_disk *idisk;
+
+ idisk = disk_list_get(disk->serial, *disk_list);
+ if (idisk) {
+ idisk->disk.status |= disk->status;
+ if (is_configured(&idisk->disk) ||
+ is_failed(&idisk->disk))
+ idisk->disk.status &= ~(SPARE_DISK);
+ } else {
+ idisk = calloc(1, sizeof(*idisk));
+ if (!idisk)
+ return -1;
+ idisk->owner = IMSM_UNKNOWN_OWNER;
+ idisk->disk = *disk;
+ idisk->next = *disk_list;
+ *disk_list = idisk;
+ }
+
+ /* the local disk claims ownership of its table slot */
+ if (serialcmp(idisk->disk.serial, d->serial) == 0)
+ idisk->owner = i;
+ }
+
+ return tbl_size;
+}
+
+/* check that every disk recorded in super's mpb appears in the merged
+ * disk list and is owned by table slot 'owner' (or is unclaimed);
+ * returns 'super' when membership is fully consistent, NULL otherwise
+ */
+static struct intel_super *
+validate_members(struct intel_super *super, struct intel_disk *disk_list,
+ const int owner)
+{
+ struct imsm_super *mpb = super->anchor;
+ int ok_count = 0;
+ int i;
+
+ for (i = 0; i < mpb->num_disks; i++) {
+ struct imsm_disk *disk = __get_imsm_disk(mpb, i);
+ struct intel_disk *idisk;
+
+ idisk = disk_list_get(disk->serial, disk_list);
+ if (idisk) {
+ if (idisk->owner == owner ||
+ idisk->owner == IMSM_UNKNOWN_OWNER)
+ ok_count++;
+ else
+ dprintf("%s: '%.16s' owner %d != %d\n",
+ __func__, disk->serial, idisk->owner,
+ owner);
+ } else {
+ dprintf("%s: unknown disk %x [%d]: %.16s\n",
+ __func__, __le32_to_cpu(mpb->family_num), i,
+ disk->serial);
+ break;
+ }
+ }
+
+ if (ok_count == mpb->num_disks)
+ return super;
+ return NULL;
+}
+
+/* report every candidate in 'super_list' that carries the conflicting
+ * 'family_num' and is being taken offline
+ */
+static void show_conflicts(__u32 family_num, struct intel_super *super_list)
+{
+ struct intel_super *s;
+
+ for (s = super_list; s; s = s->next) {
+ if (family_num != s->anchor->family_num)
+ continue;
+ fprintf(stderr, "Conflict, offlining family %#x on '%s'\n",
+ __le32_to_cpu(family_num), s->disks->devname);
+ }
+}
+
+/* Reduce a list of per-disk intel_super candidates to a single
+ * 'champion' container configuration:
+ * 1) bucket candidates by family and keep the best mpb per family
+ * (__prep_thunderdome), merging disk records as we go
+ * 2) validate each survivor's membership against the merged disk list
+ * 3) pick one populated family (falling back to a lone spare record),
+ * reporting conflicts that need '--update=uuid' to resolve
+ * 4) migrate all dl entries onto the champion and unlink it from
+ * *super_list so the caller can free the remainder
+ * Returns the champion, or NULL when nothing can be assembled.
+ */
+static struct intel_super *
+imsm_thunderdome(struct intel_super **super_list, int len)
+{
+ struct intel_super *super_table[len];
+ struct intel_disk *disk_list = NULL;
+ struct intel_super *champion, *spare;
+ struct intel_super *s, **del;
+ int tbl_size = 0;
+ int conflict;
+ int i;
+
+ memset(super_table, 0, sizeof(super_table));
+ for (s = *super_list; s; s = s->next)
+ tbl_size = __prep_thunderdome(super_table, tbl_size, s, &disk_list);
+
+ for (i = 0; i < tbl_size; i++) {
+ struct imsm_disk *d;
+ struct intel_disk *idisk;
+ struct imsm_super *mpb = super_table[i]->anchor;
+
+ s = super_table[i];
+ d = &s->disks->disk;
+
+ /* 'd' must appear in merged disk list for its
+ * configuration to be valid
+ */
+ idisk = disk_list_get(d->serial, disk_list);
+ if (idisk && idisk->owner == i)
+ s = validate_members(s, disk_list, i);
+ else
+ s = NULL;
+
+ if (!s)
+ dprintf("%s: marking family: %#x from %d:%d offline\n",
+ __func__, mpb->family_num,
+ super_table[i]->disks->major,
+ super_table[i]->disks->minor);
+ super_table[i] = s;
+ }
+
+ /* This is where the mdadm implementation differs from the Windows
+ * driver which has no strict concept of a container. We can only
+ * assemble one family from a container, so when returning a prodigal
+ * array member to this system the code will not be able to disambiguate
+ * the container contents that should be assembled ("foreign" versus
+ * "local"). It requires user intervention to set the orig_family_num
+ * to a new value to establish a new container. The Windows driver in
+ * this situation fixes up the volume name in place and manages the
+ * foreign array as an independent entity.
+ */
+ s = NULL;
+ spare = NULL;
+ conflict = 0;
+ for (i = 0; i < tbl_size; i++) {
+ struct intel_super *tbl_ent = super_table[i];
+ int is_spare = 0;
+
+ if (!tbl_ent)
+ continue;
+
+ if (tbl_ent->anchor->num_raid_devs == 0) {
+ spare = tbl_ent;
+ is_spare = 1;
+ }
+
+ /* first populated family wins; later ones are conflicts */
+ if (s && !is_spare) {
+ show_conflicts(tbl_ent->anchor->family_num, *super_list);
+ conflict++;
+ } else if (!s && !is_spare)
+ s = tbl_ent;
+ }
+
+ if (!s)
+ s = spare;
+ if (!s) {
+ champion = NULL;
+ goto out;
+ }
+ champion = s;
+
+ if (conflict)
+ fprintf(stderr, "Chose family %#x on '%s', "
+ "assemble conflicts to new container with '--update=uuid'\n",
+ __le32_to_cpu(s->anchor->family_num), s->disks->devname);
+
+ /* collect all dl's onto 'champion', and update them to
+ * champion's version of the status
+ */
+ for (s = *super_list; s; s = s->next) {
+ struct imsm_super *mpb = champion->anchor;
+ struct dl *dl = s->disks;
+
+ if (s == champion)
+ continue;
+
+ for (i = 0; i < mpb->num_disks; i++) {
+ struct imsm_disk *disk;
+
+ disk = __serial_to_disk(dl->serial, mpb, &dl->index);
+ if (disk) {
+ dl->disk = *disk;
+ /* only set index on disks that are a member of
+ * a populated contianer, i.e. one with
+ * raid_devs
+ */
+ if (is_failed(&dl->disk))
+ dl->index = -2;
+ else if (is_spare(&dl->disk))
+ dl->index = -1;
+ break;
+ }
+ }
+
+ if (i >= mpb->num_disks) {
+ struct intel_disk *idisk;
+
+ /* unknown to the champion: keep healthy spares,
+ * demote everything else to index -2
+ */
+ idisk = disk_list_get(dl->serial, disk_list);
+ if (idisk && is_spare(&idisk->disk) &&
+ !is_failed(&idisk->disk) && !is_configured(&idisk->disk))
+ dl->index = -1;
+ else {
+ dl->index = -2;
+ continue;
+ }
+ }
+
+ dl->next = champion->disks;
+ champion->disks = dl;
+ s->disks = NULL;
+ }
+
+ /* delete 'champion' from super_list */
+ for (del = super_list; *del; ) {
+ if (*del == champion) {
+ *del = (*del)->next;
+ break;
+ } else
+ del = &(*del)->next;
+ }
+ champion->next = NULL;
+
+ out:
+ while (disk_list) {
+ struct intel_disk *idisk = disk_list;
+
+ disk_list = disk_list->next;
+ free(idisk);
+ }
+
+ return champion;
+}
+
static int load_super_imsm_all(struct supertype *st, int fd, void **sbp,
char *devname, int keep_fd)
{
struct mdinfo *sra;
- struct intel_super *super;
- struct mdinfo *sd, *best = NULL;
- __u32 bestgen = 0;
- __u32 gen;
- char nm[20];
- int dfd;
- int rv;
+ struct intel_super *super_list = NULL;
+ struct intel_super *super = NULL;
int devnum = fd2devnum(fd);
+ struct mdinfo *sd;
int retry;
+ int err = 0;
+ int i;
enum sysfs_read_flags flags;
flags = GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE;
if (sra->array.major_version != -1 ||
sra->array.minor_version != -2 ||
- strcmp(sra->text_version, "imsm") != 0)
- return 1;
-
- super = alloc_super(0);
- if (!super)
- return 1;
-
- /* find the most up to date disk in this array, skipping spares */
- for (sd = sra->devs; sd; sd = sd->next) {
+ strcmp(sra->text_version, "imsm") != 0) {
+ err = 1;
+ goto error;
+ }
+ /* load all mpbs */
+ for (sd = sra->devs, i = 0; sd; sd = sd->next, i++) {
+ struct intel_super *s = alloc_super(0);
+ char nm[32];
+ int dfd;
+
+ err = 1;
+ if (!s)
+ goto error;
+ s->next = super_list;
+ super_list = s;
+
+ err = 2;
sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
dfd = dev_open(nm, keep_fd ? O_RDWR : O_RDONLY);
- if (dfd < 0) {
- free_imsm(super);
- return 2;
- }
- rv = load_imsm_mpb(dfd, super, NULL);
+ if (dfd < 0)
+ goto error;
+
+ err = load_and_parse_mpb(dfd, s, NULL, keep_fd);
/* retry the load if we might have raced against mdmon */
- if (rv == 3 && mdmon_running(devnum))
+ if (err == 3 && mdmon_running(devnum))
for (retry = 0; retry < 3; retry++) {
usleep(3000);
- rv = load_imsm_mpb(dfd, super, NULL);
- if (rv != 3)
+ err = load_and_parse_mpb(dfd, s, NULL, keep_fd);
+ if (err != 3)
break;
}
if (!keep_fd)
close(dfd);
- if (rv == 0) {
- if (super->anchor->num_raid_devs == 0)
- gen = 0;
- else
- gen = __le32_to_cpu(super->anchor->generation_num);
- if (!best || gen > bestgen) {
- bestgen = gen;
- best = sd;
- }
- } else {
- free_imsm(super);
- return rv;
- }
- }
-
- if (!best) {
- free_imsm(super);
- return 1;
+ if (err)
+ goto error;
}
- /* load the most up to date anchor */
- sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
- dfd = dev_open(nm, O_RDONLY);
- if (dfd < 0) {
- free_imsm(super);
- return 1;
- }
- rv = load_imsm_mpb(dfd, super, NULL);
- close(dfd);
- if (rv != 0) {
- free_imsm(super);
- return 2;
- }
-
- /* re-parse the disk list with the current anchor */
- for (sd = sra->devs ; sd ; sd = sd->next) {
- sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
- dfd = dev_open(nm, keep_fd? O_RDWR : O_RDONLY);
- if (dfd < 0) {
- free_imsm(super);
- return 2;
- }
- load_imsm_disk(dfd, super, NULL, keep_fd);
- if (!keep_fd)
- close(dfd);
+ /* all mpbs enter, maybe one leaves */
+ super = imsm_thunderdome(&super_list, i);
+ if (!super) {
+ err = 1;
+ goto error;
}
-
if (find_missing(super) != 0) {
free_imsm(super);
- return 2;
+ err = 2;
+ goto error;
}
if (st->subarray[0]) {
super->current_vol = atoi(st->subarray);
else {
free_imsm(super);
- return 1;
+ err = 1;
+ goto error;
}
}
+ err = 0;
+
+ error:
+ while (super_list) {
+ struct intel_super *s = super_list;
+
+ super_list = super_list->next;
+ free_imsm(s);
+ }
+ sysfs_free(sra);
+
+ if (err)
+ return err;
*sbp = super;
st->container_dev = devnum;
- if (st->ss == NULL) {
+ if (err == 0 && st->ss == NULL) {
st->ss = &super_imsm;
st->minor_version = 0;
st->max_devs = IMSM_MAX_DEVICES;
return 1;
}
- rv = load_imsm_mpb(fd, super, devname);
+ rv = load_and_parse_mpb(fd, super, devname, 0);
if (rv) {
if (devname)
map->ddf = 1;
if (info->level == 1 && info->raid_disks > 2) {
+ free(dev);
+ free(dv);
fprintf(stderr, Name": imsm does not support more than 2 disks"
"in a raid1 volume\n");
return 0;
size_t mpb_size;
char *version;
- if (!info) {
- st->sb = NULL;
- return 0;
- }
if (st->sb)
- return init_super_imsm_volume(st, info, size, name, homehost,
- uuid);
+ return init_super_imsm_volume(st, info, size, name, homehost, uuid);
+
+ if (info)
+ mpb_size = disks_to_mpb_size(info->nr_disks);
+ else
+ mpb_size = 512;
super = alloc_super(1);
- if (!super)
- return 0;
- mpb_size = disks_to_mpb_size(info->nr_disks);
- if (posix_memalign(&super->buf, 512, mpb_size) != 0) {
+ if (super && posix_memalign(&super->buf, 512, mpb_size) != 0) {
free(super);
+ super = NULL;
+ }
+ if (!super) {
+ fprintf(stderr, Name
+ ": %s could not allocate superblock\n", __func__);
return 0;
}
+ memset(super->buf, 0, mpb_size);
mpb = super->buf;
- memset(mpb, 0, mpb_size);
+ mpb->mpb_size = __cpu_to_le32(mpb_size);
+ st->sb = super;
+
+ if (info == NULL) {
+ /* zeroing superblock */
+ return 0;
+ }
mpb->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
strcpy(version, MPB_SIGNATURE);
version += strlen(MPB_SIGNATURE);
strcpy(version, MPB_VERSION_RAID0);
- mpb->mpb_size = mpb_size;
- st->sb = super;
return 1;
}
struct imsm_dev *_dev = __get_imsm_dev(mpb, 0);
struct imsm_disk *_disk = __get_imsm_disk(mpb, dl->index);
+ if (!_dev || !_disk) {
+ fprintf(stderr, Name ": BUG mpb setup error\n");
+ return 1;
+ }
*_dev = *dev;
*_disk = dl->disk;
sum = random32();
dd->fd = fd;
dd->e = NULL;
rv = imsm_read_serial(fd, devname, dd->serial);
- if (rv) {
+ if (rv && check_env("IMSM_DEVNAME_AS_SERIAL")) {
+ memset(dd->serial, 0, MAX_RAID_SERIAL_LEN);
+ fd2devname(fd, (char *) dd->serial);
+ } else if (rv) {
fprintf(stderr,
Name ": failed to retrieve scsi serial, aborting\n");
free(dd);
return 0;
}
-static int store_imsm_mpb(int fd, struct intel_super *super);
+static int store_imsm_mpb(int fd, struct imsm_super *mpb);
+
+static union {
+ char buf[512];
+ struct imsm_super anchor;
+} spare_record __attribute__ ((aligned(512)));
/* spare records have their own family number and do not have any defined raid
* devices
*/
static int write_super_imsm_spares(struct intel_super *super, int doclose)
{
- struct imsm_super mpb_save;
struct imsm_super *mpb = super->anchor;
+ struct imsm_super *spare = &spare_record.anchor;
__u32 sum;
struct dl *d;
- mpb_save = *mpb;
- mpb->num_raid_devs = 0;
- mpb->num_disks = 1;
- mpb->mpb_size = sizeof(struct imsm_super);
- mpb->generation_num = __cpu_to_le32(1UL);
+ spare->mpb_size = __cpu_to_le32(sizeof(struct imsm_super)),
+ spare->generation_num = __cpu_to_le32(1UL),
+ spare->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
+ spare->num_disks = 1,
+ spare->num_raid_devs = 0,
+ spare->cache_size = mpb->cache_size,
+ spare->pwr_cycle_count = __cpu_to_le32(1),
+
+ snprintf((char *) spare->sig, MAX_SIGNATURE_LENGTH,
+ MPB_SIGNATURE MPB_VERSION_RAID0);
for (d = super->disks; d; d = d->next) {
if (d->index != -1)
continue;
- mpb->disk[0] = d->disk;
- sum = __gen_imsm_checksum(mpb);
- mpb->family_num = __cpu_to_le32(sum);
- mpb->orig_family_num = 0;
- sum = __gen_imsm_checksum(mpb);
- mpb->check_sum = __cpu_to_le32(sum);
+ spare->disk[0] = d->disk;
+ sum = __gen_imsm_checksum(spare);
+ spare->family_num = __cpu_to_le32(sum);
+ spare->orig_family_num = 0;
+ sum = __gen_imsm_checksum(spare);
+ spare->check_sum = __cpu_to_le32(sum);
- if (store_imsm_mpb(d->fd, super)) {
+ if (store_imsm_mpb(d->fd, spare)) {
fprintf(stderr, "%s: failed for device %d:%d %s\n",
__func__, d->major, d->minor, strerror(errno));
- *mpb = mpb_save;
return 1;
}
if (doclose) {
}
}
- *mpb = mpb_save;
return 0;
}
for (d = super->disks; d ; d = d->next) {
if (d->index < 0)
continue;
- if (store_imsm_mpb(d->fd, super))
+ if (store_imsm_mpb(d->fd, mpb))
fprintf(stderr, "%s: failed for device %d:%d %s\n",
__func__, d->major, d->minor, strerror(errno));
if (doclose) {
}
#endif
-static int store_zero_imsm(struct supertype *st, int fd)
+static int store_super_imsm(struct supertype *st, int fd)
{
- unsigned long long dsize;
- void *buf;
-
- get_dev_size(fd, NULL, &dsize);
-
- /* first block is stored on second to last sector of the disk */
- if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
- return 1;
+ struct intel_super *super = st->sb;
+ struct imsm_super *mpb = super ? super->anchor : NULL;
- if (posix_memalign(&buf, 512, 512) != 0)
+ if (!mpb)
return 1;
- memset(buf, 0, 512);
- if (write(fd, buf, 512) != 512)
- return 1;
- return 0;
+#ifndef MDASSEMBLE
+ return store_imsm_mpb(fd, mpb);
+#else
+ return 1;
+#endif
}
static int imsm_bbm_log_size(struct imsm_super *mpb)
unsigned long reserve;
if (!e)
- return ~0ULL; /* error */
+ return 0;
/* coalesce and sort all extents. also, check to see if we need to
* reserve space between member arrays
} while (e[i-1].size);
free(e);
+ if (maxsize == 0)
+ return 0;
+
+ /* FIXME assumes volume at offset 0 is the first volume in a
+ * container
+ */
if (start_extent > 0)
reserve = IMSM_RESERVED_SECTORS; /* gap between raid regions */
else
reserve = 0;
if (maxsize < reserve)
- return ~0ULL;
+ return 0;
super->create_offset = ~((__u32) 0);
if (start + reserve > super->create_offset)
- return ~0ULL; /* start overflows create_offset */
+ return 0; /* start overflows create_offset */
super->create_offset = start + reserve;
return maxsize - reserve;
}
#define pr_vrb(fmt, arg...) (void) (verbose && fprintf(stderr, Name fmt, ##arg))
+static int
+validate_geometry_imsm_orom(struct intel_super *super, int level, int layout,
+ int raiddisks, int chunk, int verbose)
+{
+ if (!is_raid_level_supported(super->orom, level, raiddisks)) {
+ pr_vrb(": platform does not support raid%d with %d disk%s\n",
+ level, raiddisks, raiddisks > 1 ? "s" : "");
+ return 0;
+ }
+ if (super->orom && level != 1 &&
+ !imsm_orom_has_chunk(super->orom, chunk)) {
+ pr_vrb(": platform does not support a chunk size of: %d\n", chunk);
+ return 0;
+ }
+ if (layout != imsm_level_to_layout(level)) {
+ if (level == 5)
+ pr_vrb(": imsm raid 5 only supports the left-asymmetric layout\n");
+ else if (level == 10)
+ pr_vrb(": imsm raid 10 only supports the n2 layout\n");
+ else
+ pr_vrb(": imsm unknown layout %#x for this raid level %d\n",
+ layout, level);
+ return 0;
+ }
+
+ return 1;
+}
+
/* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd
* FIX ME add ahci details
*/
if (!super)
return 0;
- if (!is_raid_level_supported(super->orom, level, raiddisks)) {
- pr_vrb(": platform does not support raid%d with %d disk%s\n",
- level, raiddisks, raiddisks > 1 ? "s" : "");
+ if (!validate_geometry_imsm_orom(super, level, layout, raiddisks, chunk, verbose))
return 0;
- }
- if (super->orom && level != 1 &&
- !imsm_orom_has_chunk(super->orom, chunk)) {
- pr_vrb(": platform does not support a chunk size of: %d\n", chunk);
- return 0;
- }
- if (layout != imsm_level_to_layout(level)) {
- if (level == 5)
- pr_vrb(": imsm raid 5 only supports the left-asymmetric layout\n");
- else if (level == 10)
- pr_vrb(": imsm raid 10 only supports the n2 layout\n");
- else
- pr_vrb(": imsm unknown layout %#x for this raid level %d\n",
- layout, level);
- return 0;
- }
if (!dev) {
/* General test: make sure there is space for
* offset
*/
unsigned long long minsize = size;
- unsigned long long start_offset = ~0ULL;
+ unsigned long long start_offset = MaxSector;
int dcnt = 0;
if (minsize == 0)
minsize = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
esize = e[i].start - pos;
if (esize >= minsize)
found = 1;
- if (found && start_offset == ~0ULL) {
+ if (found && start_offset == MaxSector) {
start_offset = pos;
break;
} else if (found && pos != start_offset) {
i += dl->extent_cnt;
maxsize = merge_extents(super, i);
- if (maxsize < size) {
+ if (maxsize < size || maxsize == 0) {
if (verbose)
fprintf(stderr, Name ": not enough space after merge (%llu < %llu)\n",
maxsize, size);
return 0;
- } else if (maxsize == ~0ULL) {
- if (verbose)
- fprintf(stderr, Name ": failed to merge %d extents\n", i);
- return 0;
}
*freesize = maxsize;
if (cnt < raiddisks ||
(super->orom && used && used != raiddisks) ||
- maxsize < minsize) {
+ maxsize < minsize ||
+ maxsize == 0) {
fprintf(stderr, Name ": not enough devices with space to create array.\n");
return 0; /* No enough free spaces large enough */
}
{
int fd, cfd;
struct mdinfo *sra;
+ int is_member = 0;
/* if given unused devices create a container
* if given given devices in a container create a member volume
* created. add_to_super and getinfo_super
* detect when autolayout is in progress.
*/
+ if (!validate_geometry_imsm_orom(st->sb, level, layout,
+ raiddisks, chunk,
+ verbose))
+ return 0;
return reserve_space(st, raiddisks, size, chunk, freesize);
}
return 1;
dev, freesize, verbose);
}
- /* limit creation to the following levels */
- if (!dev)
- switch (level) {
- case 0:
- case 1:
- case 10:
- case 5:
- break;
- default:
- return 1;
- }
-
/* This device needs to be a device in an 'imsm' container */
fd = open(dev, O_RDONLY|O_EXCL, 0);
if (fd >= 0) {
}
/* Well, it is in use by someone, maybe an 'imsm' container. */
cfd = open_container(fd);
+ close(fd);
if (cfd < 0) {
- close(fd);
if (verbose)
fprintf(stderr, Name ": Cannot use %s: It is busy\n",
dev);
return 0;
}
sra = sysfs_read(cfd, 0, GET_VERSION);
- close(fd);
if (sra && sra->array.major_version == -1 &&
- strcmp(sra->text_version, "imsm") == 0) {
+ strcmp(sra->text_version, "imsm") == 0)
+ is_member = 1;
+ sysfs_free(sra);
+ if (is_member) {
/* This is a member of a imsm container. Load the container
* and try to create a volume
*/
size, dev,
freesize, verbose);
}
- close(cfd);
- } else /* may belong to another container */
- return 0;
+ }
- return 1;
+ if (verbose)
+ fprintf(stderr, Name ": failed container membership check\n");
+
+ close(cfd);
+ return 0;
}
#endif /* MDASSEMBLE */
+static int is_rebuilding(struct imsm_dev *dev)
+{
+ struct imsm_map *migr_map;
+
+ if (!dev->vol.migr_state)
+ return 0;
+
+ if (migr_type(dev) != MIGR_REBUILD)
+ return 0;
+
+ migr_map = get_imsm_map(dev, 1);
+
+ if (migr_map->map_state == IMSM_T_STATE_DEGRADED)
+ return 1;
+ else
+ return 0;
+}
+
+static void update_recovery_start(struct imsm_dev *dev, struct mdinfo *array)
+{
+ struct mdinfo *rebuild = NULL;
+ struct mdinfo *d;
+ __u32 units;
+
+ if (!is_rebuilding(dev))
+ return;
+
+ /* Find the rebuild target, but punt on the dual rebuild case */
+ for (d = array->devs; d; d = d->next)
+ if (d->recovery_start == 0) {
+ if (rebuild)
+ return;
+ rebuild = d;
+ }
+
+ if (!rebuild) {
+ /* no disk is marked for rebuild despite the rebuild
+ * migration state — presumably a stale disk_ord_tbl
+ * (TODO confirm); skip the checkpoint update rather
+ * than dereference NULL
+ */
+ return;
+ }
+
+ units = __le32_to_cpu(dev->vol.curr_migr_unit);
+ rebuild->recovery_start = units * blocks_per_migr_unit(dev);
+}
+
+
static struct mdinfo *container_content_imsm(struct supertype *st)
{
/* Given a container loaded by load_super_imsm_all,
}
this = malloc(sizeof(*this));
+ if (!this) {
+ fprintf(stderr, Name ": failed to allocate %zu bytes\n",
+ sizeof(*this));
+ break;
+ }
memset(this, 0, sizeof(*this));
this->next = rest;
super->current_vol = i;
getinfo_super_imsm_volume(st, this);
for (slot = 0 ; slot < map->num_members; slot++) {
+ unsigned long long recovery_start;
struct mdinfo *info_d;
struct dl *d;
int idx;
ord = get_imsm_ord_tbl_ent(dev, slot);
for (d = super->disks; d ; d = d->next)
if (d->index == idx)
- break;
+ break;
+ recovery_start = MaxSector;
if (d == NULL)
skip = 1;
if (d && is_failed(&d->disk))
skip = 1;
if (ord & IMSM_ORD_REBUILD)
- skip = 1;
+ recovery_start = 0;
/*
* if we skip some disks the array will be assmebled degraded;
- * reset resync start to avoid a dirty-degraded situation
+ * reset resync start to avoid a dirty-degraded
+ * situation when performing the intial sync
*
* FIXME handle dirty degraded
*/
- if (skip && !dev->vol.dirty)
- this->resync_start = ~0ULL;
+ if ((skip || recovery_start == 0) && !dev->vol.dirty)
+ this->resync_start = MaxSector;
if (skip)
continue;
- info_d = malloc(sizeof(*info_d));
+ info_d = calloc(1, sizeof(*info_d));
if (!info_d) {
fprintf(stderr, Name ": failed to allocate disk"
" for volume %.16s\n", dev->volume);
+ info_d = this->devs;
+ while (info_d) {
+ struct mdinfo *rest_d = info_d->next;
+
+ free(info_d);
+ info_d = rest_d;
+ }
free(this);
this = rest;
break;
}
- memset(info_d, 0, sizeof(*info_d));
info_d->next = this->devs;
this->devs = info_d;
info_d->disk.major = d->major;
info_d->disk.minor = d->minor;
info_d->disk.raid_disk = slot;
+ info_d->recovery_start = recovery_start;
- this->array.working_disks++;
+ if (info_d->recovery_start == MaxSector)
+ this->array.working_disks++;
info_d->events = __le32_to_cpu(mpb->generation_num);
info_d->data_offset = __le32_to_cpu(map->pba_of_lba0);
info_d->component_size = __le32_to_cpu(map->blocks_per_member);
- if (d->devname)
- strcpy(info_d->name, d->devname);
}
+ /* now that the disk list is up-to-date fixup recovery_start */
+ update_recovery_start(dev, this);
rest = this;
}
return 0;
}
-static int is_rebuilding(struct imsm_dev *dev)
-{
- struct imsm_map *migr_map;
-
- if (!dev->vol.migr_state)
- return 0;
-
- if (migr_type(dev) != MIGR_REBUILD)
- return 0;
-
- migr_map = get_imsm_map(dev, 1);
-
- if (migr_map->map_state == IMSM_T_STATE_DEGRADED)
- return 1;
- else
- return 0;
-}
-
/* return true if we recorded new information */
static int mark_failure(struct imsm_dev *dev, struct imsm_disk *disk, int idx)
{
struct imsm_map *map = get_imsm_map(dev, 0);
int failed = imsm_count_failed(super, dev);
__u8 map_state = imsm_check_degraded(super, dev, failed);
+ __u32 blocks_per_unit;
/* before we activate this array handle any missing disks */
if (consistent == 2 && super->missing) {
mark_missing(dev, &dl->disk, dl->index);
super->updates_pending++;
}
-
+
if (consistent == 2 &&
- (!is_resync_complete(a) ||
+ (!is_resync_complete(&a->info) ||
map_state != IMSM_T_STATE_NORMAL ||
dev->vol.migr_state))
consistent = 0;
- if (is_resync_complete(a)) {
+ if (is_resync_complete(&a->info)) {
/* complete intialization / resync,
* recovery and interrupted recovery is completed in
* ->set_disk
}
} else if (!is_resyncing(dev) && !failed) {
/* mark the start of the init process if nothing is failed */
- dprintf("imsm: mark resync start (%llu)\n", a->resync_start);
+ dprintf("imsm: mark resync start\n");
if (map->map_state == IMSM_T_STATE_UNINITIALIZED)
migrate(dev, IMSM_T_STATE_NORMAL, MIGR_INIT);
else
super->updates_pending++;
}
- /* FIXME check if we can update curr_migr_unit from resync_start */
+ /* check if we can update curr_migr_unit from resync_start, recovery_start */
+ blocks_per_unit = blocks_per_migr_unit(dev);
+ if (blocks_per_unit && failed <= 1) {
+ __u32 units32;
+ __u64 units;
+
+ if (migr_type(dev) == MIGR_REBUILD)
+ units = min_recovery_start(&a->info) / blocks_per_unit;
+ else
+ units = a->info.resync_start / blocks_per_unit;
+ units32 = units;
+
+ /* check that we did not overflow 32-bits, and that
+ * curr_migr_unit needs updating
+ */
+ if (units32 == units &&
+ __le32_to_cpu(dev->vol.curr_migr_unit) != units32) {
+ dprintf("imsm: mark checkpoint (%u)\n", units32);
+ dev->vol.curr_migr_unit = __cpu_to_le32(units32);
+ super->updates_pending++;
+ }
+ }
/* mark dirty / clean */
if (dev->vol.dirty != !consistent) {
- dprintf("imsm: mark '%s' (%llu)\n",
- consistent ? "clean" : "dirty", a->resync_start);
+ dprintf("imsm: mark '%s'\n", consistent ? "clean" : "dirty");
if (consistent)
dev->vol.dirty = 0;
else
}
}
-static int store_imsm_mpb(int fd, struct intel_super *super)
+static int store_imsm_mpb(int fd, struct imsm_super *mpb)
{
- struct imsm_super *mpb = super->anchor;
+ void *buf = mpb;
__u32 mpb_size = __le32_to_cpu(mpb->mpb_size);
unsigned long long dsize;
unsigned long long sectors;
if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0)
return 1;
- if (write(fd, super->buf + 512, 512 * sectors) != 512 * sectors)
+ if (write(fd, buf + 512, 512 * sectors) != 512 * sectors)
return 1;
}
if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
return 1;
- if (write(fd, super->buf, 512) != 512)
+ if (write(fd, buf, 512) != 512)
return 1;
return 0;
di->disk.major = dl->major;
di->disk.minor = dl->minor;
di->disk.state = 0;
+ di->recovery_start = 0;
di->data_offset = __le32_to_cpu(map->pba_of_lba0);
di->component_size = a->info.component_size;
di->container_member = inst;
.load_super = load_super_imsm,
.init_super = init_super_imsm,
- .store_super = store_zero_imsm,
+ .store_super = store_super_imsm,
.free_super = free_super_imsm,
.match_metadata_desc = match_metadata_desc_imsm,
.container_content = container_content_imsm,