unsigned index;
};
+/* One Intel RAID HBA (AHCI/SATA or SAS) that disks of a container are
+ * attached to.  An intel_super keeps a singly-linked list of these.
+ */
+struct intel_hba {
+ enum sys_dev_type type; /* SYS_DEV_SATA or SYS_DEV_SAS */
+ char *path; /* sysfs path of the controller; owned (strdup'd) */
+ char *pci_id; /* points into 'path' at the last component (PCI address); not freed separately */
+ struct intel_hba *next; /* next HBA bound to the same container */
+};
+
enum action {
DISK_REMOVE = 1,
DISK_ADD
active */
struct dl *missing; /* disks removed while we weren't looking */
struct bbm_log *bbm_log;
- const char *hba; /* device path of the raid controller for this metadata */
+ struct intel_hba *hba; /* device path of the raid controller for this metadata */
const struct imsm_orom *orom; /* platform firmware support */
struct intel_super *next; /* (temp) list for disambiguating family_num */
};
unsigned long long start, size;
};
+/* definitions of reshape process types */
+enum imsm_reshape_type {
+ CH_TAKEOVER, /* level change performed as an md "takeover" (e.g. 0 <-> 10) */
+ CH_MIGRATION, /* change performed via IMSM migration (level/layout/chunk) */
+};
+
/* definition of messages passed to imsm_process_update */
enum imsm_update_type {
update_activate_spare,
update_rename_array,
update_add_remove_disk,
update_reshape_container_disks,
+ update_takeover
};
struct imsm_update_activate_spare {
int raid_disks;
};
+/* direction of a RAID0 <-> RAID10 takeover */
+enum takeover_direction {
+ R10_TO_R0,
+ R0_TO_R10
+};
+/* payload of an update_takeover metadata update message */
+struct imsm_update_takeover {
+ enum imsm_update_type type; /* must be update_takeover */
+ int subarray; /* index of the volume being converted */
+ enum takeover_direction direction;
+};
struct imsm_update_reshape {
enum imsm_update_type type;
enum imsm_update_type type;
};
+
+/* display names for enum sys_dev_type values */
+static const char *_sys_dev_type[] = {
+ [SYS_DEV_UNKNOWN] = "Unknown",
+ [SYS_DEV_SAS] = "SAS",
+ [SYS_DEV_SATA] = "SATA"
+};
+
+/* Map a sys_dev_type to its human-readable name.  Out-of-range values
+ * (>= SYS_DEV_MAX) are reported as "Unknown".
+ */
+const char *get_sys_dev_type(enum sys_dev_type type)
+{
+ if (type >= SYS_DEV_MAX)
+ type = SYS_DEV_UNKNOWN;
+
+ return _sys_dev_type[type];
+}
+
+#ifndef MDASSEMBLE
+/* Allocate an intel_hba record describing 'device'.  The sysfs path is
+ * duplicated; pci_id is made to point at the final path component (the
+ * PCI address) inside that copy, or left NULL when unavailable.
+ * Returns NULL on allocation failure.
+ */
+static struct intel_hba * alloc_intel_hba(struct sys_dev *device)
+{
+ struct intel_hba *result = malloc(sizeof(*result));
+ if (result) {
+ result->type = device->type;
+ result->path = strdup(device->path);
+ /* initialize pci_id so it is never read uninitialized when
+ * strdup() fails or the path contains no '/' -- callers print
+ * hba->pci_id in diagnostics
+ */
+ result->pci_id = NULL;
+ result->next = NULL;
+ if (result->path && (result->pci_id = strrchr(result->path, '/')) != NULL)
+ result->pci_id++;
+ }
+ return result;
+}
+
+/* Search list 'hba' for an entry matching 'device' by both HBA type and
+ * sysfs path.  Returns the matching element, or NULL if none is found.
+ */
+static struct intel_hba * find_intel_hba(struct intel_hba *hba, struct sys_dev *device)
+{
+ struct intel_hba *result=NULL;
+ for (result = hba; result; result = result->next) {
+ if (result->type == device->type && strcmp(result->path, device->path) == 0)
+ break;
+ }
+ return result;
+}
+
+
+/* Record that 'device' (the HBA a new disk sits on) belongs to 'super'.
+ * Returns 1 on success (HBA already known, or newly appended to the
+ * list), 2 when the device's HBA type conflicts with the type already
+ * bound to this container.  'devname' is currently unused.
+ */
+static int attach_hba_to_super(struct intel_super *super, struct sys_dev *device,
+ const char *devname)
+{
+ struct intel_hba *hba;
+
+ /* is this HBA already attached to the container? */
+ hba = find_intel_hba(super->hba, device);
+ if (hba != NULL)
+ return 1;
+ /* first HBA seen for this container: start the list */
+ if (super->hba == NULL) {
+ super->hba = alloc_intel_hba(device);
+ return 1;
+ }
+
+ hba = super->hba;
+ /* Intel metadata requires all disks to be attached to HBAs of the
+ * same type; mixing different HBA types is not supported.
+ */
+ if (device->type != hba->type)
+ return 2;
+
+ /* append the new HBA at the end of the list */
+ while (hba->next)
+ hba = hba->next;
+
+ hba->next = alloc_intel_hba(device);
+ return 1;
+}
+
+/* Find the Intel HBA that a disk is attached to.  The disk is named by
+ * 'fd' when fd >= 0, otherwise by the sysfs path 'devname'.  On success
+ * the matching sys_dev element is unlinked from the scan list and
+ * returned -- the caller owns it and releases it with free_sys_dev().
+ * Returns NULL when the disk is not attached to any Intel controller.
+ */
+static struct sys_dev* find_disk_attached_hba(int fd, const char *devname)
+{
+ struct sys_dev *list, *elem, *prev;
+ char *disk_path;
+
+ if ((list = find_intel_devices()) == NULL)
+ return 0;
+
+ if (fd < 0)
+ disk_path = (char *) devname;
+ else
+ disk_path = diskfd_to_devpath(fd);
+
+ if (!disk_path) {
+ free_sys_dev(&list);
+ return 0;
+ }
+
+ for (prev = NULL, elem = list; elem; prev = elem, elem = elem->next) {
+ if (path_attached_to_hba(disk_path, elem->path)) {
+ /* detach 'elem' from the list so freeing the rest of
+ * the list does not free the element we return
+ */
+ if (prev == NULL)
+ list = list->next;
+ else
+ prev->next = elem->next;
+ elem->next = NULL;
+ /* disk_path was allocated only when derived from fd */
+ if (disk_path != devname)
+ free(disk_path);
+ free_sys_dev(&list);
+ return elem;
+ }
+ }
+ if (disk_path != devname)
+ free(disk_path);
+ free_sys_dev(&list);
+
+ return NULL;
+}
+#endif /* MDASSEMBLE */
+
+
static struct supertype *match_metadata_desc_imsm(char *arg)
{
struct supertype *st;
struct imsm_map *get_imsm_map(struct imsm_dev *dev, int second_map)
{
+ /* A device can have 2 maps if it is in the middle of a migration.
+ * If second_map is:
+ * 0 - we return the first map
+ * 1 - we return the second map if it exists, else NULL
+ * -1 - we return the second map if it exists, else the first
+ */
struct imsm_map *map = &dev->vol.map[0];
- if (second_map && !dev->vol.migr_state)
+ if (second_map == 1 && !dev->vol.migr_state)
return NULL;
- else if (second_map) {
+ else if (second_map == 1 ||
+ (second_map < 0 && dev->vol.migr_state)) {
void *ptr = map;
return ptr + sizeof_imsm_map(map);
} else
return map;
-
+
}
/* return the size of the device.
{
struct imsm_map *map;
- if (second_map == -1) {
- if (dev->vol.migr_state)
- map = get_imsm_map(dev, 1);
- else
- map = get_imsm_map(dev, 0);
- } else {
- map = get_imsm_map(dev, second_map);
- }
+ map = get_imsm_map(dev, second_map);
/* top byte identifies disk under rebuild */
return __le32_to_cpu(map->disk_ord_tbl[slot]);
printf("]");
}
printf("\n");
+ printf(" Failed disk : ");
+ if (map->failed_disk_num == 0xff)
+ printf("none");
+ else
+ printf("%i", map->failed_disk_num);
+ printf("\n");
slot = get_imsm_disk_slot(map, disk_idx);
if (slot >= 0) {
ord = get_imsm_ord_tbl_ent(dev, slot, -1);
static int imsm_read_serial(int fd, char *devname, __u8 *serial);
static void fd2devname(int fd, char *name);
-static int imsm_enumerate_ports(const char *hba_path, int port_count, int host_base, int verbose)
+static int ahci_enumerate_ports(const char *hba_path, int port_count, int host_base, int verbose)
{
- /* dump an unsorted list of devices attached to ahci, as well as
- * non-connected ports
+ /* dump an unsorted list of devices attached to AHCI Intel storage
+ * controller, as well as non-connected ports
*/
int hba_len = strlen(hba_path) + 1;
struct dirent *ent;
return err;
}
+
+
+/* Print a one-line notice to stderr for every Intel RAID controller in
+ * the list 'elem', including its type (SATA/SAS) and PCI address when
+ * known.
+ */
+static void print_found_intel_controllers(struct sys_dev *elem)
+{
+ for (; elem; elem = elem->next) {
+ fprintf(stderr, Name ": found Intel(R) ");
+ if (elem->type == SYS_DEV_SATA)
+ fprintf(stderr, "SATA ");
+ else if (elem->type == SYS_DEV_SAS)
+ fprintf(stderr, "SAS ");
+ fprintf(stderr, "RAID controller");
+ if (elem->pci_id)
+ fprintf(stderr, " at %s", elem->pci_id);
+ fprintf(stderr, ".\n");
+ }
+ fflush(stderr);
+}
+
+/* Scan 'hba_path' for scsi "hostN" entries to determine the number of
+ * AHCI ports.  Stores the port count in *port_count and returns the
+ * smallest host number found (the port numbering base), or -1 if the
+ * directory cannot be opened.
+ */
+static int ahci_get_port_count(const char *hba_path, int *port_count)
+{
+ struct dirent *ent;
+ DIR *dir;
+ int host_base = -1;
+
+ *port_count = 0;
+ if ((dir = opendir(hba_path)) == NULL)
+ return -1;
+
+ for (ent = readdir(dir); ent; ent = readdir(dir)) {
+ int host;
+
+ if (sscanf(ent->d_name, "host%d", &host) != 1)
+ continue;
+ /* track the lowest host number as the base */
+ if (*port_count == 0)
+ host_base = host;
+ else if (host < host_base)
+ host_base = host;
+
+ /* port count spans from host_base to the highest host seen */
+ if (host + 1 > *port_count + host_base)
+ *port_count = host + 1 - host_base;
+ }
+ closedir(dir);
+ return host_base;
+}
+
static int detail_platform_imsm(int verbose, int enumerate_only)
{
/* There are two components to imsm platform support, the ahci SATA
*/
const struct imsm_orom *orom;
struct sys_dev *list, *hba;
- DIR *dir;
- struct dirent *ent;
- const char *hba_path;
int host_base = 0;
int port_count = 0;
+ int result=0;
if (enumerate_only) {
if (check_env("IMSM_NO_PLATFORM") || find_imsm_orom())
return 2;
}
- list = find_driver_devices("pci", "ahci");
- for (hba = list; hba; hba = hba->next)
- if (devpath_to_vendor(hba->path) == 0x8086)
- break;
-
- if (!hba) {
+ list = find_intel_devices();
+ if (!list) {
if (verbose)
- fprintf(stderr, Name ": unable to find active ahci controller\n");
+ fprintf(stderr, Name ": no active Intel(R) RAID "
+ "controller found.\n");
free_sys_dev(&list);
return 2;
} else if (verbose)
- fprintf(stderr, Name ": found Intel SATA AHCI Controller\n");
- hba_path = hba->path;
- hba->path = NULL;
- free_sys_dev(&list);
+ print_found_intel_controllers(list);
orom = find_imsm_orom();
if (!orom) {
+ free_sys_dev(&list);
if (verbose)
fprintf(stderr, Name ": imsm option-rom not found\n");
return 2;
imsm_orom_has_chunk(orom, 1024*64) ? " 64M" : "");
printf(" Max Disks : %d\n", orom->tds);
printf(" Max Volumes : %d\n", orom->vpa);
- printf(" I/O Controller : %s\n", hba_path);
- /* find the smallest scsi host number to determine a port number base */
- dir = opendir(hba_path);
- for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
- int host;
-
- if (sscanf(ent->d_name, "host%d", &host) != 1)
- continue;
- if (port_count == 0)
- host_base = host;
- else if (host < host_base)
- host_base = host;
-
- if (host + 1 > port_count + host_base)
- port_count = host + 1 - host_base;
-
- }
- if (dir)
- closedir(dir);
-
- if (!port_count || imsm_enumerate_ports(hba_path, port_count,
- host_base, verbose) != 0) {
- if (verbose)
- fprintf(stderr, Name ": failed to enumerate ports\n");
- return 2;
+ for (hba = list; hba; hba = hba->next) {
+ printf(" I/O Controller : %s (%s)\n",
+ hba->path, get_sys_dev_type(hba->type));
+
+ if (hba->type == SYS_DEV_SATA) {
+ host_base = ahci_get_port_count(hba->path, &port_count);
+ if (ahci_enumerate_ports(hba->path, port_count, host_base, verbose)) {
+ if (verbose)
+ fprintf(stderr, Name ": failed to enumerate "
+ "ports on SATA controller at %s.", hba->pci_id);
+ result |= 2;
+ }
+ } else if (hba->type == SYS_DEV_SAS) {
+ if (verbose)
+ fprintf(stderr, Name ": failed to enumerate "
+ "devices on SAS controller at %s.", hba->pci_id);
+ result |= 2;
+ }
}
- return 0;
+ free_sys_dev(&list);
+ return result;
}
#endif
__le32_to_cpu(map_to_analyse->blocks_per_member);
memset(info->uuid, 0, sizeof(info->uuid));
info->recovery_start = MaxSector;
- info->reshape_active = (prev_map != NULL);
+ info->reshape_active = (prev_map != NULL) &&
+ (map->map_state == prev_map->map_state);
if (info->reshape_active)
info->delta_disks = map->num_members - prev_map->num_members;
else
int i;
struct imsm_dev *dev_new;
size_t len, len_migr;
+ size_t max_len = 0;
size_t space_needed = 0;
struct imsm_super *mpb = super->anchor;
dv = malloc(sizeof(*dv));
if (!dv)
return 1;
- dev_new = malloc(len_migr);
+ if (max_len < len_migr)
+ max_len = len_migr;
+ if (max_len > len_migr)
+ space_needed += max_len - len_migr;
+ dev_new = malloc(max_len);
if (!dev_new) {
free(dv);
return 1;
super->disks = d->next;
__free_imsm_disk(d);
}
+ while (super->disk_mgmt_list) {
+ d = super->disk_mgmt_list;
+ super->disk_mgmt_list = d->next;
+ __free_imsm_disk(d);
+ }
while (super->missing) {
d = super->missing;
super->missing = d->next;
/* free all the pieces hanging off of a super pointer */
static void __free_imsm(struct intel_super *super, int free_disks)
{
+ struct intel_hba *elem, *next;
+
if (super->buf) {
free(super->buf);
super->buf = NULL;
if (free_disks)
free_imsm_disks(super);
free_devlist(super);
- if (super->hba) {
- free((void *) super->hba);
- super->hba = NULL;
+ elem = super->hba;
+ while (elem) {
+ if (elem->path)
+ free((void *)elem->path);
+ next = elem->next;
+ free(elem);
+ elem = next;
}
+ super->hba = NULL;
}
static void free_imsm(struct intel_super *super)
super->create_offset = ~((__u32 ) 0);
if (!check_env("IMSM_NO_PLATFORM"))
super->orom = find_imsm_orom();
- if (super->orom && !check_env("IMSM_TEST_OROM")) {
- struct sys_dev *list, *ent;
-
- /* find the first intel ahci controller */
- list = find_driver_devices("pci", "ahci");
- for (ent = list; ent; ent = ent->next)
- if (devpath_to_vendor(ent->path) == 0x8086)
- break;
- if (ent) {
- super->hba = ent->path;
- ent->path = NULL;
- }
- free_sys_dev(&list);
- }
}
return super;
return 0;
}
+
static int add_to_super_imsm(struct supertype *st, mdu_disk_info_t *dk,
- int fd, char *devname)
+ int fd, char *devname)
{
struct intel_super *super = st->sb;
struct dl *dd;
int rv;
struct stat stb;
- /* if we are on an RAID enabled platform check that the disk is
- * attached to the raid controller
+ /* If we are on an RAID enabled platform check that the disk is
+ * attached to the raid controller.
+ * We do not need to test disks attachment for container based additions,
+ * they shall be already tested when container was created/assembled.
*/
- if (super->hba && !disk_attached_to_hba(fd, super->hba)) {
- fprintf(stderr,
- Name ": %s is not attached to the raid controller: %s\n",
- devname ? : "disk", super->hba);
- return 1;
+ if ((fd != -1) && !check_env("IMSM_NO_PLATFORM")) {
+ struct sys_dev *hba_name;
+ struct intel_hba *hba;
+
+ hba_name = find_disk_attached_hba(fd, NULL);
+ if (!hba_name) {
+ fprintf(stderr,
+ Name ": %s is not attached to Intel(R) RAID controller.\n",
+ devname ? : "disk");
+ return 1;
+ }
+ rv = attach_hba_to_super(super, hba_name, devname);
+ switch (rv) {
+ case 2:
+ fprintf(stderr, Name ": %s is attached to Intel(R) %s RAID "
+ "controller (%s),\n but the container is assigned to Intel(R) "
+ "%s RAID controller (",
+ devname,
+ get_sys_dev_type(hba_name->type),
+ hba_name->pci_id ? : "Err!",
+ get_sys_dev_type(hba_name->type));
+
+ hba = super->hba;
+ while (hba) {
+ fprintf(stderr, "%s", hba->pci_id ? : "Err!");
+ if (hba->next)
+ fprintf(stderr, ", ");
+ hba = hba->next;
+ }
+
+ fprintf(stderr, ").\n"
+ " Mixing devices attached to different controllers "
+ "is not allowed.\n");
+ free_sys_dev(&hba_name);
+ return 1;
+ }
+ free_sys_dev(&hba_name);
}
if (super->current_vol >= 0)
if (st->update_tail) {
/* queue the recently created array / added disk
* as a metadata update */
- struct dl *d;
int rv;
/* determine if we are creating a volume or adding a disk */
} else
rv = create_array(st, current_vol);
- for (d = super->disks; d ; d = d->next) {
- close(d->fd);
- d->fd = -1;
- }
-
return rv;
} else {
struct dl *d;
maxsize = merge_extents(super, extent_cnt);
minsize = size;
if (size == 0)
- minsize = chunk;
+ /* chunk is in K */
+ minsize = chunk * 2;
if (cnt < raiddisks ||
(super->orom && used && used != raiddisks) ||
if (size == 0) {
size = maxsize;
if (chunk) {
- size /= chunk;
- size *= chunk;
+ size /= 2 * chunk;
+ size *= 2 * chunk;
}
}
return 0;
}
-#endif /* MDASSEMBLE */
static int is_gen_migration(struct imsm_dev *dev)
{
return 0;
}
+#endif /* MDASSEMBLE */
static int is_rebuilding(struct imsm_dev *dev)
{
skip = 0;
idx = get_imsm_disk_idx(dev, slot, 0);
- ord = get_imsm_ord_tbl_ent(dev, slot, 0);
+ ord = get_imsm_ord_tbl_ent(dev, slot, -1);
for (d = super->disks; d ; d = d->next)
if (d->index == idx)
break;
if (map2) {
if (slot < map2->num_members)
info_d->disk.state = (1 << MD_DISK_ACTIVE);
+ else
+ this->array.spare_disks++;
} else {
if (slot < map->num_members)
info_d->disk.state = (1 << MD_DISK_ACTIVE);
+ else
+ this->array.spare_disks++;
}
if (info_d->recovery_start == MaxSector)
this->array.working_disks++;
super->updates_pending++;
}
+/* Recompute and store the array size (in blocks) of 'dev' from its
+ * first map: blocks_per_member * number of data members, rounded down
+ * to a whole MB.  Writes the result into dev->size_low/size_high and
+ * returns it.  If the data-member count cannot be determined, the
+ * currently stored size is returned unchanged.
+ */
+static unsigned long long imsm_set_array_size(struct imsm_dev *dev)
+{
+ int used_disks = imsm_num_data_members(dev, 0);
+ unsigned long long array_blocks;
+ struct imsm_map *map;
+
+ if (used_disks == 0) {
+ /* when a problem occurs,
+ * return the current array_blocks value
+ */
+ array_blocks = __le32_to_cpu(dev->size_high);
+ array_blocks = array_blocks << 32;
+ array_blocks += __le32_to_cpu(dev->size_low);
+
+ return array_blocks;
+ }
+
+ /* set array size in metadata
+ */
+ map = get_imsm_map(dev, 0);
+ array_blocks = map->blocks_per_member * used_disks;
+
+ /* round array size down to closest MB
+ */
+ array_blocks = (array_blocks >> SECT_PER_MB_SHIFT) << SECT_PER_MB_SHIFT;
+ dev->size_low = __cpu_to_le32((__u32)array_blocks);
+ dev->size_high = __cpu_to_le32((__u32)(array_blocks >> 32));
+
+ return array_blocks;
+}
+
static void imsm_set_disk(struct active_array *a, int n, int state);
static void imsm_progress_container_reshape(struct intel_super *super)
struct imsm_super *mpb = super->anchor;
int prev_disks = -1;
int i;
+ int copy_map_size;
for (i = 0; i < mpb->num_raid_devs; i++) {
struct imsm_dev *dev = get_imsm_dev(super, i);
struct imsm_map *map = get_imsm_map(dev, 0);
struct imsm_map *map2;
int prev_num_members;
- int used_disks;
if (dev->vol.migr_state)
return;
* i.e it needs a migr_state
*/
+ copy_map_size = sizeof_imsm_map(map);
prev_num_members = map->num_members;
map->num_members = prev_disks;
dev->vol.migr_state = 1;
set_imsm_ord_tbl_ent(map, i, i);
map2 = get_imsm_map(dev, 1);
/* Copy the current map */
- memcpy(map2, map, sizeof_imsm_map(map));
+ memcpy(map2, map, copy_map_size);
map2->num_members = prev_num_members;
- /* calculate new size
- */
- used_disks = imsm_num_data_members(dev, 0);
- if (used_disks) {
- unsigned long long array_blocks;
-
- array_blocks =
- map->blocks_per_member
- * used_disks;
- /* round array size down to closest MB
- */
- array_blocks = (array_blocks
- >> SECT_PER_MB_SHIFT)
- << SECT_PER_MB_SHIFT;
- dev->size_low =
- __cpu_to_le32((__u32)array_blocks);
- dev->size_high =
- __cpu_to_le32(
- (__u32)(array_blocks >> 32));
- }
+ imsm_set_array_size(dev);
super->updates_pending++;
}
}
if (a->last_checkpoint >= a->info.component_size) {
unsigned long long array_blocks;
int used_disks;
- /* it seems the reshape is all done */
- dev->vol.migr_state = 0;
- dev->vol.migr_type = 0;
- dev->vol.curr_migr_unit = 0;
+ struct mdinfo *mdi;
+
+ used_disks = imsm_num_data_members(dev, 0);
+ if (used_disks > 0) {
+ array_blocks =
+ map->blocks_per_member *
+ used_disks;
+ /* round array size down to closest MB
+ */
+ array_blocks = (array_blocks
+ >> SECT_PER_MB_SHIFT)
+ << SECT_PER_MB_SHIFT;
+ a->info.custom_array_size = array_blocks;
+ /* encourage manager to update array
+ * size
+ */
+
+ a->check_reshape = 1;
+}
+ /* finalize online capacity expansion/reshape */
+ for (mdi = a->info.devs; mdi; mdi = mdi->next)
+ imsm_set_disk(a,
+ mdi->disk.raid_disk,
+ mdi->curr_state);
- used_disks = imsm_num_data_members(dev, -1);
- array_blocks = map->blocks_per_member * used_disks;
- /* round array size down to closest MB */
- array_blocks = (array_blocks >> SECT_PER_MB_SHIFT)
- << SECT_PER_MB_SHIFT;
- dev->size_low = __cpu_to_le32((__u32) array_blocks);
- dev->size_high = __cpu_to_le32((__u32) (array_blocks >> 32));
- a->info.custom_array_size = array_blocks;
- a->check_reshape = 1; /* encourage manager to update
- * array size
- */
- super->updates_pending++;
imsm_progress_container_reshape(super);
- }
+ }
}
}
super->updates_pending++;
}
- /* finalize online capacity expansion/reshape */
- if ((a->curr_action != reshape) &&
- (a->prev_action == reshape)) {
- struct mdinfo *mdi;
-
- for (mdi = a->info.devs; mdi; mdi = mdi->next)
- imsm_set_disk(a, mdi->disk.raid_disk, mdi->curr_state);
- }
-
return consistent;
}
int devices_to_reshape = 1;
struct imsm_super *mpb = super->anchor;
int ret_val = 0;
+ unsigned int dev_id;
- dprintf("imsm: imsm_process_update() for update_reshape\n");
+ dprintf("imsm: apply_reshape_container_disks_update()\n");
/* enable spares to use in array */
for (i = 0; i < delta_disks; i++) {
new_disk = get_disk_super(super,
major(u->new_disks[i]),
minor(u->new_disks[i]));
- dprintf("imsm: imsm_process_update(): new disk "
- "for reshape is: %i:%i (%p, index = %i)\n",
+ dprintf("imsm: new disk for reshape is: %i:%i "
+ "(%p, index = %i)\n",
major(u->new_disks[i]), minor(u->new_disks[i]),
new_disk, new_disk->index);
if ((new_disk == NULL) ||
new_disk->disk.status &= ~SPARE_DISK;
}
- dprintf("imsm: process_update(): update_reshape: volume set"
- " mpb->num_raid_devs = %i\n", mpb->num_raid_devs);
+ dprintf("imsm: volume set mpb->num_raid_devs = %i\n",
+ mpb->num_raid_devs);
/* manage changes in volume
*/
- for (id = super->devlist ; id; id = id->next) {
+ for (dev_id = 0; dev_id < mpb->num_raid_devs; dev_id++) {
void **sp = *space_list;
struct imsm_dev *newdev;
struct imsm_map *newmap, *oldmap;
+ for (id = super->devlist ; id; id = id->next) {
+ if (id->index == dev_id)
+ break;
+ }
+ if (id == NULL)
+ break;
if (!sp)
continue;
*space_list = *sp;
/* update one device only
*/
if (devices_to_reshape) {
- int used_disks;
-
- dprintf("process_update(): modifying "
- "subdev: %i\n", id->index);
+ dprintf("imsm: modifying subdev: %i\n",
+ id->index);
devices_to_reshape--;
newdev->vol.migr_state = 1;
newdev->vol.curr_migr_unit = 0;
newmap = get_imsm_map(newdev, 1);
memcpy(newmap, oldmap, sizeof_imsm_map(oldmap));
- /* calculate new size
- */
- used_disks = imsm_num_data_members(newdev, 0);
- if (used_disks) {
- unsigned long long array_blocks;
-
- array_blocks =
- newmap->blocks_per_member * used_disks;
- /* round array size down to closest MB
- */
- array_blocks = (array_blocks
- >> SECT_PER_MB_SHIFT)
- << SECT_PER_MB_SHIFT;
- newdev->size_low =
- __cpu_to_le32((__u32)array_blocks);
- newdev->size_high =
- __cpu_to_le32((__u32)(array_blocks >> 32));
- }
+ imsm_set_array_size(newdev);
}
sp = (void **)id->dev;
return ret_val;
}
+/* Apply an update_takeover metadata update to subarray u->subarray.
+ * R10_TO_R0: the failed mirror halves are turned into spares and the
+ * map shrinks to half its members.  R0_TO_R10: new *missing* mirror
+ * slots are created from buffers pre-allocated on *space_list and a
+ * new, larger dev replaces the old one.  In both cases the disk order
+ * table is rebuilt at the end.
+ * Returns 1 if the metadata was modified, 0 otherwise.
+ */
+static int apply_takeover_update(struct imsm_update_takeover *u,
+ struct intel_super *super,
+ void ***space_list)
+{
+ struct imsm_dev *dev = NULL;
+ struct intel_dev *dv;
+ struct imsm_dev *dev_new;
+ struct imsm_map *map;
+ struct dl *dm, *du;
+ int i;
+
+ /* locate the subarray this update targets */
+ for (dv = super->devlist; dv; dv = dv->next)
+ if (dv->index == (unsigned int)u->subarray) {
+ dev = dv->dev;
+ break;
+ }
+
+ if (dev == NULL)
+ return 0;
+
+ map = get_imsm_map(dev, 0);
+
+ if (u->direction == R10_TO_R0) {
+ /* Number of failed disks must be half of initial disk number */
+ if (imsm_count_failed(super, dev) != (map->num_members / 2))
+ return 0;
+
+ /* iterate through devices to mark removed disks as spare */
+ for (dm = super->disks; dm; dm = dm->next) {
+ if (dm->disk.status & FAILED_DISK) {
+ int idx = dm->index;
+ /* update indexes on the disk list */
+/* FIXME this loop-with-the-loop looks wrong, I'm not convinced
+ the index values will end up being correct.... NB */
+ for (du = super->disks; du; du = du->next)
+ if (du->index > idx)
+ du->index--;
+ /* mark as spare disk */
+ dm->disk.status = SPARE_DISK;
+ dm->index = -1;
+ }
+ }
+ /* update map */
+ map->num_members = map->num_members / 2;
+ map->map_state = IMSM_T_STATE_NORMAL;
+ map->num_domains = 1;
+ map->raid_level = 0;
+ map->failed_disk_num = -1;
+ }
+
+ if (u->direction == R0_TO_R10) {
+ void **space;
+ /* update slots in current disk list: existing disks take the
+ * even slots, the new missing mirrors take the odd ones
+ */
+ for (dm = super->disks; dm; dm = dm->next) {
+ if (dm->index >= 0)
+ dm->index *= 2;
+ }
+ /* create new *missing* disks */
+ for (i = 0; i < map->num_members; i++) {
+ space = *space_list;
+ if (!space)
+ continue;
+ *space_list = *space;
+ du = (void *)space;
+ memcpy(du, super->disks, sizeof(*du));
+ du->disk.status = FAILED_DISK;
+ du->disk.scsi_id = 0;
+ du->fd = -1;
+ du->minor = 0;
+ du->major = 0;
+ du->index = (i * 2) + 1;
+ sprintf((char *)du->disk.serial,
+ " MISSING_%d", du->index);
+ sprintf((char *)du->serial,
+ "MISSING_%d", du->index);
+ du->next = super->missing;
+ super->missing = du;
+ }
+ /* create new dev and map */
+ space = *space_list;
+ if (!space)
+ return 0;
+ *space_list = *space;
+ dev_new = (void *)space;
+ memcpy(dev_new, dev, sizeof(*dev));
+ /* update new map */
+ map = get_imsm_map(dev_new, 0);
+ map->failed_disk_num = map->num_members;
+ map->num_members = map->num_members * 2;
+ map->map_state = IMSM_T_STATE_NORMAL;
+ map->num_domains = 2;
+ map->raid_level = 1;
+ /* replace dev<->dev_new */
+ dv->dev = dev_new;
+ }
+ /* update disk order table */
+ for (du = super->disks; du; du = du->next)
+ if (du->index >= 0)
+ set_imsm_ord_tbl_ent(map, du->index, du->index);
+ for (du = super->missing; du; du = du->next)
+ if (du->index >= 0)
+ set_imsm_ord_tbl_ent(map, du->index,
+ du->index | IMSM_ORD_REBUILD);
+
+ return 1;
+}
+
static void imsm_process_update(struct supertype *st,
struct metadata_update *update)
{
mpb = super->anchor;
switch (type) {
+ case update_takeover: {
+ struct imsm_update_takeover *u = (void *)update->buf;
+ if (apply_takeover_update(u, super, &update->space_list))
+ super->updates_pending++;
+ break;
+ }
+
case update_reshape_container_disks: {
struct imsm_update_reshape *u = (void *)update->buf;
if (apply_reshape_container_disks_update(
size_t len = 0;
switch (type) {
+ case update_takeover: {
+ struct imsm_update_takeover *u = (void *)update->buf;
+ if (u->direction == R0_TO_R10) {
+ void **tail = (void **)&update->space_list;
+ struct imsm_dev *dev = get_imsm_dev(super, u->subarray);
+ struct imsm_map *map = get_imsm_map(dev, 0);
+ int num_members = map->num_members;
+ void *space;
+ int size, i;
+ int err = 0;
+ /* allocate memory for added disks */
+ for (i = 0; i < num_members; i++) {
+ size = sizeof(struct dl);
+ space = malloc(size);
+ if (!space) {
+ err++;
+ break;
+ }
+ *tail = space;
+ tail = space;
+ *tail = NULL;
+ }
+ /* allocate memory for new device */
+ size = sizeof_imsm_dev(super->devlist->dev, 0) +
+ (num_members * sizeof(__u32));
+ space = malloc(size);
+ if (!space)
+ err++;
+ else {
+ *tail = space;
+ tail = space;
+ *tail = NULL;
+ }
+ if (!err) {
+ len = disks_to_mpb_size(num_members * 2);
+ } else {
+ /* if allocation didn't success, free buffer */
+ while (update->space_list) {
+ void **sp = update->space_list;
+ update->space_list = *sp;
+ free(sp);
+ }
+ }
+ }
+
+ break;
+ }
case update_reshape_container_disks: {
/* Every raid device in the container is about to
* gain some more devices, and we will enter a
__free_imsm_disk(dl);
}
}
-#endif /* MDASSEMBLE */
static char disk_by_path[] = "/dev/disk/by-path/";
static const char *imsm_get_disk_controller_domain(const char *path)
{
- struct sys_dev *list, *hba = NULL;
char disk_path[PATH_MAX];
- int ahci = 0;
- char *dpath = NULL;
-
- list = find_driver_devices("pci", "ahci");
- for (hba = list; hba; hba = hba->next)
- if (devpath_to_vendor(hba->path) == 0x8086)
- break;
-
- if (hba) {
- struct stat st;
+ char *drv=NULL;
+ struct stat st;
- strncpy(disk_path, disk_by_path, PATH_MAX - 1);
- strncat(disk_path, path, PATH_MAX - strlen(disk_path) - 1);
- if (stat(disk_path, &st) == 0) {
- dpath = devt_to_devpath(st.st_rdev);
- if (dpath)
- ahci = path_attached_to_hba(dpath, hba->path);
- }
+ strncpy(disk_path, disk_by_path, PATH_MAX - 1);
+ strncat(disk_path, path, PATH_MAX - strlen(disk_path) - 1);
+ if (stat(disk_path, &st) == 0) {
+ struct sys_dev* hba;
+ char *path=NULL;
+
+ path = devt_to_devpath(st.st_rdev);
+ if (path == NULL)
+ return "unknown";
+ hba = find_disk_attached_hba(-1, path);
+ if (hba && hba->type == SYS_DEV_SAS)
+ drv = "isci";
+ else if (hba && hba->type == SYS_DEV_SATA)
+ drv = "ahci";
+ else
+ drv = "unknown";
+ dprintf("path: %s hba: %s attached: %s\n",
+ path, (hba) ? hba->path : "NULL", drv);
+ free(path);
+ if (hba)
+ free_sys_dev(&hba);
}
- dprintf("path: %s(%s) hba: %s attached: %d\n",
- path, dpath, (hba) ? hba->path : "NULL", ahci);
- free_sys_dev(&list);
- if (ahci)
- return "ahci";
- else
- return NULL;
+ return drv;
}
static int imsm_find_array_minor_by_subdev(int subdev, int container, int *minor)
struct geo_params *geo,
int *old_raid_disks)
{
+ /* currently we only support increasing the number of devices
+ * for a container. This increases the number of device for each
+ * member array. They must all be RAID0 or RAID5.
+ */
int ret_val = 0;
struct mdinfo *info, *member;
int devices_that_can_grow = 0;
dprintf("imsm: checking device_num: %i\n",
member->container_member);
- if (geo->raid_disks < member->array.raid_disks) {
+ if (geo->raid_disks <= member->array.raid_disks) {
/* we work on container for Online Capacity Expansion
* only so raid_disks has to grow
*/
}
}
+/***************************************************************************
+* Function: imsm_analyze_change
+* Description: Analyze the geometry change requested for a single volume
+* and validate whether the transition is supported.  On success
+* geo->level/layout/chunksize are normalized to the effective
+* target values.
+* Parameters: supertype structure, geometry parameters
+* Returns: CH_TAKEOVER or CH_MIGRATION on success, -1 on failure
+****************************************************************************/
+enum imsm_reshape_type imsm_analyze_change(struct supertype *st,
+ struct geo_params *geo)
+{
+ struct mdinfo info;
+ int change = -1;
+ int check_devs = 0;
+
+ getinfo_super_imsm_volume(st, &info, NULL);
+
+ /* level change requested? */
+ if ((geo->level != info.array.level) &&
+ (geo->level >= 0) &&
+ (geo->level != UnSet)) {
+ switch (info.array.level) {
+ case 0:
+ if (geo->level == 5) {
+ change = CH_MIGRATION;
+ check_devs = 1;
+ }
+ if (geo->level == 10) {
+ change = CH_TAKEOVER;
+ check_devs = 1;
+ }
+ break;
+ case 1:
+ if (geo->level == 0) {
+ change = CH_TAKEOVER;
+ check_devs = 1;
+ }
+ break;
+ case 5:
+ if (geo->level == 0)
+ change = CH_MIGRATION;
+ break;
+ case 10:
+ if (geo->level == 0) {
+ change = CH_TAKEOVER;
+ check_devs = 1;
+ }
+ break;
+ }
+ if (change == -1) {
+ fprintf(stderr,
+ Name " Error. Level Migration from %d to %d "
+ "not supported!\n",
+ info.array.level, geo->level);
+ goto analyse_change_exit;
+ }
+ } else
+ geo->level = info.array.level;
+
+ /* layout change requested? */
+ if ((geo->layout != info.array.layout)
+ && ((geo->layout != UnSet) && (geo->layout != -1))) {
+ change = CH_MIGRATION;
+ if ((info.array.layout == 0)
+ && (info.array.level == 5)
+ && (geo->layout == 5)) {
+ /* reshape 5 -> 4 */
+ } else if ((info.array.layout == 5)
+ && (info.array.level == 5)
+ && (geo->layout == 0)) {
+ /* reshape 4 -> 5 */
+ geo->layout = 0;
+ geo->level = 5;
+ } else {
+ fprintf(stderr,
+ Name " Error. Layout Migration from %d to %d "
+ "not supported!\n",
+ info.array.layout, geo->layout);
+ change = -1;
+ goto analyse_change_exit;
+ }
+ } else
+ geo->layout = info.array.layout;
+
+ /* chunk size change requested? */
+ if ((geo->chunksize > 0) && (geo->chunksize != UnSet)
+ && (geo->chunksize != info.array.chunk_size))
+ change = CH_MIGRATION;
+ else
+ geo->chunksize = info.array.chunk_size;
+
+ /* verify the target geometry against platform constraints */
+ if (!validate_geometry_imsm(st,
+ geo->level,
+ geo->layout,
+ geo->raid_disks,
+ (geo->chunksize / 1024),
+ geo->size,
+ 0, 0, 1))
+ change = -1;
+
+ if (check_devs) {
+ struct intel_super *super = st->sb;
+ struct imsm_super *mpb = super->anchor;
+
+ /* takeover is only supported for a single-volume container */
+ if (mpb->num_raid_devs > 1) {
+ fprintf(stderr,
+ Name " Error. Cannot perform operation on %s"
+ "- for this operation it MUST be single "
+ "array in container\n",
+ geo->dev_name);
+ change = -1;
+ }
+ }
+
+analyse_change_exit:
+
+ return change;
+}
+
+/* Build and queue an update_takeover metadata update for the current
+ * volume (super->current_vol).  geo->level selects the direction:
+ * 0 means RAID10 -> RAID0, 10 means RAID0 -> RAID10.
+ * NOTE(review): u->direction is left unassigned for any other level --
+ * callers only invoke this for CH_TAKEOVER, which imsm_analyze_change()
+ * produces only for target levels 0 and 10; confirm no other caller.
+ * Returns 0 on success, 1 on allocation failure.
+ */
+int imsm_takeover(struct supertype *st, struct geo_params *geo)
+{
+ struct intel_super *super = st->sb;
+ struct imsm_update_takeover *u;
+
+ u = malloc(sizeof(struct imsm_update_takeover));
+ if (u == NULL)
+ return 1;
+
+ u->type = update_takeover;
+ u->subarray = super->current_vol;
+
+ /* 10->0 transition */
+ if (geo->level == 0)
+ u->direction = R10_TO_R0;
+
+ /* 0->10 transition */
+ if (geo->level == 10)
+ u->direction = R0_TO_R10;
+
+ /* update metadata locally */
+ imsm_update_metadata_locally(st, u,
+ sizeof(struct imsm_update_takeover));
+ /* and possibly remotely */
+ if (st->update_tail)
+ append_metadata_update(st, u,
+ sizeof(struct imsm_update_takeover));
+ else
+ free(u);
+
+ return 0;
+}
+
static int imsm_reshape_super(struct supertype *st, long long size, int level,
int layout, int chunksize, int raid_disks,
- char *backup, char *dev, int verbose)
+ int delta_disks, char *backup, char *dev,
+ int verbose)
{
- /* currently we only support increasing the number of devices
- * for a container. This increases the number of device for each
- * member array. They must all be RAID0 or RAID5.
- */
-
int ret_val = 1;
struct geo_params geo;
dprintf("imsm: reshape_super called.\n");
- memset(&geo, sizeof(struct geo_params), 0);
+ memset(&geo, 0, sizeof(struct geo_params));
geo.dev_name = dev;
+ geo.dev_id = st->devnum;
geo.size = size;
geo.level = level;
geo.layout = layout;
geo.chunksize = chunksize;
geo.raid_disks = raid_disks;
+ if (delta_disks != UnSet)
+ geo.raid_disks += delta_disks;
dprintf("\tfor level : %i\n", geo.level);
dprintf("\tfor raid_disks : %i\n", geo.raid_disks);
if (experimental() == 0)
return ret_val;
- /* verify reshape conditions
- * on container level we can only increase number of devices.
- */
if (st->container_dev == st->devnum) {
- /* check for delta_disks > 0
- * and supported raid levels 0 and 5 only in container
- */
+ /* On container level we can only increase number of devices. */
+ dprintf("imsm: info: Container operation\n");
int old_raid_disks = 0;
if (imsm_reshape_is_allowed_on_container(
st, &geo, &old_raid_disks)) {
else
free(u);
- } else
- fprintf(stderr, Name "imsm: Operation is not allowed "
- "on this container\n");
- } else
- fprintf(stderr, Name "imsm: not a container operation\n");
+ } else {
+ fprintf(stderr, Name ": (imsm) Operation "
+ "is not allowed on this container\n");
+ }
+ } else {
+ /* On volume level we support following operations
+ * - takeover: raid10 -> raid0; raid0 -> raid10
+ * - chunk size migration
+ * - migration: raid5 -> raid0; raid0 -> raid5
+ */
+ struct intel_super *super = st->sb;
+ struct intel_dev *dev = super->devlist;
+ int change, devnum;
+ dprintf("imsm: info: Volume operation\n");
+ /* find requested device */
+ while (dev) {
+ imsm_find_array_minor_by_subdev(dev->index, st->container_dev, &devnum);
+ if (devnum == geo.dev_id)
+ break;
+ dev = dev->next;
+ }
+ if (dev == NULL) {
+ fprintf(stderr, Name " Cannot find %s (%i) subarray\n",
+ geo.dev_name, geo.dev_id);
+ goto exit_imsm_reshape_super;
+ }
+ super->current_vol = dev->index;
+ change = imsm_analyze_change(st, &geo);
+ switch (change) {
+ case CH_TAKEOVER:
+ ret_val = imsm_takeover(st, &geo);
+ break;
+ case CH_MIGRATION:
+ ret_val = 0;
+ break;
+ default:
+ ret_val = 1;
+ }
+ }
exit_imsm_reshape_super:
dprintf("imsm: reshape_super Exit code = %i\n", ret_val);
afd, sra, reshape, st, stripes,
fds, offsets, dests, destfd, destoffsets);
}
+#endif /* MDASSEMBLE */
struct superswitch super_imsm = {
#ifndef MDASSEMBLE
.kill_subarray = kill_subarray_imsm,
.update_subarray = update_subarray_imsm,
.load_container = load_container_imsm,
+ .default_geometry = default_geometry_imsm,
+ .get_disk_controller_domain = imsm_get_disk_controller_domain,
+ .reshape_super = imsm_reshape_super,
+ .manage_reshape = imsm_manage_reshape,
#endif
.match_home = match_home_imsm,
.uuid_from_super= uuid_from_super_imsm,
.free_super = free_super_imsm,
.match_metadata_desc = match_metadata_desc_imsm,
.container_content = container_content_imsm,
- .default_geometry = default_geometry_imsm,
- .get_disk_controller_domain = imsm_get_disk_controller_domain,
- .reshape_super = imsm_reshape_super,
- .manage_reshape = imsm_manage_reshape,
.external = 1,
.name = "imsm",