[SYS_DEV_UNKNOWN] = "Unknown",
[SYS_DEV_SAS] = "SAS",
[SYS_DEV_SATA] = "SATA",
- [SYS_DEV_NVME] = "NVMe"
+ [SYS_DEV_NVME] = "NVMe",
+ [SYS_DEV_VMD] = "VMD"
};
const char *get_sys_dev_type(enum sys_dev_type type)
static struct intel_hba * find_intel_hba(struct intel_hba *hba, struct sys_dev *device)
{
- struct intel_hba *result=NULL;
+ struct intel_hba *result;
+
for (result = hba; result; result = result->next) {
if (result->type == device->type && strcmp(result->path, device->path) == 0)
break;
if (device->type != hba->type)
return 2;
+ /* Always forbid spanning between VMD domains (seen as different controllers by mdadm) */
+ if (device->type == SYS_DEV_VMD && !path_attached_to_hba(device->path, hba->path))
+ return 2;
+
/* Multiple same type HBAs can be used if they share the same OROM */
const struct imsm_orom *device_orom = get_orom_by_device_id(device->dev_id);
}
if (not_supported)
- dprintf("%s (IMSM): Unknown attributes : %x\n", Name, not_supported);
+ dprintf("(IMSM): Unknown attributes : %x\n", not_supported);
ret_val = 0;
}
* this hba
*/
dir = opendir("/sys/dev/block");
- for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
+ if (!dir)
+ return 1;
+
+ for (ent = readdir(dir); ent; ent = readdir(dir)) {
int fd;
char model[64];
char vendor[64];
break;
}
sprintf(device, "/sys/dev/block/%d:%d/device/type", major, minor);
- if (load_sys(device, buf) != 0) {
+ if (load_sys(device, buf, sizeof(buf)) != 0) {
if (verbose > 0)
pr_err("failed to read device type for %s\n",
path);
vendor[0] = '\0';
model[0] = '\0';
sprintf(device, "/sys/dev/block/%d:%d/device/vendor", major, minor);
- if (load_sys(device, buf) == 0) {
+ if (load_sys(device, buf, sizeof(buf)) == 0) {
strncpy(vendor, buf, sizeof(vendor));
vendor[sizeof(vendor) - 1] = '\0';
c = (char *) &vendor[sizeof(vendor) - 1];
}
sprintf(device, "/sys/dev/block/%d:%d/device/model", major, minor);
- if (load_sys(device, buf) == 0) {
+ if (load_sys(device, buf, sizeof(buf)) == 0) {
strncpy(model, buf, sizeof(model));
model[sizeof(model) - 1] = '\0';
c = (char *) &model[sizeof(model) - 1];
return err;
}
+static int print_vmd_attached_devs(struct sys_dev *hba)
+{
+ struct dirent *ent;
+ DIR *dir;
+ char path[292];
+ char link[256];
+ char *c, *rp;
+
+ if (hba->type != SYS_DEV_VMD)
+ return 1;
+
+ /* scan the nvme driver's sysfs directory looking for NVMe
+ * devices attached to this VMD domain
+ */
+ dir = opendir("/sys/bus/pci/drivers/nvme");
+ if (!dir)
+ return 1;
+
+ for (ent = readdir(dir); ent; ent = readdir(dir)) {
+ int n;
+
+ /* is 'ent' a device? check that the 'subsystem' link exists and
+ * that its target matches 'bus'
+ */
+ sprintf(path, "/sys/bus/pci/drivers/nvme/%s/subsystem",
+ ent->d_name);
+ n = readlink(path, link, sizeof(link));
+ if (n < 0 || n >= (int)sizeof(link))
+ continue;
+ link[n] = '\0';
+ c = strrchr(link, '/');
+ if (!c)
+ continue;
+ if (strncmp("pci", c+1, strlen("pci")) != 0)
+ continue;
+
+ sprintf(path, "/sys/bus/pci/drivers/nvme/%s", ent->d_name);
+ /* skip devices that are not Intel NVMe (PCI vendor id 0x8086) */
+ if (devpath_to_vendor(path) != 0x8086)
+ continue;
+
+ rp = realpath(path, NULL);
+ if (!rp)
+ continue;
+
+ if (path_attached_to_hba(rp, hba->path)) {
+ printf(" NVMe under VMD : %s\n", rp);
+ }
+ free(rp);
+ }
+
+ closedir(dir);
+ return 0;
+}
+
static void print_found_intel_controllers(struct sys_dev *elem)
{
for (; elem; elem = elem->next) {
fprintf(stderr, "SAS ");
else if (elem->type == SYS_DEV_NVME)
fprintf(stderr, "NVMe ");
- fprintf(stderr, "RAID controller");
+
+ if (elem->type == SYS_DEV_VMD)
+ fprintf(stderr, "VMD domain");
+ else
+ fprintf(stderr, "RAID controller");
+
if (elem->pci_id)
fprintf(stderr, " at %s", elem->pci_id);
fprintf(stderr, ".\n");
list = find_intel_devices();
if (!list) {
if (verbose > 0)
- pr_err("no active Intel(R) RAID "
- "controller found.\n");
+ pr_err("no active Intel(R) RAID controller found.\n");
return 2;
} else if (verbose > 0)
print_found_intel_controllers(list);
if (controller_path && (compare_paths(hba->path, controller_path) != 0))
continue;
if (!find_imsm_capability(hba)) {
+ char buf[PATH_MAX];
pr_err("imsm capabilities not found for controller: %s (type %s)\n",
- hba->path, get_sys_dev_type(hba->type));
+ hba->type == SYS_DEV_VMD ? vmd_domain_to_controller(hba, buf) : hba->path,
+ get_sys_dev_type(hba->type));
continue;
}
result = 0;
return result;
}
- const struct orom_entry *oroms = get_oroms();
- int i;
+ const struct orom_entry *entry;
- for (i = 0; i < SYS_DEV_MAX && oroms[i].devid_list; i++) {
- print_imsm_capability(&oroms[i].orom);
+ for (entry = orom_entries; entry; entry = entry->next) {
+ if (entry->type == SYS_DEV_VMD) {
+ for (hba = list; hba; hba = hba->next) {
+ if (hba->type == SYS_DEV_VMD) {
+ char buf[PATH_MAX];
+ print_imsm_capability(&entry->orom);
+ printf(" I/O Controller : %s (%s)\n",
+ vmd_domain_to_controller(hba, buf), get_sys_dev_type(hba->type));
+ if (print_vmd_attached_devs(hba)) {
+ if (verbose > 0)
+ pr_err("failed to get devices attached to VMD domain.\n");
+ result |= 2;
+ }
+ printf("\n");
+ }
+ }
+ continue;
+ }
- if (imsm_orom_is_nvme(&oroms[i].orom)) {
+ print_imsm_capability(&entry->orom);
+ if (entry->type == SYS_DEV_NVME) {
for (hba = list; hba; hba = hba->next) {
if (hba->type == SYS_DEV_NVME)
printf(" NVMe Device : %s\n", hba->path);
}
+ printf("\n");
continue;
}
struct devid_list *devid;
- for (devid = oroms[i].devid_list; devid; devid = devid->next) {
+ for (devid = entry->devid_list; devid; devid = devid->next) {
hba = device_by_id(devid->devid);
if (!hba)
continue;
host_base = ahci_get_port_count(hba->path, &port_count);
if (ahci_enumerate_ports(hba->path, port_count, host_base, verbose)) {
if (verbose > 0)
- pr_err("failed to enumerate "
- "ports on SATA controller at %s.\n", hba->pci_id);
+ pr_err("failed to enumerate ports on SATA controller at %s.\n", hba->pci_id);
result |= 2;
}
}
for (hba = list; hba; hba = hba->next) {
if (controller_path && (compare_paths(hba->path,controller_path) != 0))
continue;
- if (!find_imsm_capability(hba) && verbose > 0)
- pr_err("IMSM_DETAIL_PLATFORM_ERROR=NO_IMSM_CAPABLE_DEVICE_UNDER_%s\n", hba->path);
+ if (!find_imsm_capability(hba) && verbose > 0) {
+ char buf[PATH_MAX];
+ pr_err("IMSM_DETAIL_PLATFORM_ERROR=NO_IMSM_CAPABLE_DEVICE_UNDER_%s\n",
+ hba->type == SYS_DEV_VMD ? vmd_domain_to_controller(hba, buf) : hba->path);
+ }
else
result = 0;
}
- const struct orom_entry *oroms = get_oroms();
- int i;
+ const struct orom_entry *entry;
- for (i = 0; i < SYS_DEV_MAX && oroms[i].devid_list; i++)
- print_imsm_capability_export(&oroms[i].orom);
+ for (entry = orom_entries; entry; entry = entry->next) {
+ if (entry->type == SYS_DEV_VMD) {
+ for (hba = list; hba; hba = hba->next)
+ print_imsm_capability_export(&entry->orom);
+ continue;
+ }
+ print_imsm_capability_export(&entry->orom);
+ }
return result;
}
case 5:
return map->num_members - 1;
default:
- dprintf("%s: unsupported raid level\n", __func__);
+ dprintf("unsupported raid level\n");
return 0;
}
}
static int load_imsm_migr_rec(struct intel_super *super, struct mdinfo *info)
{
struct mdinfo *sd;
- struct dl *dl = NULL;
+ struct dl *dl;
char nm[30];
int retval = -1;
int fd = -1;
struct imsm_dev *dev;
- struct imsm_map *map = NULL;
+ struct imsm_map *map;
int slot = -1;
/* find map under migration */
int update_memory_size = 0;
- dprintf("imsm_create_metadata_checkpoint_update(enter)\n");
+ dprintf("(enter)\n");
if (u == NULL)
return 0;
*u = xcalloc(1, update_memory_size);
if (*u == NULL) {
- dprintf("error: cannot get memory for "
- "imsm_create_metadata_checkpoint_update update\n");
+ dprintf("error: cannot get memory\n");
return 0;
}
(*u)->type = update_general_migration_checkpoint;
(*u)->curr_migr_unit = __le32_to_cpu(super->migr_rec->curr_migr_unit);
- dprintf("imsm_create_metadata_checkpoint_update: prepared for %u\n",
- (*u)->curr_migr_unit);
+ dprintf("prepared for %u\n", (*u)->curr_migr_unit);
return update_memory_size;
}
int len;
struct imsm_update_general_migration_checkpoint *u;
struct imsm_dev *dev;
- struct imsm_map *map = NULL;
+ struct imsm_map *map;
/* find map under migration */
dev = imsm_get_device_during_migration(super);
*/
component_size_alligment = component_size % (chunk_size/512);
- dprintf("imsm_component_size_aligment_check(Level: %i, "
- "chunk_size = %i, component_size = %llu), "
- "component_size_alligment = %u\n",
+ dprintf("(Level: %i, chunk_size = %i, component_size = %llu), component_size_alligment = %u\n",
level, chunk_size, component_size,
component_size_alligment);
dprintf("imsm: reported component size alligned from %llu ",
component_size);
component_size -= component_size_alligment;
- dprintf("to %llu (%i).\n",
+ dprintf_cont("to %llu (%i).\n",
component_size, component_size_alligment);
}
info->reshape_progress = blocks_per_unit * units;
- dprintf("IMSM: General Migration checkpoint : %llu "
- "(%llu) -> read reshape progress : %llu\n",
+ dprintf("IMSM: General Migration checkpoint : %llu (%llu) -> read reshape progress : %llu\n",
(unsigned long long)units,
(unsigned long long)blocks_per_unit,
info->reshape_progress);
*/
max_enough = max(max_enough, enough);
}
- dprintf("%s: enough: %d\n", __func__, max_enough);
+ dprintf("enough: %d\n", max_enough);
info->container_enough = max_enough;
if (super->disks) {
* for each disk in array */
struct mdinfo *getinfo_super_disks_imsm(struct supertype *st)
{
- struct mdinfo *mddev = NULL;
+ struct mdinfo *mddev;
struct intel_super *super = st->sb;
struct imsm_disk *disk;
int count = 0;
if (posix_memalign((void**)&anchor, 512, 512) != 0) {
if (devname)
- pr_err("Failed to allocate imsm anchor buffer"
- " on %s\n", devname);
+ pr_err("Failed to allocate imsm anchor buffer on %s\n", devname);
return 1;
}
if (read(fd, anchor, 512) != 512) {
free(anchor);
if (posix_memalign(&super->migr_rec_buf, 512, MIGR_REC_BUF_SIZE) != 0) {
- pr_err("%s could not allocate migr_rec buffer\n", __func__);
+ pr_err("could not allocate migr_rec buffer\n");
free(super->buf);
return 2;
}
if (devname) {
struct intel_hba *hba = super->hba;
- pr_err("%s is attached to Intel(R) %s RAID "
- "controller (%s),\n"
- " but the container is assigned to Intel(R) "
- "%s RAID controller (",
+ pr_err("%s is attached to Intel(R) %s %s (%s),\n"
+ " but the container is assigned to Intel(R) %s %s (",
devname,
get_sys_dev_type(hba_name->type),
+ hba_name->type == SYS_DEV_VMD ? "domain" : "RAID controller",
hba_name->pci_id ? : "Err!",
- get_sys_dev_type(super->hba->type));
+ get_sys_dev_type(super->hba->type),
+ hba->type == SYS_DEV_VMD ? "domain" : "RAID controller");
while (hba) {
fprintf(stderr, "%s", hba->pci_id ? : "Err!");
hba = hba->next;
}
fprintf(stderr, ").\n"
- " Mixing devices attached to different controllers "
- "is not allowed.\n");
+ " Mixing devices attached to different %s is not allowed.\n",
+ hba_name->type == SYS_DEV_VMD ? "VMD domains" : "controllers");
}
return 2;
}
if (tbl_mpb->family_num == mpb->family_num) {
if (tbl_mpb->check_sum == mpb->check_sum) {
- dprintf("%s: mpb from %d:%d matches %d:%d\n",
- __func__, super->disks->major,
+ dprintf("mpb from %d:%d matches %d:%d\n",
+ super->disks->major,
super->disks->minor,
table[i]->disks->major,
table[i]->disks->minor);
*/
struct intel_disk *idisk;
- dprintf("%s: mpb from %d:%d replaces %d:%d\n",
- __func__, super->disks->major,
+ dprintf("mpb from %d:%d replaces %d:%d\n",
+ super->disks->major,
super->disks->minor,
table[i]->disks->major,
table[i]->disks->minor);
idisk->disk.status |= CONFIGURED_DISK;
}
- dprintf("%s: mpb from %d:%d prefer %d:%d\n",
- __func__, super->disks->major,
+ dprintf("mpb from %d:%d prefer %d:%d\n",
+ super->disks->major,
super->disks->minor,
table[i]->disks->major,
table[i]->disks->minor);
idisk->owner == IMSM_UNKNOWN_OWNER)
ok_count++;
else
- dprintf("%s: '%.16s' owner %d != %d\n",
- __func__, disk->serial, idisk->owner,
+ dprintf("'%.16s' owner %d != %d\n",
+ disk->serial, idisk->owner,
owner);
} else {
- dprintf("%s: unknown disk %x [%d]: %.16s\n",
- __func__, __le32_to_cpu(mpb->family_num), i,
+ dprintf("unknown disk %x [%d]: %.16s\n",
+ __le32_to_cpu(mpb->family_num), i,
disk->serial);
break;
}
s = NULL;
if (!s)
- dprintf("%s: marking family: %#x from %d:%d offline\n",
- __func__, mpb->family_num,
+ dprintf("marking family: %#x from %d:%d offline\n",
+ mpb->family_num,
super_table[i]->disks->major,
super_table[i]->disks->minor);
super_table[i] = s;
champion = s;
if (conflict)
- pr_err("Chose family %#x on '%s', "
- "assemble conflicts to new container with '--update=uuid'\n",
+ pr_err("Chose family %#x on '%s', assemble conflicts to new container with '--update=uuid'\n",
__le32_to_cpu(s->anchor->family_num), s->disks->devname);
/* collect all dl's onto 'champion', and update them to
static int get_super_block(struct intel_super **super_list, char *devnm, char *devname,
int major, int minor, int keep_fd)
{
- struct intel_super*s = NULL;
+ struct intel_super *s;
char nm[32];
int dfd = -1;
int err = 0;
/* retry the load if we might have raced against mdmon */
if (rv == 3) {
- struct mdstat_ent *mdstat = mdstat_by_component(fd2devnm(fd));
+ struct mdstat_ent *mdstat = NULL;
+ char *name = fd2kname(fd);
+
+ if (name)
+ mdstat = mdstat_by_component(name);
if (mdstat && mdmon_running(mdstat->devnm) && getpid() != mdmon_pid(mdstat->devnm)) {
for (retry = 0; retry < 3; retry++) {
if (rv) {
if (devname)
- pr_err("Failed to load all information "
- "sections on %s\n", devname);
+ pr_err("Failed to load all information sections on %s\n", devname);
free_imsm(super);
return rv;
}
unsigned long long num_data_stripes;
if (super->orom && mpb->num_raid_devs >= super->orom->vpa) {
- pr_err("This imsm-container already has the "
- "maximum of %d volumes\n", super->orom->vpa);
+ pr_err("This imsm-container already has the maximum of %d volumes\n", super->orom->vpa);
return 0;
}
}
if (posix_memalign(&super->migr_rec_buf, 512,
MIGR_REC_BUF_SIZE) != 0) {
- pr_err("%s could not allocate migr_rec buffer\n",
- __func__);
+ pr_err("could not allocate migr_rec buffer\n");
free(super->buf);
free(super);
free(mpb_new);
if (info->level == 1 && info->raid_disks > 2) {
free(dev);
free(dv);
- pr_err("imsm does not support more than 2 disks"
- "in a raid1 volume\n");
+ pr_err("imsm does not support more than 2 disks in a raid1 volume\n");
return 0;
}
super = NULL;
}
if (!super) {
- pr_err("%s could not allocate superblock\n", __func__);
+ pr_err("could not allocate superblock\n");
return 0;
}
if (posix_memalign(&super->migr_rec_buf, 512, MIGR_REC_BUF_SIZE) != 0) {
- pr_err("%s could not allocate migr_rec buffer\n", __func__);
+ pr_err("could not allocate migr_rec buffer\n");
free(super->buf);
free(super);
return 0;
* is prepared.
*/
if (!st->update_tail) {
- pr_err("%s shall be used in mdmon context only"
- "(line %d).\n", __func__, __LINE__);
+ pr_err("shall be used in mdmon context only\n");
return 1;
}
dd = xcalloc(1, sizeof(*dd));
__u32 sum;
struct dl *d;
- spare->mpb_size = __cpu_to_le32(sizeof(struct imsm_super)),
- spare->generation_num = __cpu_to_le32(1UL),
+ spare->mpb_size = __cpu_to_le32(sizeof(struct imsm_super));
+ spare->generation_num = __cpu_to_le32(1UL);
spare->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
- spare->num_disks = 1,
- spare->num_raid_devs = 0,
- spare->cache_size = mpb->cache_size,
- spare->pwr_cycle_count = __cpu_to_le32(1),
+ spare->num_disks = 1;
+ spare->num_raid_devs = 0;
+ spare->cache_size = mpb->cache_size;
+ spare->pwr_cycle_count = __cpu_to_le32(1);
snprintf((char *) spare->sig, MAX_SIGNATURE_LENGTH,
MPB_SIGNATURE MPB_VERSION_RAID0);
spare->check_sum = __cpu_to_le32(sum);
if (store_imsm_mpb(d->fd, spare)) {
- pr_err("%s: failed for device %d:%d %s\n",
- __func__, d->major, d->minor, strerror(errno));
+ pr_err("failed for device %d:%d %s\n",
+ d->major, d->minor, strerror(errno));
return 1;
}
if (doclose) {
if (store_imsm_mpb(d->fd, mpb))
fprintf(stderr,
- "%s: failed for device %d:%d (fd: %d)%s\n",
- __func__, d->major, d->minor,
+ "failed for device %d:%d (fd: %d)%s\n",
+ d->major, d->minor,
d->fd, strerror(errno));
if (doclose) {
{
int fd;
unsigned long long ldsize;
- struct intel_super *super=NULL;
+ struct intel_super *super;
int rv = 0;
if (level != LEVEL_CONTAINER)
#if DEBUG
char str[256];
fd2devname(fd, str);
- dprintf("validate_geometry_imsm_container: fd: %d %s orom: %p rv: %d raiddisk: %d\n",
+ dprintf("fd: %d %s orom: %p rv: %d raiddisk: %d\n",
fd, str, super->orom, rv, raiddisks);
#endif
/* no orom/efi or non-intel hba of the disk */
if (super->orom) {
if (raiddisks > super->orom->tds) {
if (verbose)
- pr_err("%d exceeds maximum number of"
- " platform supported disks: %d\n",
+ pr_err("%d exceeds maximum number of platform supported disks: %d\n",
raiddisks, super->orom->tds);
free_imsm(super);
return 0;
int dpa, int verbose)
{
struct mdstat_ent *mdstat = mdstat_read(0, 0);
- struct mdstat_ent *memb = NULL;
+ struct mdstat_ent *memb;
int count = 0;
int num = 0;
- struct md_list *dv = NULL;
+ struct md_list *dv;
int found;
for (memb = mdstat ; memb ; memb = memb->next) {
if (num > 0)
fd = open(path, O_RDONLY, 0);
if ((num <= 0) || (fd < 0)) {
- pr_vrb(": Cannot open %s: %s\n",
+ pr_vrb("Cannot open %s: %s\n",
dev->name, strerror(errno));
}
free(path);
for (vol = mdstat ; vol ; vol = vol->next) {
if ((vol->active > 0) &&
vol->metadata_version &&
- is_container_member(vol, memb->dev)) {
+ is_container_member(vol, memb->devnm)) {
found++;
count++;
}
}
if (*devlist && (found < dpa)) {
dv = xcalloc(1, sizeof(*dv));
- dv->devname = xmalloc(strlen(memb->dev) + strlen("/dev/") + 1);
- sprintf(dv->devname, "%s%s", "/dev/", memb->dev);
+ dv->devname = xmalloc(strlen(memb->devnm) + strlen("/dev/") + 1);
+ sprintf(dv->devname, "%s%s", "/dev/", memb->devnm);
dv->found = found;
dv->used = 0;
dv->next = *devlist;
{
int i;
struct md_list *devlist = NULL;
- struct md_list *dv = NULL;
+ struct md_list *dv;
for(i = 0; i < 12; i++) {
dv = xcalloc(1, sizeof(*dv));
get_devices(const char *hba_path)
{
struct md_list *devlist = NULL;
- struct md_list *dv = NULL;
+ struct md_list *dv;
struct dirent *ent;
DIR *dir;
int err = 0;
{
struct md_list *tmpdev;
int count = 0;
- struct supertype *st = NULL;
+ struct supertype *st;
/* first walk the list of devices to find a consistent set
* that match the criterea, if that is possible.
*found = 0;
st = match_metadata_desc_imsm("imsm");
if (st == NULL) {
- pr_vrb(": cannot allocate memory for imsm supertype\n");
+ pr_vrb("cannot allocate memory for imsm supertype\n");
return 0;
}
continue;
tst = dup_super(st);
if (tst == NULL) {
- pr_vrb(": cannot allocate memory for imsm supertype\n");
+ pr_vrb("cannot allocate memory for imsm supertype\n");
goto err_1;
}
tmpdev->container = 0;
dfd = dev_open(devname, O_RDONLY|O_EXCL);
if (dfd < 0) {
- dprintf(": cannot open device %s: %s\n",
+ dprintf("cannot open device %s: %s\n",
devname, strerror(errno));
tmpdev->used = 2;
} else if (fstat(dfd, &stb)< 0) {
/* Impossible! */
- dprintf(": fstat failed for %s: %s\n",
+ dprintf("fstat failed for %s: %s\n",
devname, strerror(errno));
tmpdev->used = 2;
} else if ((stb.st_mode & S_IFMT) != S_IFBLK) {
- dprintf(": %s is not a block device.\n",
+ dprintf("%s is not a block device.\n",
devname);
tmpdev->used = 2;
} else if (must_be_container(dfd)) {
struct supertype *cst;
cst = super_by_fd(dfd, NULL);
if (cst == NULL) {
- dprintf(": cannot recognize container type %s\n",
+ dprintf("cannot recognize container type %s\n",
devname);
tmpdev->used = 2;
} else if (tst->ss != st->ss) {
- dprintf(": non-imsm container - ignore it: %s\n",
+ dprintf("non-imsm container - ignore it: %s\n",
devname);
tmpdev->used = 2;
} else if (!tst->ss->load_container ||
} else {
tmpdev->st_rdev = stb.st_rdev;
if (tst->ss->load_super(tst,dfd, NULL)) {
- dprintf(": no RAID superblock on %s\n",
+ dprintf("no RAID superblock on %s\n",
devname);
tmpdev->used = 2;
} else if (tst->ss->compare_super == NULL) {
- dprintf(": Cannot assemble %s metadata on %s\n",
+ dprintf("Cannot assemble %s metadata on %s\n",
tst->ss->name, devname);
tmpdev->used = 2;
}
* Or, if we are auto assembling, we just ignore the second
* for now.
*/
- dprintf(": superblock on %s doesn't match others - assembly aborted\n",
+ dprintf("superblock on %s doesn't match others - assembly aborted\n",
devname);
goto loop;
}
if (iter->array.state & (1<<MD_SB_BLOCK_VOLUME)) {
/* do not assemble arrays with unsupported
configurations */
- dprintf(": Cannot activate member %s.\n",
+ dprintf("Cannot activate member %s.\n",
iter->text_version);
} else
count++;
sysfs_free(head);
} else {
- dprintf(" no valid super block on device list: err: %d %p\n",
+ dprintf("No valid super block on device list: err: %d %p\n",
err, st->sb);
}
} else {
- dprintf(" no more devices to examin\n");
+ dprintf("no more devices to examine\n");
}
for (tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
}
static int
-count_volumes(char *hba, int dpa, int verbose)
+count_volumes(struct intel_hba *hba, int dpa, int verbose)
{
- struct md_list *devlist = NULL;
+ struct sys_dev *idev, *intel_devices = find_intel_devices();
int count = 0;
- int found = 0;;
+ const struct orom_entry *entry;
+ struct devid_list *dv, *devid_list;
- devlist = get_devices(hba);
- /* if no intel devices return zero volumes */
- if (devlist == NULL)
+ if (!hba || !hba->path)
return 0;
- count = active_arrays_by_format("imsm", hba, &devlist, dpa, verbose);
- dprintf(" path: %s active arrays: %d\n", hba, count);
- if (devlist == NULL)
+ for (idev = intel_devices; idev; idev = idev->next) {
+ if (strstr(idev->path, hba->path))
+ break;
+ }
+
+ if (!idev || !idev->dev_id)
return 0;
- do {
- found = 0;
- count += count_volumes_list(devlist,
- NULL,
- verbose,
- &found);
- dprintf("found %d count: %d\n", found, count);
- } while (found);
-
- dprintf("path: %s total number of volumes: %d\n", hba, count);
-
- while(devlist) {
- struct md_list *dv = devlist;
- devlist = devlist->next;
- free(dv->devname);
- free(dv);
+
+ entry = get_orom_entry_by_device_id(idev->dev_id);
+
+ if (!entry || !entry->devid_list)
+ return 0;
+
+ devid_list = entry->devid_list;
+ for (dv = devid_list; dv; dv = dv->next) {
+ struct md_list *devlist;
+ struct sys_dev *device = device_by_id(dv->devid);
+ char *hba_path;
+ int found = 0;
+
+ if (device)
+ hba_path = device->path;
+ else
+ return 0;
+
+ /* VMD has one orom entry for all domain, but spanning is not allowed.
+ * VMD arrays should be counted per domain (controller), so skip
+ * domains that are not the given one.
+ */
+ if ((hba->type == SYS_DEV_VMD) &&
+ (strncmp(device->path, hba->path, strlen(device->path)) != 0))
+ continue;
+
+ devlist = get_devices(hba_path);
+ /* if no intel devices return zero volumes */
+ if (devlist == NULL)
+ return 0;
+
+ count += active_arrays_by_format("imsm", hba_path, &devlist, dpa, verbose);
+ dprintf("path: %s active arrays: %d\n", hba_path, count);
+ if (devlist == NULL)
+ return 0;
+ do {
+ found = 0;
+ count += count_volumes_list(devlist,
+ NULL,
+ verbose,
+ &found);
+ dprintf("found %d count: %d\n", found, count);
+ } while (found);
+
+ dprintf("path: %s total number of volumes: %d\n", hba_path, count);
+
+ while (devlist) {
+ struct md_list *dv = devlist;
+ devlist = devlist->next;
+ free(dv->devname);
+ free(dv);
+ }
}
return count;
}
{
/* check/set platform and metadata limits/defaults */
if (super->orom && raiddisks > super->orom->dpa) {
- pr_vrb(": platform supports a maximum of %d disks per array\n",
+ pr_vrb("platform supports a maximum of %d disks per array\n",
super->orom->dpa);
return 0;
}
/* capabilities of OROM tested - copied from validate_geometry_imsm_volume */
if (!is_raid_level_supported(super->orom, level, raiddisks)) {
- pr_vrb(": platform does not support raid%d with %d disk%s\n",
+ pr_vrb("platform does not support raid%d with %d disk%s\n",
level, raiddisks, raiddisks > 1 ? "s" : "");
return 0;
}
*chunk = imsm_default_chunk(super->orom);
if (super->orom && !imsm_orom_has_chunk(super->orom, *chunk)) {
- pr_vrb(": platform does not support a chunk size of: "
- "%d\n", *chunk);
+ pr_vrb("platform does not support a chunk size of: %d\n", *chunk);
return 0;
}
if (layout != imsm_level_to_layout(level)) {
if (level == 5)
- pr_vrb(": imsm raid 5 only supports the left-asymmetric layout\n");
+ pr_vrb("imsm raid 5 only supports the left-asymmetric layout\n");
else if (level == 10)
- pr_vrb(": imsm raid 10 only supports the n2 layout\n");
+ pr_vrb("imsm raid 10 only supports the n2 layout\n");
else
- pr_vrb(": imsm unknown layout %#x for this raid level %d\n",
+ pr_vrb("imsm unknown layout %#x for this raid level %d\n",
layout, level);
return 0;
}
if (super->orom && (super->orom->attr & IMSM_OROM_ATTR_2TB) == 0 &&
(calc_array_size(level, raiddisks, layout, *chunk, size) >> 32) > 0) {
- pr_vrb(": platform does not support a volume size over 2TB\n");
+ pr_vrb("platform does not support a volume size over 2TB\n");
return 0;
}
mpb = super->anchor;
if (!validate_geometry_imsm_orom(super, level, layout, raiddisks, chunk, size, verbose)) {
- pr_err("RAID gemetry validation failed. "
- "Cannot proceed with the action(s).\n");
+ pr_err("RAID geometry validation failed. Cannot proceed with the action(s).\n");
return 0;
}
if (!dev) {
}
if (dcnt < raiddisks) {
if (verbose)
- pr_err("imsm: Not enough "
- "devices with space for this array "
- "(%d < %d)\n",
+ pr_err("imsm: Not enough devices with space for this array (%d < %d)\n",
dcnt, raiddisks);
return 0;
}
}
if (!dl) {
if (verbose)
- pr_err("%s is not in the "
- "same imsm set\n", dev);
+ pr_err("%s is not in the same imsm set\n", dev);
return 0;
} else if (super->orom && dl->index < 0 && mpb->num_raid_devs) {
/* If a volume is present then the current creation attempt
* understand this configuration (all member disks must be
* members of each array in the container).
*/
- pr_err("%s is a spare and a volume"
- " is already defined for this container\n", dev);
- pr_err("The option-rom requires all member"
- " disks to be a member of all volumes\n");
+ pr_err("%s is a spare and a volume is already defined for this container\n", dev);
+ pr_err("The option-rom requires all member disks to be a member of all volumes\n");
return 0;
} else if (super->orom && mpb->num_raid_devs > 0 &&
mpb->num_disks != raiddisks) {
- pr_err("The option-rom requires all member"
- " disks to be a member of all volumes\n");
+ pr_err("The option-rom requires all member disks to be a member of all volumes\n");
return 0;
}
if (!check_env("IMSM_NO_PLATFORM") &&
mpb->num_raid_devs > 0 && size && size != maxsize) {
- pr_err("attempting to create a second "
- "volume with size less then remaining space. "
- "Aborting...\n");
+ pr_err("attempting to create a second volume with size less then remaining space. Aborting...\n");
return 0;
}
if (maxsize < size || maxsize == 0) {
if (verbose) {
if (maxsize == 0)
- pr_err("no free space"
- " left on device. Aborting...\n");
+ pr_err("no free space left on device. Aborting...\n");
else
- pr_err("not enough space"
- " to create volume of given size"
- " (%llu < %llu). Aborting...\n",
+ pr_err("not enough space to create volume of given size (%llu < %llu). Aborting...\n",
maxsize, size);
}
return 0;
*freesize = maxsize;
if (super->orom) {
- int count = count_volumes(super->hba->path,
+ int count = count_volumes(super->hba,
super->orom->dpa, verbose);
if (super->orom->vphba <= count) {
- pr_vrb(": platform does not support more than %d raid volumes.\n",
+ pr_vrb("platform does not support more than %d raid volumes.\n",
super->orom->vphba);
return 0;
}
}
if (!check_env("IMSM_NO_PLATFORM") &&
mpb->num_raid_devs > 0 && size && size != maxsize) {
- pr_err("attempting to create a second "
- "volume with size less then remaining space. "
- "Aborting...\n");
+ pr_err("attempting to create a second volume with size less then remaining space. Aborting...\n");
return 0;
}
cnt = 0;
created */
if (super->orom && freesize) {
int count;
- count = count_volumes(super->hba->path,
+ count = count_volumes(super->hba,
super->orom->dpa, verbose);
if (super->orom->vphba <= count) {
- pr_vrb(": platform does not support more"
- " than %d raid volumes.\n",
+ pr_vrb("platform does not support more than %d raid volumes.\n",
super->orom->vphba);
return 0;
}
* IMSM_ORD_REBUILD, so assume they are missing and the
* disk_ord_tbl was not correctly updated
*/
- dprintf("%s: failed to locate out-of-sync disk\n", __func__);
+ dprintf("failed to locate out-of-sync disk\n");
return;
}
/* do not assemble arrays when not all attributes are supported */
if (imsm_check_attributes(mpb->attributes) == 0) {
sb_errors = 1;
- pr_err("Unsupported attributes in IMSM metadata."
- "Arrays activation is blocked.\n");
+ pr_err("Unsupported attributes in IMSM metadata. Arrays activation is blocked.\n");
}
/* check for bad blocks */
if (imsm_bbm_log_size(super->anchor)) {
- pr_err("BBM log found in IMSM metadata."
- "Arrays activation is blocked.\n");
+ pr_err("BBM log found in IMSM metadata. Arrays activation is blocked.\n");
sb_errors = 1;
}
*/
if (dev->vol.migr_state &&
(migr_type(dev) == MIGR_STATE_CHANGE)) {
- pr_err("cannot assemble volume '%.16s':"
- " unsupported migration in progress\n",
+ pr_err("cannot assemble volume '%.16s': unsupported migration in progress\n",
dev->volume);
continue;
}
map->num_members, /* raid disks */
&chunk, join_u32(dev->size_low, dev->size_high),
1 /* verbose */)) {
- pr_err("IMSM RAID geometry validation"
- " failed. Array %s activation is blocked.\n",
+ pr_err("IMSM RAID geometry validation failed. Array %s activation is blocked.\n",
dev->volume);
this->array.state |=
(1<<MD_SB_BLOCK_CONTAINER_RESHAPE) |
struct imsm_super *mpb = super->anchor;
if (atoi(inst) >= mpb->num_raid_devs) {
- pr_err("%s: subarry index %d, out of range\n",
- __func__, atoi(inst));
+ pr_err("subarray index %d, out of range\n", atoi(inst));
return -ENODEV;
}
case IMSM_T_STATE_NORMAL: /* transition to normal state */
dprintf("normal: ");
if (is_rebuilding(dev)) {
- dprintf("while rebuilding");
+ dprintf_cont("while rebuilding");
/* check if recovery is really finished */
for (mdi = a->info.devs; mdi ; mdi = mdi->next)
if (mdi->recovery_start != MaxSector) {
break;
}
if (recovery_not_finished) {
- dprintf("\nimsm: Rebuild has not finished yet, "
- "state not changed");
+ dprintf_cont("\n");
+ dprintf("Rebuild has not finished yet, state not changed");
if (a->last_checkpoint < mdi->recovery_start) {
a->last_checkpoint = mdi->recovery_start;
super->updates_pending++;
break;
}
if (is_gen_migration(dev)) {
- dprintf("while general migration");
+ dprintf_cont("while general migration");
if (a->last_checkpoint >= a->info.component_size)
end_migration(dev, super, map_state);
else
}
break;
case IMSM_T_STATE_DEGRADED: /* transition to degraded state */
- dprintf("degraded: ");
+ dprintf_cont("degraded: ");
if ((map->map_state != map_state) &&
!dev->vol.migr_state) {
- dprintf("mark degraded");
+ dprintf_cont("mark degraded");
map->map_state = map_state;
super->updates_pending++;
a->last_checkpoint = 0;
break;
}
if (is_rebuilding(dev)) {
- dprintf("while rebuilding.");
+ dprintf_cont("while rebuilding.");
if (map->map_state != map_state) {
- dprintf(" Map state change");
+ dprintf_cont(" Map state change");
end_migration(dev, super, map_state);
super->updates_pending++;
}
break;
}
if (is_gen_migration(dev)) {
- dprintf("while general migration");
+ dprintf_cont("while general migration");
if (a->last_checkpoint >= a->info.component_size)
end_migration(dev, super, map_state);
else {
break;
}
if (is_initializing(dev)) {
- dprintf("while initialization.");
+ dprintf_cont("while initialization.");
map->map_state = map_state;
super->updates_pending++;
break;
}
break;
case IMSM_T_STATE_FAILED: /* transition to failed state */
- dprintf("failed: ");
+ dprintf_cont("failed: ");
if (is_gen_migration(dev)) {
- dprintf("while general migration");
+ dprintf_cont("while general migration");
map->map_state = map_state;
super->updates_pending++;
break;
}
if (map->map_state != map_state) {
- dprintf("mark failed");
+ dprintf_cont("mark failed");
end_migration(dev, super, map_state);
super->updates_pending++;
a->last_checkpoint = 0;
}
break;
default:
- dprintf("state %i\n", map_state);
+ dprintf_cont("state %i\n", map_state);
}
- dprintf("\n");
-
+ dprintf_cont("\n");
}
static int store_imsm_mpb(int fd, struct imsm_super *mpb)
dl = NULL;
if (dl)
- dprintf("%s: found %x:%x\n", __func__, dl->major, dl->minor);
+ dprintf("found %x:%x\n", dl->major, dl->minor);
return dl;
}
/* Cannot activate another spare if rebuild is in progress already
*/
if (is_rebuilding(dev)) {
- dprintf("imsm: No spare activation allowed. "
- "Rebuild in progress already.\n");
+ dprintf("imsm: No spare activation allowed. Rebuild in progress already.\n");
return NULL;
}
IMSM_T_STATE_DEGRADED)
return NULL;
+ if (get_imsm_map(dev, MAP_0)->map_state == IMSM_T_STATE_UNINITIALIZED) {
+ dprintf("imsm: No spare activation allowed. Volume is not initialized.\n");
+ return NULL;
+ }
+
/*
* If there are any failed disks check state of the other volume.
* Block rebuild if the another one is failed until failed disks
* are removed from container.
*/
if (failed) {
- dprintf("found failed disks in %.*s, check if there another"
- "failed sub-array.\n",
+ dprintf("found failed disks in %.*s, check if there another failed sub-array.\n",
MAX_RAID_SERIAL_LEN, dev->volume);
/* check if states of the other volumes allow for rebuild */
for (i = 0; i < super->anchor->num_raid_devs; i++) {
static struct dl *get_disk_super(struct intel_super *super, int major, int minor)
{
- struct dl *dl = NULL;
+ struct dl *dl;
+
for (dl = super->disks; dl; dl = dl->next)
if ((dl->major == major) && (dl->minor == minor))
return dl;
static int remove_disk_super(struct intel_super *super, int major, int minor)
{
- struct dl *prev = NULL;
+ struct dl *prev;
struct dl *dl;
prev = NULL;
super->disks = dl->next;
dl->next = NULL;
__free_imsm_disk(dl);
- dprintf("%s: removed %x:%x\n",
- __func__, major, minor);
+ dprintf("removed %x:%x\n", major, minor);
break;
}
prev = dl;
static int add_remove_disk_update(struct intel_super *super)
{
int check_degraded = 0;
- struct dl *disk = NULL;
+ struct dl *disk;
+
/* add/remove some spares to/from the metadata/contrainer */
while (super->disk_mgmt_list) {
struct dl *disk_cfg;
disk_cfg->next = super->disks;
super->disks = disk_cfg;
check_degraded = 1;
- dprintf("%s: added %x:%x\n",
- __func__, disk_cfg->major,
- disk_cfg->minor);
+ dprintf("added %x:%x\n",
+ disk_cfg->major, disk_cfg->minor);
} else if (disk_cfg->action == DISK_REMOVE) {
dprintf("Disk remove action processed: %x.%x\n",
disk_cfg->major, disk_cfg->minor);
void **tofree = NULL;
int ret_val = 0;
- dprintf("apply_reshape_migration_update()\n");
+ dprintf("(enter)\n");
if ((u->subdev < 0) ||
(u->subdev > 1)) {
dprintf("imsm: Error: Wrong subdev: %i\n", u->subdev);
new_disk = get_disk_super(super,
major(u->new_disks[0]),
minor(u->new_disks[0]));
- dprintf("imsm: new disk for reshape is: %i:%i "
- "(%p, index = %i)\n",
+ dprintf("imsm: new disk for reshape is: %i:%i (%p, index = %i)\n",
major(u->new_disks[0]),
minor(u->new_disks[0]),
new_disk, new_disk->index);
struct intel_dev *id;
int ret_val = 0;
- dprintf("apply_size_change_update()\n");
+ dprintf("(enter)\n");
if ((u->subdev < 0) ||
(u->subdev > 1)) {
dprintf("imsm: Error: Wrong subdev: %i\n", u->subdev);
/* calculate new size
*/
blocks_per_member = u->new_size / used_disks;
- dprintf("imsm: apply_size_change_update(size: %llu, "
- "blocks per member: %llu)\n",
+ dprintf("(size: %llu, blocks per member: %llu)\n",
u->new_size, blocks_per_member);
set_blocks_per_member(map, blocks_per_member);
imsm_set_array_size(dev, u->new_size);
break;
if (!dl) {
- pr_err("error: imsm_activate_spare passed "
- "an unknown disk (index: %d)\n",
+ pr_err("error: imsm_activate_spare passed an unknown disk (index: %d)\n",
u->dl->index);
return 0;
}
int ret_val = 0;
unsigned int dev_id;
- dprintf("imsm: apply_reshape_container_disks_update()\n");
+ dprintf("(enter)\n");
/* enable spares to use in array */
for (i = 0; i < delta_disks; i++) {
new_disk = get_disk_super(super,
major(u->new_disks[i]),
minor(u->new_disks[i]));
- dprintf("imsm: new disk for reshape is: %i:%i "
- "(%p, index = %i)\n",
+ dprintf("imsm: new disk for reshape is: %i:%i (%p, index = %i)\n",
major(u->new_disks[i]), minor(u->new_disks[i]),
new_disk, new_disk->index);
if ((new_disk == NULL) ||
struct imsm_update_general_migration_checkpoint *u =
(void *)update->buf;
- dprintf("imsm: process_update() "
- "for update_general_migration_checkpoint called\n");
+ dprintf("called for update_general_migration_checkpoint\n");
/* find device under general migration */
for (id = super->devlist ; id; id = id->next) {
/* handle racing creates: first come first serve */
if (u->dev_idx < mpb->num_raid_devs) {
- dprintf("%s: subarray %d already defined\n",
- __func__, u->dev_idx);
+ dprintf("subarray %d already defined\n", u->dev_idx);
goto create_error;
}
/* check update is next in sequence */
if (u->dev_idx != mpb->num_raid_devs) {
- dprintf("%s: can not create array %d expected index %d\n",
- __func__, u->dev_idx, mpb->num_raid_devs);
+ dprintf("can not create array %d expected index %d\n",
+ u->dev_idx, mpb->num_raid_devs);
goto create_error;
}
continue;
if (disks_overlap(super, i, u)) {
- dprintf("%s: arrays overlap\n", __func__);
+ dprintf("arrays overlap\n");
goto create_error;
}
}
/* check that prepare update was successful */
if (!update->space) {
- dprintf("%s: prepare update failed\n", __func__);
+ dprintf("prepare update failed\n");
goto create_error;
}
for (i = 0; i < new_map->num_members; i++) {
dl = serial_to_dl(inf[i].serial, super);
if (!dl) {
- dprintf("%s: disk disappeared\n", __func__);
+ dprintf("disk disappeared\n");
goto create_error;
}
}
break;
}
default:
- pr_err("error: unsuported process update type:"
- "(type: %d)\n", type);
+ pr_err("error: unsupported process update type: (type: %d)\n", type);
}
}
case update_general_migration_checkpoint:
if (update->len < (int)sizeof(struct imsm_update_general_migration_checkpoint))
return 0;
- dprintf("imsm: prepare_update() "
- "for update_general_migration_checkpoint called\n");
+ dprintf("called for update_general_migration_checkpoint\n");
break;
case update_takeover: {
struct imsm_update_takeover *u = (void *)update->buf;
if (update->len < (int)sizeof(*u))
return 0;
- dprintf("imsm: imsm_prepare_update() for update_reshape\n");
+ dprintf("for update_reshape\n");
for (dl = super->devlist; dl; dl = dl->next) {
int size = sizeof_imsm_dev(dl->dev, 1);
if (update->len < (int)sizeof(*u))
return 0;
- dprintf("imsm: imsm_prepare_update() for update_reshape\n");
+ dprintf("for update_reshape\n");
/* add space for bigger array in update
*/
int i, j, num_members;
__u32 ord;
- dprintf("%s: deleting device[%d] from imsm_super\n",
- __func__, index);
+ dprintf("deleting device[%d] from imsm_super\n", index);
/* shift all indexes down one */
for (iter = super->disks; iter; iter = iter->next)
return 1;
}
- if (orom != orom2) {
+ if ((orom != orom2) || ((hba->type == SYS_DEV_VMD) && (hba != hba2))) {
pr_err("WARNING - IMSM container assembled with disks under different HBAs!\n"
" This operation is not supported and can lead to data loss.\n");
return 1;
{
int rv = -1;
struct intel_super *super = st->sb;
- unsigned long long *target_offsets = NULL;
- int *targets = NULL;
+ unsigned long long *target_offsets;
+ int *targets;
int i;
struct imsm_map *map_dest = get_imsm_map(dev, MAP_0);
int new_disks = map_dest->num_members;
unsigned long long curr_migr_unit;
if (load_imsm_migr_rec(super, info) != 0) {
- dprintf("imsm: ERROR: Cannot read migration record "
- "for checkpoint save.\n");
+ dprintf("imsm: ERROR: Cannot read migration record for checkpoint save.\n");
return 1;
}
__cpu_to_le32(curr_migr_unit *
__le32_to_cpu(super->migr_rec->dest_depth_per_unit));
if (write_imsm_migr_rec(st) < 0) {
- dprintf("imsm: Cannot write migration record "
- "outside backup area\n");
+ dprintf("imsm: Cannot write migration record outside backup area\n");
return 1;
}
{
struct intel_super *super = st->sb;
struct migr_record *migr_rec = super->migr_rec;
- struct imsm_map *map_dest = NULL;
+ struct imsm_map *map_dest;
struct intel_dev *id = NULL;
unsigned long long read_offset;
unsigned long long write_offset;
new_disks,
super,
id->dev)) {
- pr_err("Cannot restore data from backup."
- " Too many failed disks\n");
+ pr_err("Cannot restore data from backup. Too many failed disks\n");
goto abort;
}
if (save_checkpoint_imsm(st, info, UNIT_SRC_NORMAL)) {
/* ignore error == 2, this can mean end of reshape here
*/
- dprintf("imsm: Cannot write checkpoint to "
- "migration record (UNIT_SRC_NORMAL) during restart\n");
+ dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL) during restart\n");
} else
retval = 0;
strncat(disk_path, path, PATH_MAX - strlen(disk_path) - 1);
if (stat(disk_path, &st) == 0) {
struct sys_dev* hba;
- char *path=NULL;
+ char *path;
path = devt_to_devpath(st.st_rdev);
if (path == NULL)
struct mdinfo *info, *member;
int devices_that_can_grow = 0;
- dprintf("imsm: imsm_reshape_is_allowed_on_container(ENTER): "
- "st->devnm = (%s)\n", st->devnm);
+ dprintf("imsm: imsm_reshape_is_allowed_on_container(ENTER): st->devnm = (%s)\n", st->devnm);
if (geo->size > 0 ||
geo->level != UnSet ||
geo->layout != UnSet ||
geo->chunksize != 0 ||
geo->raid_disks == UnSet) {
- dprintf("imsm: Container operation is allowed for "
- "raid disks number change only.\n");
+ dprintf("imsm: Container operation is allowed for raid disks number change only.\n");
return ret_val;
}
if (direction == ROLLBACK_METADATA_CHANGES) {
- dprintf("imsm: Metadata changes rollback is not supported for "
- "container operation.\n");
+ dprintf("imsm: Metadata changes rollback is not supported for container operation.\n");
return ret_val;
}
/* we work on container for Online Capacity Expansion
* only so raid_disks has to grow
*/
- dprintf("imsm: for container operation raid disks "
- "increase is required\n");
+ dprintf("imsm: for container operation raid disks increase is required\n");
break;
}
(info->array.level != 5)) {
/* we cannot use this container with other raid level
*/
- dprintf("imsm: for container operation wrong"
- " raid level (%i) detected\n",
+ dprintf("imsm: for container operation wrong raid level (%i) detected\n",
info->array.level);
break;
} else {
if (!is_raid_level_supported(super->orom,
member->array.level,
geo->raid_disks)) {
- dprintf("platform does not support raid%d with"
- " %d disk%s\n",
+ dprintf("platform does not support raid%d with %d disk%s\n",
info->array.level,
geo->raid_disks,
geo->raid_disks > 1 ? "s" : "");
*/
if (info->component_size %
(info->array.chunk_size/512)) {
- dprintf("Component size is not aligned to "
- "chunk size\n");
+ dprintf("Component size is not aligned to chunk size\n");
break;
}
}
ret_val = 1;
if (ret_val)
- dprintf("\tContainer operation allowed\n");
+ dprintf("Container operation allowed\n");
else
- dprintf("\tError: %i\n", ret_val);
+ dprintf("Error: %i\n", ret_val);
return ret_val;
}
{
struct intel_super *super = st->sb;
struct imsm_super *mpb = super->anchor;
- int update_memory_size = 0;
- struct imsm_update_reshape *u = NULL;
- struct mdinfo *spares = NULL;
+ int update_memory_size;
+ struct imsm_update_reshape *u;
+ struct mdinfo *spares;
int i;
- int delta_disks = 0;
+ int delta_disks;
struct mdinfo *dev;
- dprintf("imsm_update_metadata_for_reshape(enter) raid_disks = %i\n",
- geo->raid_disks);
+ dprintf("(enter) raid_disks = %i\n", geo->raid_disks);
delta_disks = geo->raid_disks - old_raid_disks;
if (spares == NULL
|| delta_disks > spares->array.spare_disks) {
- pr_err("imsm: ERROR: Cannot get spare devices "
- "for %s.\n", geo->dev_name);
+ pr_err("imsm: ERROR: Cannot get spare devices for %s.\n", geo->dev_name);
i = -1;
goto abort;
}
dprintf("imsm: reshape update preparation :");
if (i == delta_disks) {
- dprintf(" OK\n");
+ dprintf_cont(" OK\n");
*updatep = u;
return update_memory_size;
}
free(u);
- dprintf(" Error\n");
+ dprintf_cont(" Error\n");
return 0;
}
struct imsm_update_size_change **updatep)
{
struct intel_super *super = st->sb;
- int update_memory_size = 0;
- struct imsm_update_size_change *u = NULL;
+ int update_memory_size;
+ struct imsm_update_size_change *u;
- dprintf("imsm_create_metadata_update_for_size_change(enter)"
- " New size = %llu\n", geo->size);
+ dprintf("(enter) New size = %llu\n", geo->size);
/* size of all update data without anchor */
update_memory_size = sizeof(struct imsm_update_size_change);
struct imsm_update_reshape_migration **updatep)
{
struct intel_super *super = st->sb;
- int update_memory_size = 0;
- struct imsm_update_reshape_migration *u = NULL;
+ int update_memory_size;
+ struct imsm_update_reshape_migration *u;
struct imsm_dev *dev;
int previous_level = -1;
- dprintf("imsm_create_metadata_update_for_migration(enter)"
- " New Level = %i\n", geo->level);
+ dprintf("(enter) New Level = %i\n", geo->level);
/* size of all update data without anchor */
update_memory_size = sizeof(struct imsm_update_reshape_migration);
if (geo->chunksize != current_chunk_size) {
u->new_chunksize = geo->chunksize / 1024;
- dprintf("imsm: "
- "chunk size change from %i to %i\n",
+ dprintf("imsm: chunk size change from %i to %i\n",
current_chunk_size, u->new_chunksize);
}
previous_level = map->raid_level;
free(u);
sysfs_free(spares);
update_memory_size = 0;
- dprintf("error: cannot get spare device "
- "for requested migration");
+ dprintf("error: cannot get spare device for requested migration\n");
return 0;
}
sysfs_free(spares);
if (geo->level == 5) {
change = CH_MIGRATION;
if (geo->layout != ALGORITHM_LEFT_ASYMMETRIC) {
- pr_err("Error. Requested Layout "
- "not supported (left-asymmetric layout "
- "is supported only)!\n");
+ pr_err("Error. Requested Layout not supported (left-asymmetric layout is supported only)!\n");
change = -1;
goto analyse_change_exit;
}
break;
}
if (change == -1) {
- pr_err("Error. Level Migration from %d to %d "
- "not supported!\n",
+ pr_err("Error. Level Migration from %d to %d not supported!\n",
info.array.level, geo->level);
goto analyse_change_exit;
}
geo->layout = 0;
geo->level = 5;
} else {
- pr_err("Error. Layout Migration from %d to %d "
- "not supported!\n",
+ pr_err("Error. Layout Migration from %d to %d not supported!\n",
info.array.layout, geo->layout);
change = -1;
goto analyse_change_exit;
chunk * 1024,
geo->size * 2);
if (geo->size == 0) {
- pr_err("Error. Size expansion is " \
- "supported only (current size is %llu, " \
- "requested size /rounded/ is 0).\n",
+ pr_err("Error. Size expansion is supported only (current size is %llu, requested size /rounded/ is 0).\n",
current_size);
goto analyse_change_exit;
}
if ((current_size != geo->size) && (geo->size > 0)) {
if (change != -1) {
- pr_err("Error. Size change should be the only "
- "one at a time.\n");
+ pr_err("Error. Size change should be the only one at a time.\n");
change = -1;
goto analyse_change_exit;
}
if ((super->current_vol + 1) != super->anchor->num_raid_devs) {
- pr_err("Error. The last volume in container "
- "can be expanded only (%i/%s).\n",
+ pr_err("Error. The last volume in container can be expanded only (%i/%s).\n",
super->current_vol, st->devnm);
goto analyse_change_exit;
}
/* requested size change to the maximum available size
*/
if (max_size == 0) {
- pr_err("Error. Cannot find "
- "maximum available space.\n");
+ pr_err("Error. Cannot find maximum available space.\n");
change = -1;
goto analyse_change_exit;
} else
dprintf("Prepare update for size change to %llu\n",
geo->size );
if (current_size >= geo->size) {
- pr_err("Error. Size expansion is "
- "supported only (current size is %llu, "
- "requested size /rounded/ is %llu).\n",
+ pr_err("Error. Size expansion is supported only (current size is %llu, requested size /rounded/ is %llu).\n",
current_size, geo->size);
goto analyse_change_exit;
}
if (max_size && geo->size > max_size) {
- pr_err("Error. Requested size is larger "
- "than maximum available size (maximum "
- "available size is %llu, "
- "requested size /rounded/ is %llu).\n",
+ pr_err("Error. Requested size is larger than maximum available size (maximum available size is %llu, requested size /rounded/ is %llu).\n",
max_size, geo->size);
goto analyse_change_exit;
}
struct imsm_super *mpb = super->anchor;
if (mpb->num_raid_devs > 1) {
- pr_err("Error. Cannot perform operation on %s"
- "- for this operation it MUST be single "
- "array in container\n",
+ pr_err("Error. Cannot perform operation on %s - for this operation it MUST be single array in container\n",
geo->dev_name);
change = -1;
}
analyse_change_exit:
if ((direction == ROLLBACK_METADATA_CHANGES) &&
((change == CH_MIGRATION) || (change == CH_TAKEOVER))) {
- dprintf("imsm: Metadata changes rollback is not supported for "
- "migration and takeover operations.\n");
+ dprintf("imsm: Metadata changes rollback is not supported for migration and takeover operations.\n");
change = -1;
}
return change;
int ret_val = 1;
struct geo_params geo;
- dprintf("imsm: reshape_super called.\n");
+ dprintf("(enter)\n");
memset(&geo, 0, sizeof(struct geo_params));
if (delta_disks != UnSet)
geo.raid_disks += delta_disks;
- dprintf("\tfor level : %i\n", geo.level);
- dprintf("\tfor raid_disks : %i\n", geo.raid_disks);
+ dprintf("for level : %i\n", geo.level);
+ dprintf("for raid_disks : %i\n", geo.raid_disks);
if (experimental() == 0)
return ret_val;
free(u);
} else {
- pr_err("(imsm) Operation "
- "is not allowed on this container\n");
+ pr_err("(imsm) Operation is not allowed on this container\n");
}
} else {
/* On volume level we support following operations
imsm_create_metadata_update_for_migration(
st, &geo, &u);
if (len < 1) {
- dprintf("imsm: "
- "Cannot prepare update\n");
+ dprintf("imsm: Cannot prepare update\n");
break;
}
ret_val = 0;
imsm_create_metadata_update_for_size_change(
st, &geo, &u);
if (len < 1) {
- dprintf("imsm: "
- "Cannot prepare update\n");
+ dprintf("imsm: Cannot prepare update\n");
break;
}
ret_val = 0;
return ret_val;
}
+#define COMPLETED_OK 0
+#define COMPLETED_NONE 1
+#define COMPLETED_DELAYED 2
+
+static int read_completed(int fd, unsigned long long *val)
+{
+ int ret;
+ char buf[50];
+
+ ret = sysfs_fd_get_str(fd, buf, 50);
+ if (ret < 0)
+ return ret;
+
+ ret = COMPLETED_OK;
+ if (strncmp(buf, "none", 4) == 0) {
+ ret = COMPLETED_NONE;
+ } else if (strncmp(buf, "delayed", 7) == 0) {
+ ret = COMPLETED_DELAYED;
+ } else {
+ char *ep;
+ *val = strtoull(buf, &ep, 0);
+ if (ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))
+ ret = -1;
+ }
+ return ret;
+}
+
/*******************************************************************************
* Function: wait_for_reshape_imsm
* Description: Function writes new sync_max value and waits until
int wait_for_reshape_imsm(struct mdinfo *sra, int ndata)
{
int fd = sysfs_get_fd(sra, NULL, "sync_completed");
+ int retry = 3;
unsigned long long completed;
/* to_complete : new sync_max position */
unsigned long long to_complete = sra->reshape_progress;
unsigned long long position_to_set = to_complete / ndata;
if (fd < 0) {
- dprintf("imsm: wait_for_reshape_imsm() "
- "cannot open reshape_position\n");
+ dprintf("cannot open reshape_position\n");
return 1;
}
- if (sysfs_fd_get_ll(fd, &completed) < 0) {
- dprintf("imsm: wait_for_reshape_imsm() "
- "cannot read reshape_position (no reshape in progres)\n");
- close(fd);
- return 0;
- }
+ do {
+ if (sysfs_fd_get_ll(fd, &completed) < 0) {
+ if (!retry) {
+ dprintf("cannot read reshape_position (no reshape in progress)\n");
+ close(fd);
+ return 1;
+ }
+ usleep(30000);
+ } else
+ break;
+ } while (retry--);
if (completed > position_to_set) {
- dprintf("imsm: wait_for_reshape_imsm() "
- "wrong next position to set %llu (%llu)\n",
+ dprintf("wrong next position to set %llu (%llu)\n",
to_complete, position_to_set);
close(fd);
return -1;
dprintf("Position set: %llu\n", position_to_set);
if (sysfs_set_num(sra, NULL, "sync_max",
position_to_set) != 0) {
- dprintf("imsm: wait_for_reshape_imsm() "
- "cannot set reshape position to %llu\n",
+ dprintf("cannot set reshape position to %llu\n",
position_to_set);
close(fd);
return -1;
}
do {
+ int rc;
char action[20];
- sysfs_wait(fd, NULL);
+ int timeout = 3000;
+
+ sysfs_wait(fd, &timeout);
if (sysfs_get_str(sra, NULL, "sync_action",
action, 20) > 0 &&
- strncmp(action, "reshape", 7) != 0)
- break;
- if (sysfs_fd_get_ll(fd, &completed) < 0) {
- dprintf("imsm: wait_for_reshape_imsm() "
- "cannot read reshape_position (in loop)\n");
+ strncmp(action, "reshape", 7) != 0) {
+ if (strncmp(action, "idle", 4) == 0)
+ break;
close(fd);
- return 1;
+ return -1;
}
+
+ rc = read_completed(fd, &completed);
+ if (rc < 0) {
+ dprintf("cannot read reshape_position (in loop)\n");
+ close(fd);
+ return 1;
+ } else if (rc == COMPLETED_NONE)
+ break;
} while (completed < position_to_set);
+
close(fd);
return 0;
-
}
/*******************************************************************************
* Function: imsm_manage_reshape
* Description: Function finds array under reshape and it manages reshape
* process. It creates stripes backups (if required) and sets
- * checheckpoits.
+ * checkpoints.
* Parameters:
* afd : Backup handle (nattive) - not used
* sra : general array info
{
int ret_val = 0;
struct intel_super *super = st->sb;
- struct intel_dev *dv = NULL;
+ struct intel_dev *dv;
struct imsm_dev *dev = NULL;
struct imsm_map *map_src;
int migr_vol_qan = 0;
int degraded = 0;
int source_layout = 0;
- if (!fds || !offsets || !sra)
+ if (!sra)
+ return ret_val;
+
+ if (!fds || !offsets)
goto abort;
/* Find volume during the reshape */
}
/* Only one volume can migrate at the same time */
if (migr_vol_qan != 1) {
- pr_err(": %s", migr_vol_qan ?
+ pr_err("%s", migr_vol_qan ?
"Number of migrating volumes greater than 1\n" :
"There is no volume during migrationg\n");
goto abort;
init_migr_record_imsm(st, dev, sra);
else {
if (__le32_to_cpu(migr_rec->rec_status) != UNIT_SRC_NORMAL) {
- dprintf("imsm: cannot restart migration when data "
- "are present in copy area.\n");
+ dprintf("imsm: cannot restart migration when data are present in copy area.\n");
goto abort;
}
/* Save checkpoint to update migration record for current
if (save_checkpoint_imsm(st, sra, UNIT_SRC_NORMAL) == 1) {
/* ignore error == 2, this can mean end of reshape here
*/
- dprintf("imsm: Cannot write checkpoint to "
- "migration record (UNIT_SRC_NORMAL, "
- "initial save)\n");
+ dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL, initial save)\n");
goto abort;
}
}
*/
degraded = check_degradation_change(sra, fds, degraded);
if (degraded > 1) {
- dprintf("imsm: Abort reshape due to degradation"
- " level (%i)\n", degraded);
+ dprintf("imsm: Abort reshape due to degradation level (%i)\n", degraded);
goto abort;
}
start = current_position * 512;
- /* allign reading start to old geometry */
+ /* align reading start to old geometry */
start_buf_shift = start % old_data_stripe_length;
start_src = start - start_buf_shift;
* to backup alligned to source array
* [bytes]
*/
- unsigned long long next_step_filler = 0;
+ unsigned long long next_step_filler;
unsigned long long copy_length = next_step * 512;
/* allign copy area length to stripe in old geometry */
if (next_step_filler)
next_step_filler = (old_data_stripe_length
- next_step_filler);
- dprintf("save_stripes() parameters: start = %llu,"
- "\tstart_src = %llu,\tnext_step*512 = %llu,"
- "\tstart_in_buf_shift = %llu,"
- "\tnext_step_filler = %llu\n",
+ dprintf("save_stripes() parameters: start = %llu,\tstart_src = %llu,\tnext_step*512 = %llu,\tstart_in_buf_shift = %llu,\tnext_step_filler = %llu\n",
start, start_src, copy_length,
start_buf_shift, next_step_filler);
copy_length +
next_step_filler + start_buf_shift,
buf)) {
- dprintf("imsm: Cannot save stripes"
- " to buffer\n");
+ dprintf("imsm: Cannot save stripes to buffer\n");
goto abort;
}
/* Convert data to destination format and store it
*/
if (save_backup_imsm(st, dev, sra,
buf + start_buf_shift, copy_length)) {
- dprintf("imsm: Cannot save stripes to "
- "target devices\n");
+ dprintf("imsm: Cannot save stripes to target devices\n");
goto abort;
}
if (save_checkpoint_imsm(st, sra,
UNIT_SRC_IN_CP_AREA)) {
- dprintf("imsm: Cannot write checkpoint to "
- "migration record (UNIT_SRC_IN_CP_AREA)\n");
+ dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_IN_CP_AREA)\n");
goto abort;
}
} else {
sra->reshape_progress = next_step;
/* wait until reshape finish */
- if (wait_for_reshape_imsm(sra, ndata) < 0) {
+ if (wait_for_reshape_imsm(sra, ndata)) {
dprintf("wait_for_reshape_imsm returned error!\n");
goto abort;
}
if (save_checkpoint_imsm(st, sra, UNIT_SRC_NORMAL) == 1) {
/* ignore error == 2, this can mean end of reshape here
*/
- dprintf("imsm: Cannot write checkpoint to "
- "migration record (UNIT_SRC_NORMAL)\n");
+ dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL)\n");
goto abort;
}
ret_val = 1;
abort:
free(buf);
- abort_reshape(sra);
+ /* See Grow.c: abort_reshape() for further explanation */
+ sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
+ sysfs_set_num(sra, NULL, "suspend_hi", 0);
+ sysfs_set_num(sra, NULL, "suspend_lo", 0);
return ret_val;
}