printf(" <-- %s", map_state_str[map->map_state]);
printf("\n Checkpoint : %u ",
__le32_to_cpu(dev->vol.curr_migr_unit));
- if ((is_gen_migration(dev)) && ((slot > 1) || (slot < 0)))
+ if (is_gen_migration(dev) && (slot > 1 || slot < 0))
printf("(N/A)");
else
printf("(%llu)", (unsigned long long)
map = get_imsm_map(dev, MAP_0);
if (map)
slot = get_imsm_disk_slot(map, super->disks->index);
- if ((map == NULL) || (slot > 1) || (slot < 0)) {
+ if (map == NULL || slot > 1 || slot < 0) {
printf(" Empty\n ");
printf("Examine one of first two disks in array\n");
break;
*/
if (dev == NULL)
return -2;
- map = get_imsm_map(dev, MAP_0);
if (info) {
for (sd = info->devs ; sd ; sd = sd->next) {
- /* skip spare and failed disks
- */
- if (sd->disk.raid_disk < 0)
- continue;
/* read only from one of the first two slots */
- if (map)
- slot = get_imsm_disk_slot(map,
- sd->disk.raid_disk);
- if ((map == NULL) || (slot > 1) || (slot < 0))
+ if ((sd->disk.raid_disk < 0) ||
+ (sd->disk.raid_disk > 1))
continue;
sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
}
}
if (fd < 0) {
+ map = get_imsm_map(dev, MAP_0);
for (dl = super->disks; dl; dl = dl->next) {
/* skip spare and failed disks
*/
/* read only from one of the first two slots */
if (map)
slot = get_imsm_disk_slot(map, dl->index);
- if ((map == NULL) || (slot > 1) || (slot < 0))
+ if (map == NULL || slot > 1 || slot < 0)
continue;
sprintf(nm, "%d:%d", dl->major, dl->minor);
fd = dev_open(nm, O_RDONLY);
/* write to 2 first slots only */
if (map)
slot = get_imsm_disk_slot(map, sd->index);
- if ((map == NULL) || (slot > 1) || (slot < 0))
+ if (map == NULL || slot > 1 || slot < 0)
continue;
sprintf(nm, "%d:%d", sd->major, sd->minor);
}
}
+/* Read the serial number of an NVMe block device from sysfs.
+ *
+ * fd      - open file descriptor on the block device; only used to
+ *           resolve the kernel device name via fd2kname()
+ * buf     - destination buffer for the serial string
+ * buf_len - size of buf in bytes
+ *
+ * Returns 0 on success (serial copied into buf by load_sys()), and
+ * non-zero when the device name cannot be resolved, the device is not
+ * an NVMe device, or the sysfs attribute cannot be read — callers
+ * (imsm_read_serial) fall back to scsi_get_serial() on failure.
+ */
+static int nvme_get_serial(int fd, void *buf, size_t buf_len)
+{
+	char path[60];
+	char *name = fd2kname(fd);
+
+	if (!name)
+		return 1;
+
+	/* only kernel names of the form "nvmeXnY" expose this attribute */
+	if (strncmp(name, "nvme", 4) != 0)
+		return 1;
+
+	/* NOTE(review): snprintf's size argument already reserves room for
+	 * the terminating NUL, so the "- 1" is unnecessary (harmless, but
+	 * it shrinks the usable buffer by one byte) — confirm intent.
+	 */
+	snprintf(path, sizeof(path) - 1, "/sys/block/%s/device/serial", name);
+
+	return load_sys(path, buf, buf_len);
+}
+
extern int scsi_get_serial(int fd, void *buf, size_t buf_len);
static int imsm_read_serial(int fd, char *devname,
__u8 serial[MAX_RAID_SERIAL_LEN])
{
- unsigned char scsi_serial[255];
+ char buf[50];
int rv;
- int rsp_len;
int len;
char *dest;
char *src;
- char *rsp_buf;
- int i;
+ unsigned int i;
- memset(scsi_serial, 0, sizeof(scsi_serial));
+ memset(buf, 0, sizeof(buf));
- rv = scsi_get_serial(fd, scsi_serial, sizeof(scsi_serial));
+ rv = nvme_get_serial(fd, buf, sizeof(buf));
+
+ if (rv)
+ rv = scsi_get_serial(fd, buf, sizeof(buf));
if (rv && check_env("IMSM_DEVNAME_AS_SERIAL")) {
memset(serial, 0, MAX_RAID_SERIAL_LEN);
return rv;
}
- rsp_len = scsi_serial[3];
- if (!rsp_len) {
- if (devname)
- pr_err("Failed to retrieve serial for %s\n",
- devname);
- return 2;
- }
- rsp_buf = (char *) &scsi_serial[4];
-
/* trim all whitespace and non-printable characters and convert
* ':' to ';'
*/
- for (i = 0, dest = rsp_buf; i < rsp_len; i++) {
- src = &rsp_buf[i];
+ for (i = 0, dest = buf; i < sizeof(buf) && buf[i]; i++) {
+ src = &buf[i];
if (*src > 0x20) {
/* ':' is reserved for use in placeholder serial
* numbers for missing disks
*dest++ = *src;
}
}
- len = dest - rsp_buf;
- dest = rsp_buf;
+ len = dest - buf;
+ dest = buf;
/* truncate leading characters */
if (len > MAX_RAID_SERIAL_LEN) {
/* duplicate and then set the target end state in map[0] */
memcpy(dest, src, sizeof_imsm_map(src));
- if ((migr_type == MIGR_REBUILD) ||
- (migr_type == MIGR_GEN_MIGR)) {
+ if (migr_type == MIGR_REBUILD || migr_type == MIGR_GEN_MIGR) {
__u32 ord;
int i;
*
* FIXME add support for raid-level-migration
*/
- if ((map_state != map->map_state) && (is_gen_migration(dev) == 0) &&
- (prev->map_state != IMSM_T_STATE_UNINITIALIZED)) {
+ if (map_state != map->map_state && (is_gen_migration(dev) == 0) &&
+ prev->map_state != IMSM_T_STATE_UNINITIALIZED) {
/* when final map state is other than expected
* merge maps (not for migration)
*/
struct sys_dev *hba_name;
int rv = 0;
- if ((fd < 0) || check_env("IMSM_NO_PLATFORM")) {
+ if (fd < 0 || check_env("IMSM_NO_PLATFORM")) {
super->orom = NULL;
super->hba = NULL;
return 0;
}
/* Check migration compatibility */
- if ((err == 0) && (check_mpb_migr_compatibility(super) != 0)) {
+ if (err == 0 && check_mpb_migr_compatibility(super) != 0) {
pr_err("Unsupported migration detected");
if (devname)
fprintf(stderr, " on %s\n", devname);
if (dfd >= 0)
close(dfd);
}
- if ((dfd >= 0) && (!keep_fd))
+ if (dfd >= 0 && !keep_fd)
close(dfd);
return err;
*/
rv = find_intel_hba_capability(fd, super, devname);
/* no orom/efi or non-intel hba of the disk */
- if ((rv != 0) && (st->ignore_hw_compat == 0)) {
+ if (rv != 0 && st->ignore_hw_compat == 0) {
if (devname)
pr_err("No OROM/EFI properties for %s\n", devname);
free_imsm(super);
struct imsm_map *map2 = get_imsm_map(dev,
MAP_1);
int slot2 = get_imsm_disk_slot(map2, df->index);
- if ((slot2 < map2->num_members) &&
- (slot2 >= 0)) {
+ if (slot2 < map2->num_members && slot2 >= 0) {
__u32 ord2 = get_imsm_ord_tbl_ent(dev,
slot2,
MAP_1);
rv = imsm_read_serial(fd, devname, dd->serial);
if (rv) {
pr_err("failed to retrieve scsi serial, aborting\n");
+ if (dd->devname)
+ free(dd->devname);
free(dd);
abort();
}
+ if (super->hba && ((super->hba->type == SYS_DEV_NVME) ||
+ (super->hba->type == SYS_DEV_VMD))) {
+ int i;
+ char *devpath = diskfd_to_devpath(fd);
+ char controller_path[PATH_MAX];
+
+ if (!devpath) {
+ pr_err("failed to get devpath, aborting\n");
+ if (dd->devname)
+ free(dd->devname);
+ free(dd);
+ return 1;
+ }
+
+ snprintf(controller_path, PATH_MAX-1, "%s/device", devpath);
+ free(devpath);
+
+ if (devpath_to_vendor(controller_path) == 0x8086) {
+ /*
+ * If Intel's NVMe drive has serial ended with
+ * "-A","-B","-1" or "-2" it means that this is "x8"
+ * device (double drive on single PCIe card).
+ * User should be warned about potential data loss.
+ */
+ for (i = MAX_RAID_SERIAL_LEN-1; i > 0; i--) {
+ /* Skip empty character at the end */
+ if (dd->serial[i] == 0)
+ continue;
+
+ if (((dd->serial[i] == 'A') ||
+ (dd->serial[i] == 'B') ||
+ (dd->serial[i] == '1') ||
+ (dd->serial[i] == '2')) &&
+ (dd->serial[i-1] == '-'))
+ pr_err("\tThe action you are about to take may put your data at risk.\n"
+ "\tPlease note that x8 devices may consist of two separate x4 devices "
+ "located on a single PCIe port.\n"
+ "\tRAID 0 is the only supported configuration for this type of x8 device.\n");
+ break;
+ }
+ }
+ }
get_dev_size(fd, NULL, &size);
/* clear migr_rec when adding disk to container */
num = sprintf(path, "%s%s", "/dev/", dev->name);
if (num > 0)
fd = open(path, O_RDONLY, 0);
- if ((num <= 0) || (fd < 0)) {
- pr_vrb(": Cannot open %s: %s\n",
+ if (num <= 0 || fd < 0) {
+ pr_vrb("Cannot open %s: %s\n",
dev->name, strerror(errno));
}
free(path);
dev = dev->next;
}
found = 0;
- if ((fd >= 0) && disk_attached_to_hba(fd, hba)) {
+ if (fd >= 0 && disk_attached_to_hba(fd, hba)) {
struct mdstat_ent *vol;
for (vol = mdstat ; vol ; vol = vol->next) {
- if ((vol->active > 0) &&
+ if (vol->active > 0 &&
vol->metadata_version &&
is_container_member(vol, memb->devnm)) {
found++;
*found = 0;
st = match_metadata_desc_imsm("imsm");
if (st == NULL) {
- pr_vrb(": cannot allocate memory for imsm supertype\n");
+ pr_vrb("cannot allocate memory for imsm supertype\n");
return 0;
}
continue;
tst = dup_super(st);
if (tst == NULL) {
- pr_vrb(": cannot allocate memory for imsm supertype\n");
+ pr_vrb("cannot allocate memory for imsm supertype\n");
goto err_1;
}
tmpdev->container = 0;
}
for (tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
- if ((tmpdev->used == 1) && (tmpdev->found)) {
+ if (tmpdev->used == 1 && tmpdev->found) {
if (count) {
if (count < tmpdev->found)
count = 0;
* VMD arrays should be counted per domain (controller), so skip
* domains that are not the given one.
*/
- if ((hba->type == SYS_DEV_VMD) &&
+ if (hba->type == SYS_DEV_VMD &&
(strncmp(device->path, hba->path, strlen(device->path)) != 0))
continue;
{
/* check/set platform and metadata limits/defaults */
if (super->orom && raiddisks > super->orom->dpa) {
- pr_vrb(": platform supports a maximum of %d disks per array\n",
+ pr_vrb("platform supports a maximum of %d disks per array\n",
super->orom->dpa);
return 0;
}
/* capabilities of OROM tested - copied from validate_geometry_imsm_volume */
if (!is_raid_level_supported(super->orom, level, raiddisks)) {
- pr_vrb(": platform does not support raid%d with %d disk%s\n",
+ pr_vrb("platform does not support raid%d with %d disk%s\n",
level, raiddisks, raiddisks > 1 ? "s" : "");
return 0;
}
*chunk = imsm_default_chunk(super->orom);
if (super->orom && !imsm_orom_has_chunk(super->orom, *chunk)) {
- pr_vrb(": platform does not support a chunk size of: %d\n", *chunk);
+ pr_vrb("platform does not support a chunk size of: %d\n", *chunk);
return 0;
}
if (layout != imsm_level_to_layout(level)) {
if (level == 5)
- pr_vrb(": imsm raid 5 only supports the left-asymmetric layout\n");
+ pr_vrb("imsm raid 5 only supports the left-asymmetric layout\n");
else if (level == 10)
- pr_vrb(": imsm raid 10 only supports the n2 layout\n");
+ pr_vrb("imsm raid 10 only supports the n2 layout\n");
else
- pr_vrb(": imsm unknown layout %#x for this raid level %d\n",
+ pr_vrb("imsm unknown layout %#x for this raid level %d\n",
layout, level);
return 0;
}
if (super->orom && (super->orom->attr & IMSM_OROM_ATTR_2TB) == 0 &&
(calc_array_size(level, raiddisks, layout, *chunk, size) >> 32) > 0) {
- pr_vrb(": platform does not support a volume size over 2TB\n");
+ pr_vrb("platform does not support a volume size over 2TB\n");
return 0;
}
int count = count_volumes(super->hba,
super->orom->dpa, verbose);
if (super->orom->vphba <= count) {
- pr_vrb(": platform does not support more than %d raid volumes.\n",
+ pr_vrb("platform does not support more than %d raid volumes.\n",
super->orom->vphba);
return 0;
}
count = count_volumes(super->hba,
super->orom->dpa, verbose);
if (super->orom->vphba <= count) {
- pr_vrb(": platform does not support more than %d raid volumes.\n",
+ pr_vrb("platform does not support more than %d raid volumes.\n",
super->orom->vphba);
return 0;
}
/* when MAP_X is passed both maps failures are counted
*/
if (prev &&
- ((look_in_map == MAP_1) || (look_in_map == MAP_X)) &&
- (i < prev->num_members)) {
+ (look_in_map == MAP_1 || look_in_map == MAP_X) &&
+ i < prev->num_members) {
ord = __le32_to_cpu(prev->disk_ord_tbl[i]);
idx_1 = ord_to_idx(ord);
if (!disk || is_failed(disk) || ord & IMSM_ORD_REBUILD)
failed++;
}
- if (((look_in_map == MAP_0) || (look_in_map == MAP_X)) &&
- (i < map->num_members)) {
+ if ((look_in_map == MAP_0 || look_in_map == MAP_X) &&
+ i < map->num_members) {
ord = __le32_to_cpu(map->disk_ord_tbl[i]);
idx = ord_to_idx(ord);
migr_map = get_imsm_map(dev, MAP_1);
- if ((migr_map->map_state == IMSM_T_STATE_NORMAL) &&
- (dev->vol.migr_type != MIGR_GEN_MIGR))
+ if (migr_map->map_state == IMSM_T_STATE_NORMAL &&
+ dev->vol.migr_type != MIGR_GEN_MIGR)
return 1;
else
return 0;
struct imsm_map *map2 = get_imsm_map(dev, MAP_1);
int slot2 = get_imsm_disk_slot(map2, idx);
- if ((slot2 < map2->num_members) &&
- (slot2 >= 0))
+ if (slot2 < map2->num_members && slot2 >= 0)
set_imsm_ord_tbl_ent(map2, slot2,
idx | IMSM_ORD_REBUILD);
}
break;
case IMSM_T_STATE_DEGRADED: /* transition to degraded state */
dprintf_cont("degraded: ");
- if ((map->map_state != map_state) &&
- !dev->vol.migr_state) {
+ if (map->map_state != map_state && !dev->vol.migr_state) {
dprintf_cont("mark degraded");
map->map_state = map_state;
super->updates_pending++;
IMSM_T_STATE_DEGRADED)
return NULL;
+ if (get_imsm_map(dev, MAP_0)->map_state == IMSM_T_STATE_UNINITIALIZED) {
+ dprintf("imsm: No spare activation allowed. Volume is not initialized.\n");
+ return NULL;
+ }
+
/*
* If there are any failed disks check state of the other volume.
* Block rebuild if the another one is failed until failed disks
struct dl *dl;
for (dl = super->disks; dl; dl = dl->next)
- if ((dl->major == major) && (dl->minor == minor))
+ if (dl->major == major && dl->minor == minor)
return dl;
return NULL;
}
prev = NULL;
for (dl = super->disks; dl; dl = dl->next) {
- if ((dl->major == major) && (dl->minor == minor)) {
+ if (dl->major == major && dl->minor == minor) {
/* remove */
if (prev)
prev->next = dl->next;
int ret_val = 0;
dprintf("(enter)\n");
- if ((u->subdev < 0) ||
- (u->subdev > 1)) {
+ if (u->subdev < 0 || u->subdev > 1) {
dprintf("imsm: Error: Wrong subdev: %i\n", u->subdev);
return ret_val;
}
- if ((space_list == NULL) || (*space_list == NULL)) {
+ if (space_list == NULL || *space_list == NULL) {
dprintf("imsm: Error: Memory is not allocated\n");
return ret_val;
}
/* add disk
*/
- if ((u->new_level != 5) ||
- (migr_map->raid_level != 0) ||
- (migr_map->raid_level == map->raid_level))
+ if (u->new_level != 5 || migr_map->raid_level != 0 ||
+ migr_map->raid_level == map->raid_level)
goto skip_disk_add;
if (u->new_disks[0] >= 0) {
int ret_val = 0;
dprintf("(enter)\n");
- if ((u->subdev < 0) ||
- (u->subdev > 1)) {
+ if (u->subdev < 0 || u->subdev > 1) {
dprintf("imsm: Error: Wrong subdev: %i\n", u->subdev);
return ret_val;
}
dprintf("imsm: new disk for reshape is: %i:%i (%p, index = %i)\n",
major(u->new_disks[i]), minor(u->new_disks[i]),
new_disk, new_disk->index);
- if ((new_disk == NULL) ||
- ((new_disk->index >= 0) &&
- (new_disk->index < u->old_raid_disks)))
+ if (new_disk == NULL ||
+ (new_disk->index >= 0 &&
+ new_disk->index < u->old_raid_disks))
goto update_reshape_exit;
new_disk->index = disk_count++;
/* slot to fill in autolayout
current_level = map->raid_level;
break;
}
- if ((u->new_level == 5) && (u->new_level != current_level)) {
+ if (u->new_level == 5 && u->new_level != current_level) {
struct mdinfo *spares;
spares = get_spares_for_grow(st);
continue;
}
- if ((sd->disk.raid_disk >= raid_disks) ||
- (sd->disk.raid_disk < 0))
+ if (sd->disk.raid_disk >= raid_disks || sd->disk.raid_disk < 0)
continue;
dn = map_dev(sd->disk.major,
/* check if maximum array degradation level is not exceeded
*/
if ((raid_disks - opened) >
- imsm_get_allowed_degradation(info->new_level,
- raid_disks,
- super, dev)) {
+ imsm_get_allowed_degradation(info->new_level, raid_disks,
+ super, dev)) {
pr_err("Not enough disks can be opened.\n");
close_targets(raid_fds, raid_disks);
return -2;
return 1;
}
- if ((orom != orom2) || ((hba->type == SYS_DEV_VMD) && (hba != hba2))) {
+ if (orom != orom2 ||
+ (hba->type == SYS_DEV_VMD && hba != hba2)) {
pr_err("WARNING - IMSM container assembled with disks under different HBAs!\n"
" This operation is not supported and can lead to data loss.\n");
return 1;
break;
}
- if ((info->array.level != 0) &&
- (info->array.level != 5)) {
+ if (info->array.level != 0 && info->array.level != 5) {
/* we cannot use this container with other raid level
*/
dprintf("imsm: for container operation wrong raid level (%i) detected\n",
previous_level = map->raid_level;
}
}
- if ((geo->level == 5) && (previous_level == 0)) {
+ if (geo->level == 5 && previous_level == 0) {
struct mdinfo *spares = NULL;
u->new_raid_disks++;
spares = get_spares_for_grow(st);
- if ((spares == NULL) || (spares->array.spare_disks < 1)) {
+ if (spares == NULL || spares->array.spare_disks < 1) {
free(u);
sysfs_free(spares);
update_memory_size = 0;
int rv;
getinfo_super_imsm_volume(st, &info, NULL);
- if ((geo->level != info.array.level) &&
- (geo->level >= 0) &&
- (geo->level != UnSet)) {
+ if (geo->level != info.array.level && geo->level >= 0 &&
+ geo->level != UnSet) {
switch (info.array.level) {
case 0:
if (geo->level == 5) {
} else
geo->level = info.array.level;
- if ((geo->layout != info.array.layout)
- && ((geo->layout != UnSet) && (geo->layout != -1))) {
+ if (geo->layout != info.array.layout &&
+ (geo->layout != UnSet && geo->layout != -1)) {
change = CH_MIGRATION;
- if ((info.array.layout == 0)
- && (info.array.level == 5)
- && (geo->layout == 5)) {
+ if (info.array.layout == 0 && info.array.level == 5 &&
+ geo->layout == 5) {
/* reshape 5 -> 4 */
- } else if ((info.array.layout == 5)
- && (info.array.level == 5)
- && (geo->layout == 0)) {
+ } else if (info.array.layout == 5 && info.array.level == 5 &&
+ geo->layout == 0) {
/* reshape 4 -> 5 */
geo->layout = 0;
geo->level = 5;
imsm_layout = info.array.layout;
}
- if ((geo->chunksize > 0) && (geo->chunksize != UnSet)
- && (geo->chunksize != info.array.chunk_size))
+ if (geo->chunksize > 0 && geo->chunksize != UnSet &&
+ geo->chunksize != info.array.chunk_size) {
+ if (info.array.level == 10) {
+ pr_err("Error. Chunk size change for RAID 10 is not supported.\n");
+ change = -1;
+ goto analyse_change_exit;
+ }
change = CH_MIGRATION;
- else
+ } else {
geo->chunksize = info.array.chunk_size;
+ }
chunk = geo->chunksize / 1024;
*/
current_size = info.custom_array_size / data_disks;
- if ((geo->size > 0) && (geo->size != MAX_SIZE)) {
+ if (geo->size > 0 && geo->size != MAX_SIZE) {
/* align component size
*/
geo->size = imsm_component_size_aligment_check(
}
}
- if ((current_size != geo->size) && (geo->size > 0)) {
+ if (current_size != geo->size && geo->size > 0) {
if (change != -1) {
pr_err("Error. Size change should be the only one at a time.\n");
change = -1;
geo->size = max_size;
}
- if ((direction == ROLLBACK_METADATA_CHANGES)) {
+ if (direction == ROLLBACK_METADATA_CHANGES) {
/* accept size for rollback only
*/
} else {
}
analyse_change_exit:
- if ((direction == ROLLBACK_METADATA_CHANGES) &&
- ((change == CH_MIGRATION) || (change == CH_TAKEOVER))) {
+ if (direction == ROLLBACK_METADATA_CHANGES &&
+ (change == CH_MIGRATION || change == CH_TAKEOVER)) {
dprintf("imsm: Metadata changes rollback is not supported for migration and takeover operations.\n");
change = -1;
}
int rv;
rv = sysfs_get_ll(info, NULL, "degraded", &new_degraded);
- if ((rv == -1) || (new_degraded != (unsigned long long)degraded)) {
+ if (rv == -1 || (new_degraded != (unsigned long long)degraded)) {
/* check each device to ensure it is still working */
struct mdinfo *sd;
new_degraded = 0;
}
/* Only one volume can migrate at the same time */
if (migr_vol_qan != 1) {
- pr_err(": %s", migr_vol_qan ?
+ pr_err("%s", migr_vol_qan ?
"Number of migrating volumes greater than 1\n" :
"There is no volume during migrationg\n");
goto abort;