#define MPB_ATTRIB_IGNORED (MPB_ATTRIB_NEVER_USE)
#define MPB_SECTOR_CNT 2210
-#define IMSM_RESERVED_SECTORS 4096
-#define NUM_BLOCKS_DIRTY_STRIPE_REGION 2056
+#define IMSM_RESERVED_SECTORS 8192
+#define NUM_BLOCKS_DIRTY_STRIPE_REGION 2048
#define SECT_PER_MB_SHIFT 11
#define MAX_SECTOR_SIZE 4096
+#define MULTIPLE_PPL_AREA_SIZE_IMSM (1024 * 1024) /* Size of the whole
+ * multiple PPL area
+ */
/* Disk configuration info. */
#define IMSM_MAX_DEVICES 255
#define RWH_OFF 0
#define RWH_DISTRIBUTED 1
#define RWH_JOURNALING_DRIVE 2
+#define RWH_MULTIPLE_DISTRIBUTED 3
+#define RWH_MULTIPLE_PPLS_JOURNALING_DRIVE 4
+#define RWH_MULTIPLE_OFF 5
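+/* The "MULTIPLE" policies keep several PPLs per member inside the 1MB
+ * MULTIPLE_PPL_AREA_SIZE_IMSM area instead of a single PPL entry.
+ */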
__u8 rwh_policy; /* Raid Write Hole Policy */
__u8 jd_serial[MAX_RAID_SERIAL_LEN]; /* Journal Drive serial number */
__u8 filler1;
__u32 orig_family_num; /* 0x40 - 0x43 original family num */
__u32 pwr_cycle_count; /* 0x44 - 0x47 simulated power cycle count for array */
__u32 bbm_log_size; /* 0x48 - 0x4B - size of bad Block Mgmt Log in bytes */
-#define IMSM_FILLERS 35
- __u32 filler[IMSM_FILLERS]; /* 0x4C - 0xD7 RAID_MPB_FILLERS */
+ __u16 num_raid_devs_created; /* 0x4C - 0x4D Used for generating unique
+ * volume IDs for raid_dev created in this array
+ * (starts at 1)
+ */
+ __u16 filler1; /* 0x4E - 0x4F */
+#define IMSM_FILLERS 34
+ __u32 filler[IMSM_FILLERS]; /* 0x50 - 0xD7 RAID_MPB_FILLERS */
struct imsm_disk disk[1]; /* 0xD8 diskTbl[numDisks] */
/* here comes imsm_dev[num_raid_devs] */
/* here comes BBM logs */
* already been migrated and must
* be recovered from checkpoint area */
-#define PPL_ENTRY_SPACE (128 * 1024) /* Size of the PPL, without the header */
+#define PPL_ENTRY_SPACE (128 * 1024) /* Size of single PPL, without the header */
struct migr_record {
__u32 rec_status; /* Status used to determine how to restart
return join_u32(map->num_data_stripes_lo, map->num_data_stripes_hi);
}
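+/* Return the volume size from the metadata as one 64-bit value
+ * (the on-disk format splits it across size_low/size_high).
+ */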
+static unsigned long long imsm_dev_size(struct imsm_dev *dev)
+{
+ if (dev == NULL)
+ return 0;
+ return join_u32(dev->size_low, dev->size_high);
+}
+
static void set_total_blocks(struct imsm_disk *disk, unsigned long long n)
{
split_ull(n, &disk->total_blocks_lo, &disk->total_blocks_hi);
split_ull(n, &map->num_data_stripes_lo, &map->num_data_stripes_hi);
}
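+/* Store a 64-bit volume size into the split size_low/size_high fields. */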
+static void set_imsm_dev_size(struct imsm_dev *dev, unsigned long long n)
+{
+ split_ull(n, &dev->size_low, &dev->size_high);
+}
+
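+/* Compute the per-disk size of the array from num_data_stripes and the
+ * strip size. RAID1 and RAID10 mirror the data, so each member carries
+ * twice the data-stripe footprint.
+ */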
+static unsigned long long per_dev_array_size(struct imsm_map *map)
+{
+ unsigned long long array_size = 0;
+
+ if (map == NULL)
+ return array_size;
+
+ array_size = num_data_stripes(map) * map->blocks_per_strip;
+ if (get_imsm_raid_level(map) == 1 || get_imsm_raid_level(map) == 10)
+ array_size *= 2;
+
+ return array_size;
+}
+
static struct extent *get_extents(struct intel_super *super, struct dl *dl)
{
/* find a list of used extents on the given physical device */
if (get_imsm_disk_slot(map, dl->index) >= 0) {
e->start = pba_of_lba0(map);
- e->size = blocks_per_member(map);
+ e->size = per_dev_array_size(map);
e++;
}
}
return size;
}
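+/* Return whether an array at the given RAID level can still be resynced
+ * with this many missing disks (only RAID10 tolerates one missing disk
+ * here; all other levels tolerate none).
+ */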
+static int able_to_resync(int raid_level, int missing_disks)
+{
+ int max_missing_disks = 0;
+
+ switch (raid_level) {
+ case 10:
+ max_missing_disks = 1;
+ break;
+ default:
+ max_missing_disks = 0;
+ }
+ return missing_disks <= max_missing_disks;
+}
+
/* try to determine how much space is reserved for metadata from
* the last get_extents() entry on the smallest active disk,
* otherwise fallback to the default
return (remainder < rv) ? remainder : rv;
}
-/* Return minimum size of a spare that can be used in this array*/
-static unsigned long long min_acceptable_spare_size_imsm(struct supertype *st)
+/*
+ * Return the minimum size and sector size of a spare
+ * that can be used in this array
+ */
+int get_spare_criteria_imsm(struct supertype *st, struct spare_criteria *c)
{
struct intel_super *super = st->sb;
struct dl *dl;
struct extent *e;
int i;
- unsigned long long rv = 0;
+ unsigned long long size = 0;
+
+ c->min_size = 0;
+ c->sector_size = 0;
if (!super)
- return rv;
+ return -EINVAL;
/* find first active disk in array */
dl = super->disks;
while (dl && (is_failed(&dl->disk) || dl->index == -1))
dl = dl->next;
if (!dl)
- return rv;
+ return -EINVAL;
/* find last lba used by subarrays */
e = get_extents(super, dl);
if (!e)
- return rv;
+ return -EINVAL;
for (i = 0; e[i].size; i++)
continue;
if (i > 0)
- rv = e[i-1].start + e[i-1].size;
+ size = e[i-1].start + e[i-1].size;
free(e);
/* add the amount of space needed for metadata */
- rv = rv + imsm_min_reserved_sectors(super);
+ size += imsm_min_reserved_sectors(super);
+
+ c->min_size = size * 512;
+ c->sector_size = super->sector_size;
- return rv * 512;
+ return 0;
}
static int is_gen_migration(struct imsm_dev *dev);
ord & IMSM_ORD_REBUILD ? " (out-of-sync)" : "");
} else
printf(" This Slot : ?\n");
- sz = __le32_to_cpu(dev->size_high);
- sz <<= 32;
- sz += __le32_to_cpu(dev->size_low);
- printf(" Array Size : %llu%s\n", (unsigned long long)sz,
+ printf(" Sector Size : %u\n", super->sector_size);
+ sz = imsm_dev_size(dev);
+ printf(" Array Size : %llu%s\n",
+ (unsigned long long)sz * 512 / super->sector_size,
human_size(sz * 512));
sz = blocks_per_member(map);
- printf(" Per Dev Size : %llu%s\n", (unsigned long long)sz,
+ printf(" Per Dev Size : %llu%s\n",
+ (unsigned long long)sz * 512 / super->sector_size,
human_size(sz * 512));
printf(" Sector Offset : %llu\n",
pba_of_lba0(map));
printf(" Dirty State : %s\n", (dev->vol.dirty & RAIDVOL_DIRTY) ?
"dirty" : "clean");
printf(" RWH Policy : ");
- if (dev->rwh_policy == RWH_OFF)
+ if (dev->rwh_policy == RWH_OFF || dev->rwh_policy == RWH_MULTIPLE_OFF)
printf("off\n");
else if (dev->rwh_policy == RWH_DISTRIBUTED)
printf("PPL distributed\n");
else if (dev->rwh_policy == RWH_JOURNALING_DRIVE)
printf("PPL journaling drive\n");
+ else if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
+ printf("Multiple distributed PPLs\n");
+ else if (dev->rwh_policy == RWH_MULTIPLE_PPLS_JOURNALING_DRIVE)
+ printf("Multiple PPLs on journaling drive\n");
else
printf("<unknown:%d>\n", dev->rwh_policy);
}
struct imsm_dev *dev = __get_imsm_dev(mpb, i);
struct imsm_map *map = get_imsm_map(dev, MAP_0);
/* dev */
- split_ull((join_u32(dev->size_low, dev->size_high)/IMSM_4K_DIV),
- &dev->size_low, &dev->size_high);
+ set_imsm_dev_size(dev, imsm_dev_size(dev)/IMSM_4K_DIV);
dev->vol.curr_migr_unit /= IMSM_4K_DIV;
/* map0 */
struct imsm_dev *dev = __get_imsm_dev(mpb, i);
struct imsm_map *map = get_imsm_map(dev, MAP_0);
/* dev */
- split_ull((join_u32(dev->size_low, dev->size_high)*IMSM_4K_DIV),
- &dev->size_low, &dev->size_high);
+ set_imsm_dev_size(dev, imsm_dev_size(dev)*IMSM_4K_DIV);
dev->vol.curr_migr_unit *= IMSM_4K_DIV;
/* map0 */
strncpy(str, (char *)mpb->sig, MPB_SIG_LEN);
str[MPB_SIG_LEN-1] = '\0';
printf(" Magic : %s\n", str);
- snprintf(str, strlen(MPB_VERSION_RAID0), "%s", get_imsm_version(mpb));
printf(" Version : %s\n", get_imsm_version(mpb));
printf(" Orig Family : %08x\n", __le32_to_cpu(mpb->orig_family_num));
printf(" Family : %08x\n", __le32_to_cpu(mpb->family_num));
printf(" Platform : Intel(R) ");
if (orom->capabilities == 0 && orom->driver_features == 0)
printf("Matrix Storage Manager\n");
+ else if (imsm_orom_is_enterprise(orom) && orom->major_ver >= 6)
+ printf("Virtual RAID on CPU\n");
else
printf("Rapid Storage Technology%s\n",
imsm_orom_is_enterprise(orom) ? " enterprise" : "");
return num_stripes_per_unit_resync(dev);
}
-static __u8 imsm_num_data_members(struct imsm_dev *dev, int second_map)
+static __u8 imsm_num_data_members(struct imsm_map *map)
{
/* named 'imsm_' because raid0, raid1 and raid10
* counter-intuitively have the same number of data disks
*/
- struct imsm_map *map = get_imsm_map(dev, second_map);
-
switch (get_imsm_raid_level(map)) {
case 0:
return map->num_members;
}
}
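+/* Derive the component (per-disk) size, preferring the value computed
+ * from num_data_stripes but falling back to dev_size / member_disks when
+ * the two disagree by more than the 1MB rounding allows.
+ */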
+static unsigned long long calc_component_size(struct imsm_map *map,
+ struct imsm_dev *dev)
+{
+ unsigned long long component_size;
+ unsigned long long dev_size = imsm_dev_size(dev);
+ long long calc_dev_size = 0;
+ unsigned int member_disks = imsm_num_data_members(map);
+
+ if (member_disks == 0)
+ return 0;
+
+ component_size = per_dev_array_size(map);
+ calc_dev_size = component_size * member_disks;
+
+ /* Component size is rounded to 1MB, so the size from the metadata and
+ * the size calculated from num_data_stripes may differ by up to 2048
+ * blocks per device. A larger difference means that the array size was
+ * expanded and num_data_stripes was not updated.
+ */
+ if (llabs(calc_dev_size - (long long)dev_size) >
+ (1 << SECT_PER_MB_SHIFT) * member_disks) {
+ component_size = dev_size / member_disks;
+ dprintf("Invalid num_data_stripes in metadata; expected=%llu, found=%llu\n",
+ component_size / map->blocks_per_strip,
+ num_data_stripes(map));
+ }
+
+ return component_size;
+}
+
static __u32 parity_segment_depth(struct imsm_dev *dev)
{
struct imsm_map *map = get_imsm_map(dev, MAP_0);
*/
stripes_per_unit = num_stripes_per_unit_resync(dev);
migr_chunk = migr_strip_blocks_resync(dev);
- disks = imsm_num_data_members(dev, MAP_0);
+ disks = imsm_num_data_members(map);
blocks_per_unit = stripes_per_unit * migr_chunk * disks;
stripe = __le16_to_cpu(map->blocks_per_strip) * disks;
segment = blocks_per_unit / stripe;
}
return rv;
}
-static unsigned long long imsm_component_size_aligment_check(int level,
+static unsigned long long imsm_component_size_alignment_check(int level,
int chunk_size,
unsigned int sector_size,
unsigned long long component_size)
{
- unsigned int component_size_alligment;
+ unsigned int component_size_alignment;
- /* check component size aligment
+ /* check component size alignment
*/
- component_size_alligment = component_size % (chunk_size/sector_size);
+ component_size_alignment = component_size % (chunk_size/sector_size);
- dprintf("(Level: %i, chunk_size = %i, component_size = %llu), component_size_alligment = %u\n",
+ dprintf("(Level: %i, chunk_size = %i, component_size = %llu), component_size_alignment = %u\n",
level, chunk_size, component_size,
- component_size_alligment);
+ component_size_alignment);
- if (component_size_alligment && (level != 1) && (level != UnSet)) {
- dprintf("imsm: reported component size alligned from %llu ",
+ if (component_size_alignment && (level != 1) && (level != UnSet)) {
+ dprintf("imsm: reported component size aligned from %llu ",
component_size);
- component_size -= component_size_alligment;
+ component_size -= component_size_alignment;
dprintf_cont("to %llu (%i).\n",
- component_size, component_size_alligment);
+ component_size, component_size_alignment);
}
return component_size;
info->array.chunk_size =
__le16_to_cpu(map_to_analyse->blocks_per_strip) << 9;
info->array.state = !(dev->vol.dirty & RAIDVOL_DIRTY);
- info->custom_array_size = __le32_to_cpu(dev->size_high);
- info->custom_array_size <<= 32;
- info->custom_array_size |= __le32_to_cpu(dev->size_low);
+ info->custom_array_size = imsm_dev_size(dev);
info->recovery_blocked = imsm_reshape_blocks_arrays_changes(st->sb);
if (is_gen_migration(dev)) {
}
info->data_offset = pba_of_lba0(map_to_analyse);
-
- if (info->array.level == 5) {
- info->component_size = num_data_stripes(map_to_analyse) *
- map_to_analyse->blocks_per_strip;
- } else {
- info->component_size = blocks_per_member(map_to_analyse);
- }
-
- info->component_size = imsm_component_size_aligment_check(
+ info->component_size = calc_component_size(map, dev);
+ info->component_size = imsm_component_size_alignment_check(
info->array.level,
info->array.chunk_size,
super->sector_size,
memset(info->uuid, 0, sizeof(info->uuid));
info->recovery_start = MaxSector;
- if (info->array.level == 5 && dev->rwh_policy == RWH_DISTRIBUTED) {
+ if (info->array.level == 5 &&
+ (dev->rwh_policy == RWH_DISTRIBUTED ||
+ dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)) {
info->consistency_policy = CONSISTENCY_POLICY_PPL;
info->ppl_sector = get_ppl_sector(super, super->current_vol);
- info->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE) >> 9;
+ if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
+ info->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
+ else
+ info->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE)
+ >> 9;
} else if (info->array.level <= 0) {
info->consistency_policy = CONSISTENCY_POLICY_NONE;
} else {
(unsigned long long)blocks_per_unit,
info->reshape_progress);
- used_disks = imsm_num_data_members(dev, MAP_1);
+ used_disks = imsm_num_data_members(prev_map);
if (used_disks > 0) {
- array_blocks = blocks_per_member(map) *
+ array_blocks = per_dev_array_size(map) *
used_disks;
info->custom_array_size =
round_size_to_mb(array_blocks,
__u32 ord = get_imsm_ord_tbl_ent(dev, j, MAP_0);
__u32 idx = ord_to_idx(ord);
+ if (super->disks && super->disks->index == (int)idx)
+ info->disk.raid_disk = j;
+
if (!(ord & IMSM_ORD_REBUILD) &&
get_imsm_missing(super, idx)) {
missing = 1;
/* duplicate and then set the target end state in map[0] */
memcpy(dest, src, sizeof_imsm_map(src));
- if (migr_type == MIGR_REBUILD || migr_type == MIGR_GEN_MIGR) {
+ if (migr_type == MIGR_GEN_MIGR) {
__u32 ord;
int i;
if (pba_of_lba0(map0) != pba_of_lba0(map1))
/* migration optimization area was used */
return -1;
- if (migr_rec->ascending_migr == 0
- && migr_rec->dest_depth_per_unit > 0)
+ if (migr_rec->ascending_migr == 0 &&
+ migr_rec->dest_depth_per_unit > 0)
/* descending reshape not supported yet */
return -1;
}
sectors = mpb_sectors(anchor, sector_size) - 1;
free(anchor);
- if (posix_memalign(&super->migr_rec_buf, sector_size,
- MIGR_REC_BUF_SECTORS*sector_size) != 0) {
+ if (posix_memalign(&super->migr_rec_buf, MAX_SECTOR_SIZE,
+ MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE) != 0) {
pr_err("could not allocate migr_rec buffer\n");
free(super->buf);
return 2;
hba = hba->next;
}
fprintf(stderr, ").\n"
- " Mixing devices attached to different %s is not allowed.\n",
- hba_name->type == SYS_DEV_VMD ? "VMD domains" : "controllers");
+ " Mixing devices attached to different controllers is not allowed.\n");
}
return 2;
}
struct imsm_map *map;
int idx = mpb->num_raid_devs;
int i;
+ int namelen;
unsigned long long array_blocks;
size_t size_old, size_new;
unsigned long long num_data_stripes;
pr_err("could not allocate new mpb\n");
return 0;
}
- if (posix_memalign(&super->migr_rec_buf, sector_size,
- MIGR_REC_BUF_SECTORS*sector_size) != 0) {
+ if (posix_memalign(&super->migr_rec_buf, MAX_SECTOR_SIZE,
+ MIGR_REC_BUF_SECTORS*
+ MAX_SECTOR_SIZE) != 0) {
pr_err("could not allocate migr_rec buffer\n");
free(super->buf);
free(super);
return 0;
dv = xmalloc(sizeof(*dv));
dev = xcalloc(1, sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1));
- strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
+ /*
+ * Explicitly allow truncation to avoid triggering gcc's
+ * -Werror=stringop-truncation
+ */
+ namelen = min((int) strlen(name), MAX_RAID_SERIAL_LEN);
+ memcpy(dev->volume, name, namelen);
array_blocks = calc_array_size(info->level, info->raid_disks,
info->layout, info->chunk_size,
s->size * BLOCKS_PER_KB);
array_blocks = round_size_to_mb(array_blocks, data_disks);
size_per_member = array_blocks / data_disks;
- dev->size_low = __cpu_to_le32((__u32) array_blocks);
- dev->size_high = __cpu_to_le32((__u32) (array_blocks >> 32));
+ set_imsm_dev_size(dev, array_blocks);
dev->status = (DEV_READ_COALESCING | DEV_WRITE_COALESCING);
vol = &dev->vol;
vol->migr_state = 0;
vol->curr_migr_unit = 0;
map = get_imsm_map(dev, MAP_0);
set_pba_of_lba0(map, super->create_offset);
- set_blocks_per_member(map, info_to_blocks_per_member(info,
- size_per_member /
- BLOCKS_PER_KB));
map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
map->failed_disk_num = ~0;
if (info->level > 0)
num_data_stripes /= map->num_domains;
set_num_data_stripes(map, num_data_stripes);
+ size_per_member += NUM_BLOCKS_DIRTY_STRIPE_REGION;
+ set_blocks_per_member(map, info_to_blocks_per_member(info,
+ size_per_member /
+ BLOCKS_PER_KB));
+
map->num_members = info->raid_disks;
for (i = 0; i < map->num_members; i++) {
/* initialized in add_to_super */
set_imsm_ord_tbl_ent(map, i, IMSM_ORD_REBUILD);
}
mpb->num_raid_devs++;
+ mpb->num_raid_devs_created++;
+ dev->my_vol_raid_dev_num = mpb->num_raid_devs_created;
- if (s->consistency_policy == UnSet ||
- s->consistency_policy == CONSISTENCY_POLICY_RESYNC ||
- s->consistency_policy == CONSISTENCY_POLICY_NONE) {
- dev->rwh_policy = RWH_OFF;
+ if (s->consistency_policy <= CONSISTENCY_POLICY_RESYNC) {
+ dev->rwh_policy = RWH_MULTIPLE_OFF;
} else if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
- dev->rwh_policy = RWH_DISTRIBUTED;
+ dev->rwh_policy = RWH_MULTIPLE_DISTRIBUTED;
} else {
free(dev);
free(dv);
return 1;
}
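+/* Check that a drive's logical sector size matches the container's;
+ * mixing sector sizes within one volume is not supported.
+ */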
+static int drive_validate_sector_size(struct intel_super *super, struct dl *dl)
+{
+ unsigned int member_sector_size;
+
+ if (dl->fd < 0) {
+ pr_err("Invalid file descriptor for %s\n", dl->devname);
+ return 0;
+ }
+
+ if (!get_dev_sector_size(dl->fd, dl->devname, &member_sector_size))
+ return 0;
+ if (member_sector_size != super->sector_size)
+ return 0;
+ return 1;
+}
+
static int add_to_super_imsm_volume(struct supertype *st, mdu_disk_info_t *dk,
int fd, char *devname)
{
return 1;
}
+ if (mpb->num_disks == 0)
+ if (!get_dev_sector_size(dl->fd, dl->devname,
+ &super->sector_size))
+ return 1;
+
+ if (!drive_validate_sector_size(super, dl)) {
+ pr_err("Combining drives of different sector size in one volume is not allowed\n");
+ return 1;
+ }
+
/* add a pristine spare to the metadata */
if (dl->index < 0) {
dl->index = super->anchor->num_disks;
} else if (super->hba->type == SYS_DEV_VMD && super->orom &&
!imsm_orom_has_tpv_support(super->orom)) {
pr_err("\tPlatform configuration does not support non-Intel NVMe drives.\n"
- "\tPlease refer to Intel(R) RSTe user guide.\n");
+ "\tPlease refer to Intel(R) RSTe/VROC user guide.\n");
free(dd->devname);
free(dd);
return 1;
if (super->sector_size == 0) {
/* this a first device, so sector_size is not set yet */
super->sector_size = member_sector_size;
- } else if (member_sector_size != super->sector_size) {
- pr_err("Mixing between different sector size is forbidden, aborting...\n");
- if (dd->devname)
- free(dd->devname);
- free(dd);
- return 1;
}
/* clear migr_rec when adding disk to container */
- memset(super->migr_rec_buf, 0, MIGR_REC_BUF_SECTORS*super->sector_size);
- if (lseek64(fd, size - MIGR_REC_SECTOR_POSITION*super->sector_size,
+ memset(super->migr_rec_buf, 0, MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE);
+ if (lseek64(fd, size - MIGR_REC_SECTOR_POSITION*member_sector_size,
SEEK_SET) >= 0) {
if ((unsigned int)write(fd, super->migr_rec_buf,
- MIGR_REC_BUF_SECTORS*super->sector_size) !=
- MIGR_REC_BUF_SECTORS*super->sector_size)
+ MIGR_REC_BUF_SECTORS*member_sector_size) !=
+ MIGR_REC_BUF_SECTORS*member_sector_size)
perror("Write migr_rec failed");
}
}
if (clear_migration_record)
memset(super->migr_rec_buf, 0,
- MIGR_REC_BUF_SECTORS*sector_size);
+ MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE);
if (sector_size == 4096)
convert_to_4k(super);
__u32 crc32c_le(__u32 crc, unsigned char const *p, size_t len);
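+/* Calculate the PPL header checksum and write the header at the given
+ * sector, then fsync the device. Returns 0 on success or -errno.
+ */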
+static int write_ppl_header(unsigned long long ppl_sector, int fd, void *buf)
+{
+ struct ppl_header *ppl_hdr = buf;
+ int ret;
+
+ ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));
+
+ if (lseek64(fd, ppl_sector * 512, SEEK_SET) < 0) {
+ ret = -errno;
+ perror("Failed to seek to PPL header location");
+ return ret;
+ }
+
+ if (write(fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
+ ret = -errno;
+ perror("Write PPL header failed");
+ return ret;
+ }
+
+ fsync(fd);
+
+ return 0;
+}
+
static int write_init_ppl_imsm(struct supertype *st, struct mdinfo *info, int fd)
{
struct intel_super *super = st->sb;
struct ppl_header *ppl_hdr;
int ret;
- ret = posix_memalign(&buf, 4096, PPL_HEADER_SIZE);
+ /* first clear the entire PPL area */
+ ret = zero_disk_range(fd, info->ppl_sector, info->ppl_size);
+ if (ret)
+ return ret;
+
+ ret = posix_memalign(&buf, MAX_SECTOR_SIZE, PPL_HEADER_SIZE);
if (ret) {
pr_err("Failed to allocate PPL header buffer\n");
- return ret;
+ return -ret;
}
memset(buf, 0, PPL_HEADER_SIZE);
ppl_hdr = buf;
memset(ppl_hdr->reserved, 0xff, PPL_HDR_RESERVED);
ppl_hdr->signature = __cpu_to_le32(super->anchor->orig_family_num);
- ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));
-
- if (lseek64(fd, info->ppl_sector * 512, SEEK_SET) < 0) {
- ret = errno;
- perror("Failed to seek to PPL header location");
- }
- if (!ret && write(fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
- ret = errno;
- perror("Write PPL header failed");
+ if (info->mismatch_cnt) {
+ /*
+ * We are overwriting an invalid PPL. Make one entry with a bad
+ * checksum to prevent the kernel from skipping resync.
+ */
+ ppl_hdr->entries_count = __cpu_to_le32(1);
+ ppl_hdr->entries[0].checksum = ~0;
}
- if (!ret)
- fsync(fd);
+ ret = write_ppl_header(info->ppl_sector, fd, buf);
free(buf);
return ret;
}
+static int is_rebuilding(struct imsm_dev *dev);
+
static int validate_ppl_imsm(struct supertype *st, struct mdinfo *info,
struct mdinfo *disk)
{
struct intel_super *super = st->sb;
struct dl *d;
- void *buf;
+ void *buf_orig, *buf, *buf_prev = NULL;
int ret = 0;
- struct ppl_header *ppl_hdr;
+ struct ppl_header *ppl_hdr = NULL;
__u32 crc;
struct imsm_dev *dev;
- struct imsm_map *map;
__u32 idx;
+ unsigned int i;
+ unsigned long long ppl_offset = 0;
+ unsigned long long prev_gen_num = 0;
if (disk->disk.raid_disk < 0)
return 0;
- if (posix_memalign(&buf, 4096, PPL_HEADER_SIZE)) {
+ dev = get_imsm_dev(super, info->container_member);
+ idx = get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_0);
+ d = get_imsm_dl_disk(super, idx);
+
+ if (!d || d->index < 0 || is_failed(&d->disk))
+ return 0;
+
+ if (posix_memalign(&buf_orig, MAX_SECTOR_SIZE, PPL_HEADER_SIZE * 2)) {
pr_err("Failed to allocate PPL header buffer\n");
return -1;
}
+ buf = buf_orig;
- dev = get_imsm_dev(super, info->container_member);
- map = get_imsm_map(dev, MAP_X);
- idx = get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_X);
- d = get_imsm_dl_disk(super, idx);
+ ret = 1;
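+ /* walk the whole multi-PPL area and find the newest valid header */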
+ while (ppl_offset < MULTIPLE_PPL_AREA_SIZE_IMSM) {
+ void *tmp;
- if (!d || d->index < 0 || is_failed(&d->disk))
- goto out;
+ dprintf("Checking potential PPL at offset: %llu\n", ppl_offset);
- if (lseek64(d->fd, info->ppl_sector * 512, SEEK_SET) < 0) {
- perror("Failed to seek to PPL header location");
- ret = -1;
- goto out;
+ if (lseek64(d->fd, info->ppl_sector * 512 + ppl_offset,
+ SEEK_SET) < 0) {
+ perror("Failed to seek to PPL header location");
+ ret = -1;
+ break;
+ }
+
+ if (read(d->fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
+ perror("Read PPL header failed");
+ ret = -1;
+ break;
+ }
+
+ ppl_hdr = buf;
+
+ crc = __le32_to_cpu(ppl_hdr->checksum);
+ ppl_hdr->checksum = 0;
+
+ if (crc != ~crc32c_le(~0, buf, PPL_HEADER_SIZE)) {
+ dprintf("Wrong PPL header checksum on %s\n",
+ d->devname);
+ break;
+ }
+
+ if (prev_gen_num > __le64_to_cpu(ppl_hdr->generation)) {
+ /* previous was newest, it was already checked */
+ break;
+ }
+
+ if ((__le32_to_cpu(ppl_hdr->signature) !=
+ super->anchor->orig_family_num)) {
+ dprintf("Wrong PPL header signature on %s\n",
+ d->devname);
+ ret = 1;
+ break;
+ }
+
+ ret = 0;
+ prev_gen_num = __le64_to_cpu(ppl_hdr->generation);
+
+ ppl_offset += PPL_HEADER_SIZE;
+ for (i = 0; i < __le32_to_cpu(ppl_hdr->entries_count); i++)
+ ppl_offset +=
+ __le32_to_cpu(ppl_hdr->entries[i].pp_size);
+
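+ /* keep the newest header seen so far in buf_prev and reuse the
+ * other half of the buffer for the next read
+ */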
+ if (!buf_prev)
+ buf_prev = buf + PPL_HEADER_SIZE;
+ tmp = buf_prev;
+ buf_prev = buf;
+ buf = tmp;
}
- if (read(d->fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
- perror("Read PPL header failed");
- ret = -1;
- goto out;
+ if (buf_prev) {
+ buf = buf_prev;
+ ppl_hdr = buf_prev;
}
- ppl_hdr = buf;
+ /*
+ * Update metadata to use the multiple PPLs area (1MB).
+ * This is done once for all RAID members.
+ */
+ if (info->consistency_policy == CONSISTENCY_POLICY_PPL &&
+ info->ppl_size != (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9)) {
+ char subarray[20];
+ struct mdinfo *member_dev;
- crc = __le32_to_cpu(ppl_hdr->checksum);
- ppl_hdr->checksum = 0;
+ sprintf(subarray, "%d", info->container_member);
- if (crc != ~crc32c_le(~0, buf, PPL_HEADER_SIZE)) {
- dprintf("Wrong PPL header checksum on %s\n",
- d->devname);
- ret = 1;
- }
+ if (mdmon_running(st->container_devnm))
+ st->update_tail = &st->updates;
- if (!ret && (__le32_to_cpu(ppl_hdr->signature) !=
- super->anchor->orig_family_num)) {
- dprintf("Wrong PPL header signature on %s\n",
- d->devname);
- ret = 1;
+ if (st->ss->update_subarray(st, subarray, "ppl", NULL)) {
+ pr_err("Failed to update subarray %s\n",
+ subarray);
+ } else {
+ if (st->update_tail)
+ flush_metadata_updates(st);
+ else
+ st->ss->sync_metadata(st);
+ info->ppl_size = (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9);
+ for (member_dev = info->devs; member_dev;
+ member_dev = member_dev->next)
+ member_dev->ppl_size =
+ (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9);
+ }
}
-out:
- free(buf);
+ if (ret == 1) {
+ struct imsm_map *map = get_imsm_map(dev, MAP_X);
- if (ret == 1 && map->map_state == IMSM_T_STATE_UNINITIALIZED)
- return st->ss->write_init_ppl(st, info, d->fd);
+ if (map->map_state == IMSM_T_STATE_UNINITIALIZED ||
+ (map->map_state == IMSM_T_STATE_NORMAL &&
+ !(dev->vol.dirty & RAIDVOL_DIRTY)) ||
+ (is_rebuilding(dev) &&
+ dev->vol.curr_migr_unit == 0 &&
+ get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_1) != idx))
+ ret = st->ss->write_init_ppl(st, info, d->fd);
+ else
+ info->mismatch_cnt++;
+ } else if (ret == 0 &&
+ ppl_hdr->entries_count == 0 &&
+ is_rebuilding(dev) &&
+ info->resync_start == 0) {
+ /*
+ * The header has no entries - add a single empty entry and
+ * rewrite the header to prevent the kernel from going into
+ * resync after an interrupted rebuild.
+ */
+ ppl_hdr->entries_count = __cpu_to_le32(1);
+ ret = write_ppl_header(info->ppl_sector, d->fd, buf);
+ }
+
+ free(buf_orig);
return ret;
}
for (memb = mdstat ; memb ; memb = memb->next) {
if (memb->metadata_version &&
- (strncmp(memb->metadata_version, "external:", 9) == 0) &&
+ (strncmp(memb->metadata_version, "external:", 9) == 0) &&
(strcmp(&memb->metadata_version[9], name) == 0) &&
!is_subarray(memb->metadata_version+9) &&
memb->members) {
for (tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
char *devname = tmpdev->devname;
- struct stat stb;
+ dev_t rdev;
struct supertype *tst;
int dfd;
if (tmpdev->used > 1)
dprintf("cannot open device %s: %s\n",
devname, strerror(errno));
tmpdev->used = 2;
- } else if (fstat(dfd, &stb)< 0) {
- /* Impossible! */
- dprintf("fstat failed for %s: %s\n",
- devname, strerror(errno));
- tmpdev->used = 2;
- } else if ((stb.st_mode & S_IFMT) != S_IFBLK) {
- dprintf("%s is not a block device.\n",
- devname);
+ } else if (!fstat_is_blkdev(dfd, devname, &rdev)) {
tmpdev->used = 2;
} else if (must_be_container(dfd)) {
struct supertype *cst;
if (cst)
cst->ss->free_super(cst);
} else {
- tmpdev->st_rdev = stb.st_rdev;
+ tmpdev->st_rdev = rdev;
if (tst->ss->load_super(tst,dfd, NULL)) {
dprintf("no RAID superblock on %s\n",
devname);
unsigned long long *freesize,
int verbose)
{
- struct stat stb;
+ dev_t rdev;
struct intel_super *super = st->sb;
struct imsm_super *mpb;
struct dl *dl;
mpb = super->anchor;
if (!validate_geometry_imsm_orom(super, level, layout, raiddisks, chunk, size, verbose)) {
- pr_err("RAID gemetry validation failed. Cannot proceed with the action(s).\n");
+ pr_err("RAID geometry validation failed. Cannot proceed with the action(s).\n");
return 0;
}
if (!dev) {
}
/* This device must be a member of the set */
- if (stat(dev, &stb) < 0)
- return 0;
- if ((S_IFMT & stb.st_mode) != S_IFBLK)
+ if (!stat_is_blkdev(dev, &rdev))
return 0;
for (dl = super->disks ; dl ; dl = dl->next) {
- if (dl->major == (int)major(stb.st_rdev) &&
- dl->minor == (int)minor(stb.st_rdev))
+ if (dl->major == (int)major(rdev) &&
+ dl->minor == (int)minor(rdev))
break;
}
if (!dl) {
verbose);
}
+ if (size && ((size < 1024) || (*chunk != UnSet &&
+ size < (unsigned long long) *chunk))) {
+ pr_err("Given size must be greater than 1M and chunk size.\n");
+ /* Depends on algorithm in Create.c:
+ * if container was given (dev == NULL) return -1,
+ * if block device was given (dev != NULL) return 0.
+ */
+ return dev ? -1 : 0;
+ }
+
if (!dev) {
if (st->sb) {
struct intel_super *super = st->sb;
append_metadata_update(st, u, sizeof(*u));
} else {
struct imsm_dev *dev;
- int i;
+ int i, namelen;
dev = get_imsm_dev(super, vol);
- strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
- dev->volume[MAX_RAID_SERIAL_LEN-1] = '\0';
+ memset(dev->volume, '\0', MAX_RAID_SERIAL_LEN);
+ namelen = min((int)strlen(name), MAX_RAID_SERIAL_LEN);
+ memcpy(dev->volume, name, namelen);
for (i = 0; i < mpb->num_raid_devs; i++) {
dev = get_imsm_dev(super, i);
handle_missing(super, dev);
return 2;
if (strcmp(update, "ppl") == 0)
- new_policy = RWH_DISTRIBUTED;
+ new_policy = RWH_MULTIPLE_DISTRIBUTED;
else
- new_policy = RWH_OFF;
+ new_policy = RWH_MULTIPLE_OFF;
if (st->update_tail) {
struct imsm_update_rwh_policy *u = xmalloc(sizeof(*u));
int slot;
int chunk;
char *ep;
+ int level;
if (subarray &&
(i != strtoul(subarray, &ep, 10) || *ep != '\0'))
dev = get_imsm_dev(super, i);
map = get_imsm_map(dev, MAP_0);
map2 = get_imsm_map(dev, MAP_1);
+ level = get_imsm_raid_level(map);
/* do not publish arrays that are in the middle of an
* unsupported migration
chunk = __le16_to_cpu(map->blocks_per_strip) >> 1;
/* mdadm does not support all metadata features- set the bit in all arrays state */
if (!validate_geometry_imsm_orom(super,
- get_imsm_raid_level(map), /* RAID level */
- imsm_level_to_layout(get_imsm_raid_level(map)),
+ level, /* RAID level */
+ imsm_level_to_layout(level),
map->num_members, /* raid disks */
- &chunk, join_u32(dev->size_low, dev->size_high),
+ &chunk, imsm_dev_size(dev),
1 /* verbose */)) {
pr_err("IMSM RAID geometry validation failed. Array %s activation is blocked.\n",
dev->volume);
int idx;
int skip;
__u32 ord;
+ int missing = 0;
skip = 0;
idx = get_imsm_disk_idx(dev, slot, MAP_0);
skip = 1;
if (d && is_failed(&d->disk))
skip = 1;
- if (ord & IMSM_ORD_REBUILD)
+ if (!skip && (ord & IMSM_ORD_REBUILD))
recovery_start = 0;
/*
* if we skip some disks the array will be assembled degraded;
* reset resync start to avoid a dirty-degraded
* situation when performing the initial sync
- *
- * FIXME handle dirty degraded
*/
- if ((skip || recovery_start == 0) &&
- !(dev->vol.dirty & RAIDVOL_DIRTY))
- this->resync_start = MaxSector;
+ if (skip)
+ missing++;
+
+ if (!(dev->vol.dirty & RAIDVOL_DIRTY)) {
+ if ((!able_to_resync(level, missing) ||
+ recovery_start == 0))
+ this->resync_start = MaxSector;
+ } else {
+ /*
+ * FIXME handle dirty degraded
+ */
+ }
+
if (skip)
continue;
info_d->events = __le32_to_cpu(mpb->generation_num);
info_d->data_offset = pba_of_lba0(map);
+ info_d->component_size = calc_component_size(map, dev);
if (map->raid_level == 5) {
- info_d->component_size =
- num_data_stripes(map) *
- map->blocks_per_strip;
info_d->ppl_sector = this->ppl_sector;
info_d->ppl_size = this->ppl_size;
- } else {
- info_d->component_size = blocks_per_member(map);
+ if (this->consistency_policy == CONSISTENCY_POLICY_PPL &&
+ recovery_start == 0)
+ this->resync_start = 0;
}
- info_d->consistency_policy = this->consistency_policy;
info_d->bb.supported = 1;
get_volume_badblocks(super->bbm_log, ord_to_idx(ord),
strcat(buf, ":0");
if ((len = strlen(buf)) >= MAX_RAID_SERIAL_LEN)
shift = len - MAX_RAID_SERIAL_LEN + 1;
- strncpy((char *)disk->serial, &buf[shift], MAX_RAID_SERIAL_LEN);
+ memcpy(disk->serial, &buf[shift], len + 1 - shift);
disk->status |= FAILED_DISK;
set_imsm_ord_tbl_ent(map, slot, idx | IMSM_ORD_REBUILD);
/* end process for initialization and rebuild only
*/
if (is_gen_migration(dev) == 0) {
- __u8 map_state;
- int failed;
+ int failed = imsm_count_failed(super, dev, MAP_0);
- failed = imsm_count_failed(super, dev, MAP_0);
- map_state = imsm_check_degraded(super, dev, failed, MAP_0);
+ if (failed) {
+ __u8 map_state;
+ struct imsm_map *map = get_imsm_map(dev, MAP_0);
+ struct imsm_map *map1;
+ int i, ord, ord_map1;
+ int rebuilt = 1;
- if (failed)
- end_migration(dev, super, map_state);
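+ /* the rebuild is complete only if no member is still flagged
+ * for rebuild in both the current and the previous map
+ */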
+ for (i = 0; i < map->num_members; i++) {
+ ord = get_imsm_ord_tbl_ent(dev, i, MAP_0);
+ if (!(ord & IMSM_ORD_REBUILD))
+ continue;
+
+ map1 = get_imsm_map(dev, MAP_1);
+ if (!map1)
+ continue;
+
+ ord_map1 = __le32_to_cpu(map1->disk_ord_tbl[i]);
+ if (ord_map1 & IMSM_ORD_REBUILD)
+ rebuilt = 0;
+ }
+
+ if (rebuilt) {
+ map_state = imsm_check_degraded(super, dev,
+ failed, MAP_0);
+ end_migration(dev, super, map_state);
+ }
+ }
}
for (dl = super->missing; dl; dl = dl->next)
mark_missing(super, dev, &dl->disk, dl->index);
static unsigned long long imsm_set_array_size(struct imsm_dev *dev,
long long new_size)
{
- int used_disks = imsm_num_data_members(dev, MAP_0);
unsigned long long array_blocks;
- struct imsm_map *map;
+ struct imsm_map *map = get_imsm_map(dev, MAP_0);
+ int used_disks = imsm_num_data_members(map);
if (used_disks == 0) {
/* when problems occur
* return current array_blocks value
*/
- array_blocks = __le32_to_cpu(dev->size_high);
- array_blocks = array_blocks << 32;
- array_blocks += __le32_to_cpu(dev->size_low);
+ array_blocks = imsm_dev_size(dev);
return array_blocks;
}
/* set array size in metadata
*/
- if (new_size <= 0) {
+ if (new_size <= 0)
/* OLCE size change is caused by added disks
*/
- map = get_imsm_map(dev, MAP_0);
- array_blocks = blocks_per_member(map) * used_disks;
- } else {
+ array_blocks = per_dev_array_size(map) * used_disks;
+ else
/* Online Volume Size Change
* Using available free space
*/
array_blocks = new_size;
- }
array_blocks = round_size_to_mb(array_blocks, used_disks);
- dev->size_low = __cpu_to_le32((__u32)array_blocks);
- dev->size_high = __cpu_to_le32((__u32)(array_blocks >> 32));
+ set_imsm_dev_size(dev, array_blocks);
return array_blocks;
}
int used_disks;
struct mdinfo *mdi;
- used_disks = imsm_num_data_members(dev, MAP_0);
+ used_disks = imsm_num_data_members(map);
if (used_disks > 0) {
array_blocks =
- blocks_per_member(map) *
+ per_dev_array_size(map) *
used_disks;
array_blocks =
round_size_to_mb(array_blocks,
dev->vol.dirty = RAIDVOL_CLEAN;
} else {
dev->vol.dirty = RAIDVOL_DIRTY;
- if (dev->rwh_policy == RWH_DISTRIBUTED)
+ if (dev->rwh_policy == RWH_DISTRIBUTED ||
+ dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
dev->vol.dirty |= RAIDVOL_DSRECORD_VALID;
}
super->updates_pending++;
int failed;
int ord;
__u8 map_state;
+ int rebuild_done = 0;
+ int i;
- ord = imsm_disk_slot_to_ord(a, n);
+ ord = get_imsm_ord_tbl_ent(dev, n, MAP_X);
if (ord < 0)
return;
struct imsm_map *migr_map = get_imsm_map(dev, MAP_1);
set_imsm_ord_tbl_ent(migr_map, n, ord_to_idx(ord));
+ rebuild_done = 1;
super->updates_pending++;
}
dprintf_cont(" Map state change");
end_migration(dev, super, map_state);
super->updates_pending++;
+ } else if (!rebuild_done) {
+ break;
+ }
+
+ /* check if recovery is really finished */
+ for (mdi = a->info.devs; mdi ; mdi = mdi->next)
+ if (mdi->recovery_start != MaxSector) {
+ recovery_not_finished = 1;
+ break;
+ }
+ if (recovery_not_finished) {
+ dprintf_cont("\n");
+ dprintf("Rebuild has not finished yet, state not changed");
+ if (a->last_checkpoint < mdi->recovery_start) {
+ a->last_checkpoint =
+ mdi->recovery_start;
+ super->updates_pending++;
+ }
+ break;
}
+
+ dprintf_cont(" Rebuild done, still degraded");
+ dev->vol.migr_state = 0;
+ set_migr_type(dev, 0);
+ dev->vol.curr_migr_unit = 0;
+
+ for (i = 0; i < map->num_members; i++) {
+ int idx = get_imsm_ord_tbl_ent(dev, i, MAP_0);
+
+ if (idx & IMSM_ORD_REBUILD)
+ map->failed_disk_num = i;
+ }
+ super->updates_pending++;
break;
}
if (is_gen_migration(dev)) {
if (dl->index == -1 && !activate_new)
continue;
+ if (!drive_validate_sector_size(super, dl))
+ continue;
+
/* Does this unused device have the requisite free space?
* It needs to be able to cover all member volumes
*/
pos = 0;
array_start = pba_of_lba0(map);
array_end = array_start +
- blocks_per_member(map) - 1;
+ per_dev_array_size(map) - 1;
do {
/* check that we can start at pba_of_lba0 with
- * blocks_per_member of space
+ * num_data_stripes*blocks_per_stripe of space
*/
if (array_start >= pos && array_end < ex[j].start) {
found = 1;
di->component_size = a->info.component_size;
di->container_member = inst;
di->bb.supported = 1;
- if (dev->rwh_policy == RWH_DISTRIBUTED) {
- di->consistency_policy = CONSISTENCY_POLICY_PPL;
+ if (a->info.consistency_policy == CONSISTENCY_POLICY_PPL) {
di->ppl_sector = get_ppl_sector(super, inst);
- di->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE) >> 9;
+ di->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
}
super->random = random32();
di->next = rv;
*/
if (u->new_chunksize > 0) {
unsigned long long num_data_stripes;
+ struct imsm_map *dest_map =
+ get_imsm_map(dev, MAP_0);
int used_disks =
- imsm_num_data_members(dev, MAP_0);
+ imsm_num_data_members(dest_map);
if (used_disks == 0)
return ret_val;
map->blocks_per_strip =
__cpu_to_le16(u->new_chunksize * 2);
num_data_stripes =
- (join_u32(dev->size_low, dev->size_high)
- / used_disks);
+ imsm_dev_size(dev) / used_disks;
num_data_stripes /= map->blocks_per_strip;
num_data_stripes /= map->num_domains;
set_num_data_stripes(map, num_data_stripes);
}
+ /* ensure blocks_per_member has a valid value
+ */
+ set_blocks_per_member(map,
+ per_dev_array_size(map) +
+ NUM_BLOCKS_DIRTY_STRIPE_REGION);
+
/* add disk
*/
if (u->new_level != 5 || migr_map->raid_level != 0 ||
if (id->index == (unsigned)u->subdev) {
struct imsm_dev *dev = get_imsm_dev(super, u->subdev);
struct imsm_map *map = get_imsm_map(dev, MAP_0);
- int used_disks = imsm_num_data_members(dev, MAP_0);
+ int used_disks = imsm_num_data_members(map);
unsigned long long blocks_per_member;
unsigned long long num_data_stripes;
+ unsigned long long new_size_per_disk;
+
+ if (used_disks == 0)
+ return 0;
/* calculate new size
*/
- blocks_per_member = u->new_size / used_disks;
- num_data_stripes = blocks_per_member /
+ new_size_per_disk = u->new_size / used_disks;
+ blocks_per_member = new_size_per_disk +
+ NUM_BLOCKS_DIRTY_STRIPE_REGION;
+ num_data_stripes = new_size_per_disk /
map->blocks_per_strip;
num_data_stripes /= map->num_domains;
dprintf("(size: %llu, blocks per member: %llu, num_data_stipes: %llu)\n",
- u->new_size, blocks_per_member,
+ u->new_size, new_size_per_disk,
num_data_stripes);
set_blocks_per_member(map, blocks_per_member);
set_num_data_stripes(map, num_data_stripes);
unsigned long long num_data_stripes;
map->num_domains = 1;
- num_data_stripes = blocks_per_member(map);
+ num_data_stripes = imsm_dev_size(dev) / 2;
num_data_stripes /= map->blocks_per_strip;
num_data_stripes /= map->num_domains;
set_num_data_stripes(map, num_data_stripes);
new_map = get_imsm_map(&u->dev, MAP_0);
new_start = pba_of_lba0(new_map);
- new_end = new_start + blocks_per_member(new_map);
+ new_end = new_start + per_dev_array_size(new_map);
inf = get_disk_info(u);
/* handle activate_spare versus create race:
dev = get_imsm_dev(super, i);
map = get_imsm_map(dev, MAP_0);
start = pba_of_lba0(map);
- end = start + blocks_per_member(map);
+ end = start + per_dev_array_size(map);
if ((new_start >= start && new_start <= end) ||
(start >= new_start && start <= new_end))
/* overlap */;
/* sanity check that we are not affecting the uuid of
* an active array
*/
+ memset(name, 0, sizeof(name));
snprintf(name, MAX_RAID_SERIAL_LEN, "%s", (char *) u->name);
name[MAX_RAID_SERIAL_LEN] = '\0';
for (a = st->arrays; a; a = a->next)
break;
}
- snprintf((char *) dev->volume, MAX_RAID_SERIAL_LEN, "%s", name);
+ memcpy(dev->volume, name, MAX_RAID_SERIAL_LEN);
super->updates_pending++;
break;
}
struct imsm_dev *dev;
struct imsm_map *map;
unsigned int i, j, num_members;
- __u32 ord;
+ __u32 ord, ord_map0;
struct bbm_log *log = super->bbm_log;
dprintf("deleting device[%d] from imsm_super\n", index);
* ord-flags to the first map
*/
ord = get_imsm_ord_tbl_ent(dev, j, MAP_X);
+ ord_map0 = get_imsm_ord_tbl_ent(dev, j, MAP_0);
if (ord_to_idx(ord) <= index)
continue;
map = get_imsm_map(dev, MAP_0);
- set_imsm_ord_tbl_ent(map, j, ord_to_idx(ord - 1));
+ set_imsm_ord_tbl_ent(map, j, ord_map0 - 1);
map = get_imsm_map(dev, MAP_1);
if (map)
set_imsm_ord_tbl_ent(map, j, ord - 1);
return NULL;
get_volume_badblocks(super->bbm_log, ord_to_idx(ord), pba_of_lba0(map),
- blocks_per_member(map), &super->bb);
+ per_dev_array_size(map), &super->bb);
return &super->bb;
}
max(map_dest->blocks_per_strip, map_src->blocks_per_strip);
migr_rec->dest_depth_per_unit *=
max(map_dest->blocks_per_strip, map_src->blocks_per_strip);
- new_data_disks = imsm_num_data_members(dev, MAP_0);
+ new_data_disks = imsm_num_data_members(map_dest);
migr_rec->blocks_per_unit =
__cpu_to_le32(migr_rec->dest_depth_per_unit * new_data_disks);
migr_rec->dest_depth_per_unit =
int dest_layout = 0;
int dest_chunk;
unsigned long long start;
- int data_disks = imsm_num_data_members(dev, MAP_0);
+ int data_disks = imsm_num_data_members(map_dest);
targets = xmalloc(new_disks * sizeof(int));
drv = "isci";
else if (hba && hba->type == SYS_DEV_SATA)
drv = "ahci";
+ else if (hba && hba->type == SYS_DEV_VMD)
+ drv = "vmd";
+ else if (hba && hba->type == SYS_DEV_NVME)
+ drv = "nvme";
else
drv = "unknown";
dprintf("path: %s hba: %s attached: %s\n",
*/
static struct mdinfo *get_spares_for_grow(struct supertype *st)
{
- unsigned long long min_size = min_acceptable_spare_size_imsm(st);
- return container_choose_spares(st, min_size, NULL, NULL, NULL, 0);
+ struct spare_criteria sc;
+
+ get_spare_criteria_imsm(st, &sc);
+ return container_choose_spares(st, &sc, NULL, NULL, NULL, 0);
}
/******************************************************************************
*/
spares = get_spares_for_grow(st);
- if (spares == NULL
- || delta_disks > spares->array.spare_disks) {
+ if (spares == NULL || delta_disks > spares->array.spare_disks) {
pr_err("imsm: ERROR: Cannot get spare devices for %s.\n", geo->dev_name);
i = -1;
goto abort;
int imsm_layout = -1;
int data_disks;
struct imsm_dev *dev;
+ struct imsm_map *map;
struct intel_super *super;
unsigned long long current_size;
unsigned long long free_size;
super = st->sb;
dev = get_imsm_dev(super, super->current_vol);
- data_disks = imsm_num_data_members(dev , MAP_0);
+ map = get_imsm_map(dev, MAP_0);
+ data_disks = imsm_num_data_members(map);
/* compute current size per disk member
*/
current_size = info.custom_array_size / data_disks;
if (geo->size > 0 && geo->size != MAX_SIZE) {
/* align component size
*/
- geo->size = imsm_component_size_aligment_check(
+ geo->size = imsm_component_size_alignment_check(
get_imsm_raid_level(dev->vol.map),
chunk * 1024, super->sector_size,
geo->size * 2);
max_size = free_size + current_size;
/* align component size
*/
- max_size = imsm_component_size_aligment_check(
+ max_size = imsm_component_size_alignment_check(
get_imsm_raid_level(dev->vol.map),
chunk * 1024, super->sector_size,
max_size);
struct intel_dev *dv;
unsigned int sector_size = super->sector_size;
struct imsm_dev *dev = NULL;
- struct imsm_map *map_src;
+ struct imsm_map *map_src, *map_dest;
int migr_vol_qan = 0;
int ndata, odata; /* [bytes] */
int chunk; /* [bytes] */
/* Find volume during the reshape */
for (dv = super->devlist; dv; dv = dv->next) {
- if (dv->dev->vol.migr_type == MIGR_GEN_MIGR
- && dv->dev->vol.migr_state == 1) {
+ if (dv->dev->vol.migr_type == MIGR_GEN_MIGR &&
+ dv->dev->vol.migr_state == 1) {
dev = dv->dev;
migr_vol_qan++;
}
goto abort;
}
+ map_dest = get_imsm_map(dev, MAP_0);
map_src = get_imsm_map(dev, MAP_1);
if (map_src == NULL)
goto abort;
- ndata = imsm_num_data_members(dev, MAP_0);
- odata = imsm_num_data_members(dev, MAP_1);
+ ndata = imsm_num_data_members(map_dest);
+ odata = imsm_num_data_members(map_src);
chunk = __le16_to_cpu(map_src->blocks_per_strip) * 512;
old_data_stripe_length = odata * chunk;
buf_size = __le32_to_cpu(migr_rec->blocks_per_unit) * 512;
/* extend buffer size for parity disk */
buf_size += __le32_to_cpu(migr_rec->dest_depth_per_unit) * 512;
- /* add space for stripe aligment */
+ /* add space for stripe alignment */
buf_size += old_data_stripe_length;
if (posix_memalign((void **)&buf, MAX_SECTOR_SIZE, buf_size)) {
dprintf("imsm: Cannot allocate checkpoint buffer\n");
/* clear migr_rec on disks after successful migration */
struct dl *d;
- memset(super->migr_rec_buf, 0, MIGR_REC_BUF_SECTORS*sector_size);
+ memset(super->migr_rec_buf, 0, MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE);
for (d = super->disks; d; d = d->next) {
if (d->index < 0 || is_failed(&d->disk))
continue;
.update_super = update_super_imsm,
.avail_size = avail_size_imsm,
- .min_acceptable_spare_size = min_acceptable_spare_size_imsm,
+ .get_spare_criteria = get_spare_criteria_imsm,
.compare_super = compare_super_imsm,