#define MPB_ATTRIB_IGNORED (MPB_ATTRIB_NEVER_USE)
#define MPB_SECTOR_CNT 2210
-#define IMSM_RESERVED_SECTORS 4096
-#define NUM_BLOCKS_DIRTY_STRIPE_REGION 2056
+#define IMSM_RESERVED_SECTORS 8192
+#define NUM_BLOCKS_DIRTY_STRIPE_REGION 2048
#define SECT_PER_MB_SHIFT 11
#define MAX_SECTOR_SIZE 4096
#define MULTIPLE_PPL_AREA_SIZE_IMSM (1024 * 1024) /* Size of the whole
return size;
}
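+/* Return whether an array at the given RAID level can still be resynced
+ * with 'missing_disks' members unavailable. Only RAID10 tolerates a single
+ * missing member here; every other level requires a complete set.
+ */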
+static int able_to_resync(int raid_level, int missing_disks)
+{
+ int max_missing_disks = 0;
+
+ switch (raid_level) {
+ case 10:
+ max_missing_disks = 1;
+ break;
+ default:
+ max_missing_disks = 0;
+ }
+ return missing_disks <= max_missing_disks;
+}
+
/* try to determine how much space is reserved for metadata from
* the last get_extents() entry on the smallest active disk,
 * otherwise fall back to the default
printf(" Platform : Intel(R) ");
if (orom->capabilities == 0 && orom->driver_features == 0)
printf("Matrix Storage Manager\n");
+ else if (imsm_orom_is_enterprise(orom) && orom->major_ver >= 6)
+ printf("Virtual RAID on CPU\n");
else
printf("Rapid Storage Technology%s\n",
imsm_orom_is_enterprise(orom) ? " enterprise" : "");
} else if (super->hba->type == SYS_DEV_VMD && super->orom &&
!imsm_orom_has_tpv_support(super->orom)) {
pr_err("\tPlatform configuration does not support non-Intel NVMe drives.\n"
- "\tPlease refer to Intel(R) RSTe user guide.\n");
+ "\tPlease refer to Intel(R) RSTe/VROC user guide.\n");
free(dd->devname);
free(dd);
return 1;
__u32 crc32c_le(__u32 crc, unsigned char const *p, size_t len);
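+/* Compute the CRC32C checksum for the PPL header in 'buf', seek to
+ * 'ppl_sector' on 'fd' and write out PPL_HEADER_SIZE bytes, syncing the
+ * device. Returns 0 on success or a negative errno on failure.
+ */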
+static int write_ppl_header(unsigned long long ppl_sector, int fd, void *buf)
+{
+ struct ppl_header *ppl_hdr = buf;
+ int ret;
+
+ ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));
+
+ if (lseek64(fd, ppl_sector * 512, SEEK_SET) < 0) {
+ ret = -errno;
+ perror("Failed to seek to PPL header location");
+ return ret;
+ }
+
+ if (write(fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
+ ret = -errno;
+ perror("Write PPL header failed");
+ return ret;
+ }
+
+ fsync(fd);
+
+ return 0;
+}
+
static int write_init_ppl_imsm(struct supertype *st, struct mdinfo *info, int fd)
{
struct intel_super *super = st->sb;
ret = posix_memalign(&buf, MAX_SECTOR_SIZE, PPL_HEADER_SIZE);
if (ret) {
pr_err("Failed to allocate PPL header buffer\n");
- return ret;
+ return -ret;
}
memset(buf, 0, PPL_HEADER_SIZE);
ppl_hdr->entries[0].checksum = ~0;
}
- ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));
-
- if (lseek64(fd, info->ppl_sector * 512, SEEK_SET) < 0) {
- ret = errno;
- perror("Failed to seek to PPL header location");
- }
-
- if (!ret && write(fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
- ret = errno;
- perror("Write PPL header failed");
- }
-
- if (!ret)
- fsync(fd);
+ ret = write_ppl_header(info->ppl_sector, fd, buf);
free(buf);
return ret;
}
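+/* Forward declaration - is_rebuilding() is defined later in this file but
+ * is needed by validate_ppl_imsm() below.
+ */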
+static int is_rebuilding(struct imsm_dev *dev);
+
static int validate_ppl_imsm(struct supertype *st, struct mdinfo *info,
struct mdinfo *disk)
{
struct intel_super *super = st->sb;
struct dl *d;
- void *buf;
+ void *buf_orig, *buf, *buf_prev = NULL;
int ret = 0;
- struct ppl_header *ppl_hdr;
+ struct ppl_header *ppl_hdr = NULL;
__u32 crc;
struct imsm_dev *dev;
__u32 idx;
if (disk->disk.raid_disk < 0)
return 0;
- if (posix_memalign(&buf, MAX_SECTOR_SIZE, PPL_HEADER_SIZE)) {
- pr_err("Failed to allocate PPL header buffer\n");
- return -1;
- }
-
dev = get_imsm_dev(super, info->container_member);
idx = get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_0);
d = get_imsm_dl_disk(super, idx);
if (!d || d->index < 0 || is_failed(&d->disk))
- goto out;
+ return 0;
+
+ if (posix_memalign(&buf_orig, MAX_SECTOR_SIZE, PPL_HEADER_SIZE * 2)) {
+ pr_err("Failed to allocate PPL header buffer\n");
+ return -1;
+ }
+ buf = buf_orig;
ret = 1;
while (ppl_offset < MULTIPLE_PPL_AREA_SIZE_IMSM) {
+ void *tmp;
+
dprintf("Checking potential PPL at offset: %llu\n", ppl_offset);
if (lseek64(d->fd, info->ppl_sector * 512 + ppl_offset,
SEEK_SET) < 0) {
perror("Failed to seek to PPL header location");
ret = -1;
- goto out;
+ break;
}
if (read(d->fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
perror("Read PPL header failed");
ret = -1;
- goto out;
+ break;
}
ppl_hdr = buf;
if (crc != ~crc32c_le(~0, buf, PPL_HEADER_SIZE)) {
dprintf("Wrong PPL header checksum on %s\n",
d->devname);
- goto out;
+ break;
}
if (prev_gen_num > __le64_to_cpu(ppl_hdr->generation)) {
/* previous was newest, it was already checked */
- goto out;
+ break;
}
if ((__le32_to_cpu(ppl_hdr->signature) !=
dprintf("Wrong PPL header signature on %s\n",
d->devname);
ret = 1;
- goto out;
+ break;
}
ret = 0;
for (i = 0; i < __le32_to_cpu(ppl_hdr->entries_count); i++)
ppl_offset +=
__le32_to_cpu(ppl_hdr->entries[i].pp_size);
+
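+ /* Swap buffer halves so the header just validated stays in buf_prev
+ * and the next candidate header is read into buf.
+ */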
+ if (!buf_prev)
+ buf_prev = buf + PPL_HEADER_SIZE;
+ tmp = buf_prev;
+ buf_prev = buf;
+ buf = tmp;
}
-out:
- free(buf);
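+ /* If at least one valid header was found, buf_prev points at the newest
+ * one - use it for the checks below.
+ */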
+ if (buf_prev) {
+ buf = buf_prev;
+ ppl_hdr = buf_prev;
+ }
/*
 * Update metadata to use multiple PPLs area (1MB).
if (map->map_state == IMSM_T_STATE_UNINITIALIZED ||
(map->map_state == IMSM_T_STATE_NORMAL &&
!(dev->vol.dirty & RAIDVOL_DIRTY)) ||
- (dev->vol.migr_state == MIGR_REBUILD &&
+ (is_rebuilding(dev) &&
dev->vol.curr_migr_unit == 0 &&
get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_1) != idx))
ret = st->ss->write_init_ppl(st, info, d->fd);
else
info->mismatch_cnt++;
+ } else if (ret == 0 &&
+ ppl_hdr->entries_count == 0 &&
+ is_rebuilding(dev) &&
+ info->resync_start == 0) {
+ /*
+ * The header has no entries - add a single empty entry and
+ * rewrite the header to prevent the kernel from going into
+ * resync after an interrupted rebuild.
+ */
+ ppl_hdr->entries_count = __cpu_to_le32(1);
+ ret = write_ppl_header(info->ppl_sector, d->fd, buf);
}
+ free(buf_orig);
+
return ret;
}
int slot;
int chunk;
char *ep;
+ int level;
if (subarray &&
(i != strtoul(subarray, &ep, 10) || *ep != '\0'))
dev = get_imsm_dev(super, i);
map = get_imsm_map(dev, MAP_0);
map2 = get_imsm_map(dev, MAP_1);
+ level = get_imsm_raid_level(map);
/* do not publish arrays that are in the middle of an
* unsupported migration
chunk = __le16_to_cpu(map->blocks_per_strip) >> 1;
/* mdadm does not support all metadata features - set the bit in all arrays' state */
if (!validate_geometry_imsm_orom(super,
- get_imsm_raid_level(map), /* RAID level */
- imsm_level_to_layout(get_imsm_raid_level(map)),
+ level, /* RAID level */
+ imsm_level_to_layout(level),
map->num_members, /* raid disks */
&chunk, join_u32(dev->size_low, dev->size_high),
1 /* verbose */)) {
int idx;
int skip;
__u32 ord;
+ int missing = 0;
skip = 0;
idx = get_imsm_disk_idx(dev, slot, MAP_0);
skip = 1;
if (d && is_failed(&d->disk))
skip = 1;
- if (ord & IMSM_ORD_REBUILD)
+ if (!skip && (ord & IMSM_ORD_REBUILD))
recovery_start = 0;
/*
 * if we skip some disks the array will be assembled degraded;
 * reset resync start to avoid a dirty-degraded
 * situation when performing the initial sync
- *
- * FIXME handle dirty degraded
*/
- if ((skip || recovery_start == 0) &&
- !(dev->vol.dirty & RAIDVOL_DIRTY))
- this->resync_start = MaxSector;
+ if (skip)
+ missing++;
+
+ if (!(dev->vol.dirty & RAIDVOL_DIRTY)) {
+ if (!able_to_resync(level, missing) ||
+ recovery_start == 0)
+ this->resync_start = MaxSector;
+ } else {
+ /*
+ * FIXME handle dirty degraded
+ */
+ }
+
if (skip)
continue;