__u32 rec_status; /* Status used to determine how to restart
* migration in case it aborts
* in some fashion */
- __u32 curr_migr_unit; /* 0..numMigrUnits-1 */
+ __u32 curr_migr_unit_lo; /* 0..numMigrUnits-1 */
__u32 family_num; /* Family number of MPB
* containing the RaidDev
* that is migrating */
__u32 dest_depth_per_unit; /* Num member blocks each destMap
* member disk
* advances per unit-of-operation */
- __u32 ckpt_area_pba; /* Pba of first block of ckpt copy area */
- __u32 dest_1st_member_lba; /* First member lba on first
- * stripe of destination */
- __u32 num_migr_units; /* Total num migration units-of-op */
+ __u32 ckpt_area_pba_lo; /* Pba of first block of ckpt copy area */
+ __u32 dest_1st_member_lba_lo; /* First member lba on first
+ * stripe of destination */
+ __u32 num_migr_units_lo; /* Total num migration units-of-op */
__u32 post_migr_vol_cap; /* Size of volume after
* migration completes */
__u32 post_migr_vol_cap_hi; /* Expansion space for LBA64 */
__u32 ckpt_read_disk_num; /* Which member disk in destSubMap[0] the
* migration ckpt record was read from
* (for recovered migrations) */
+ __u32 curr_migr_unit_hi; /* 0..numMigrUnits-1 high order 32 bits */
+ __u32 ckpt_area_pba_hi; /* Pba of first block of ckpt copy area
+ * high order 32 bits */
+ __u32 dest_1st_member_lba_hi; /* First member lba on first stripe of
+ * destination - high order 32 bits */
+ __u32 num_migr_units_hi; /* Total num migration units-of-op
+ * high order 32 bits */
} __attribute__ ((__packed__));
struct md_list {
static __u32 imsm_min_reserved_sectors(struct intel_super *super);
-static int split_ull(unsigned long long n, __u32 *lo, __u32 *hi)
+static int split_ull(unsigned long long n, void *lo, void *hi)
{
if (lo == 0 || hi == 0)
return 1;
- *lo = __le32_to_cpu((unsigned)n);
- *hi = __le32_to_cpu((unsigned)(n >> 32));
+ __put_unaligned32(__cpu_to_le32((__u32)n), lo);
+ __put_unaligned32(__cpu_to_le32((__u32)(n >> 32)), hi);
return 0;
}
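+
+ /* For reference, a sketch of the join_u32() counterpart used by the
+ * accessors below. It assumes the same little-endian on-disk convention
+ * as split_ull() and only approximates the existing helper:
+ *
+ * static unsigned long long join_u32(__u32 lo, __u32 hi)
+ * {
+ * return (unsigned long long)__le32_to_cpu(lo) |
+ * ((unsigned long long)__le32_to_cpu(hi) << 32);
+ * }
+ */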
return join_u32(dev->size_low, dev->size_high);
}
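+
+ /* The accessors below rebuild the 64-bit migration record fields
+ * (checkpoint area PBA, current unit, destination first member LBA and
+ * total unit count) from their lo/hi __u32 halves, tolerating a NULL
+ * record.
+ */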
+static unsigned long long migr_chkp_area_pba(struct migr_record *migr_rec)
+{
+ if (migr_rec == NULL)
+ return 0;
+ return join_u32(migr_rec->ckpt_area_pba_lo,
+ migr_rec->ckpt_area_pba_hi);
+}
+
+static unsigned long long current_migr_unit(struct migr_record *migr_rec)
+{
+ if (migr_rec == NULL)
+ return 0;
+ return join_u32(migr_rec->curr_migr_unit_lo,
+ migr_rec->curr_migr_unit_hi);
+}
+
+static unsigned long long migr_dest_1st_member_lba(struct migr_record *migr_rec)
+{
+ if (migr_rec == NULL)
+ return 0;
+ return join_u32(migr_rec->dest_1st_member_lba_lo,
+ migr_rec->dest_1st_member_lba_hi);
+}
+
+static unsigned long long get_num_migr_units(struct migr_record *migr_rec)
+{
+ if (migr_rec == NULL)
+ return 0;
+ return join_u32(migr_rec->num_migr_units_lo,
+ migr_rec->num_migr_units_hi);
+}
+
static void set_total_blocks(struct imsm_disk *disk, unsigned long long n)
{
split_ull(n, &disk->total_blocks_lo, &disk->total_blocks_hi);
split_ull(n, &dev->size_low, &dev->size_high);
}
-static struct extent *get_extents(struct intel_super *super, struct dl *dl)
+static void set_migr_chkp_area_pba(struct migr_record *migr_rec,
+ unsigned long long n)
+{
+ split_ull(n, &migr_rec->ckpt_area_pba_lo, &migr_rec->ckpt_area_pba_hi);
+}
+
+static void set_current_migr_unit(struct migr_record *migr_rec,
+ unsigned long long n)
+{
+ split_ull(n, &migr_rec->curr_migr_unit_lo,
+ &migr_rec->curr_migr_unit_hi);
+}
+
+static void set_migr_dest_1st_member_lba(struct migr_record *migr_rec,
+ unsigned long long n)
+{
+ split_ull(n, &migr_rec->dest_1st_member_lba_lo,
+ &migr_rec->dest_1st_member_lba_hi);
+}
+
+static void set_num_migr_units(struct migr_record *migr_rec,
+ unsigned long long n)
+{
+ split_ull(n, &migr_rec->num_migr_units_lo,
+ &migr_rec->num_migr_units_hi);
+}
+
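+ /* Worked example (hypothetical numbers): with num_data_stripes = 1000 and
+ * blocks_per_strip = 256 the data area is 1000 * 256 = 256000 blocks; for
+ * RAID1/RAID10 maps the product is doubled to 512000 blocks per member.
+ */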
+static unsigned long long per_dev_array_size(struct imsm_map *map)
+{
+ unsigned long long array_size = 0;
+
+ if (map == NULL)
+ return array_size;
+
+ array_size = num_data_stripes(map) * map->blocks_per_strip;
+ if (get_imsm_raid_level(map) == 1 || get_imsm_raid_level(map) == 10)
+ array_size *= 2;
+
+ return array_size;
+}
+
+static struct extent *get_extents(struct intel_super *super, struct dl *dl,
+ int get_minimal_reservation)
{
/* find a list of used extents on the given physical device */
struct extent *rv, *e;
* regardless of whether the OROM has assigned sectors from the
* IMSM_RESERVED_SECTORS region
*/
- if (dl->index == -1)
+ if (dl->index == -1 || get_minimal_reservation)
reservation = imsm_min_reserved_sectors(super);
else
reservation = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
if (get_imsm_disk_slot(map, dl->index) >= 0) {
e->start = pba_of_lba0(map);
- e->size = blocks_per_member(map);
+ e->size = per_dev_array_size(map);
e++;
}
}
if (dl->index == -1)
return MPB_SECTOR_CNT;
- e = get_extents(super, dl);
+ e = get_extents(super, dl, 0);
if (!e)
return MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
return rv;
/* find last lba used by subarrays on the smallest active disk */
- e = get_extents(super, dl_min);
+ e = get_extents(super, dl_min, 0);
if (!e)
return rv;
for (i = 0; e[i].size; i++)
if (!dl)
return -EINVAL;
/* find last lba used by subarrays */
- e = get_extents(super, dl);
+ e = get_extents(super, dl, 0);
if (!e)
return -EINVAL;
for (i = 0; e[i].size; i++)
struct migr_record *migr_rec = super->migr_rec;
migr_rec->blocks_per_unit /= IMSM_4K_DIV;
- migr_rec->ckpt_area_pba /= IMSM_4K_DIV;
- migr_rec->dest_1st_member_lba /= IMSM_4K_DIV;
migr_rec->dest_depth_per_unit /= IMSM_4K_DIV;
split_ull((join_u32(migr_rec->post_migr_vol_cap,
migr_rec->post_migr_vol_cap_hi) / IMSM_4K_DIV),
&migr_rec->post_migr_vol_cap, &migr_rec->post_migr_vol_cap_hi);
+ set_migr_chkp_area_pba(migr_rec,
+ migr_chkp_area_pba(migr_rec) / IMSM_4K_DIV);
+ set_migr_dest_1st_member_lba(migr_rec,
+ migr_dest_1st_member_lba(migr_rec) / IMSM_4K_DIV);
}
void convert_to_4k_imsm_disk(struct imsm_disk *disk)
printf("Normal\n");
else
printf("Contains Data\n");
- printf(" Current Unit : %u\n",
- __le32_to_cpu(migr_rec->curr_migr_unit));
+ printf(" Current Unit : %llu\n",
+ current_migr_unit(migr_rec));
printf(" Family : %u\n",
__le32_to_cpu(migr_rec->family_num));
printf(" Ascending : %u\n",
__le32_to_cpu(migr_rec->blocks_per_unit));
printf(" Dest. Depth Per Unit : %u\n",
__le32_to_cpu(migr_rec->dest_depth_per_unit));
- printf(" Checkpoint Area pba : %u\n",
- __le32_to_cpu(migr_rec->ckpt_area_pba));
- printf(" First member lba : %u\n",
- __le32_to_cpu(migr_rec->dest_1st_member_lba));
- printf(" Total Number of Units : %u\n",
- __le32_to_cpu(migr_rec->num_migr_units));
- printf(" Size of volume : %u\n",
- __le32_to_cpu(migr_rec->post_migr_vol_cap));
- printf(" Expansion space for LBA64 : %u\n",
- __le32_to_cpu(migr_rec->post_migr_vol_cap_hi));
+ printf(" Checkpoint Area pba : %llu\n",
+ migr_chkp_area_pba(migr_rec));
+ printf(" First member lba : %llu\n",
+ migr_dest_1st_member_lba(migr_rec));
+ printf(" Total Number of Units : %llu\n",
+ get_num_migr_units(migr_rec));
+ printf(" Size of volume : %llu\n",
+ join_u32(migr_rec->post_migr_vol_cap,
+ migr_rec->post_migr_vol_cap_hi));
printf(" Record was read from : %u\n",
__le32_to_cpu(migr_rec->ckpt_read_disk_num));
struct migr_record *migr_rec = super->migr_rec;
migr_rec->blocks_per_unit *= IMSM_4K_DIV;
- migr_rec->ckpt_area_pba *= IMSM_4K_DIV;
- migr_rec->dest_1st_member_lba *= IMSM_4K_DIV;
migr_rec->dest_depth_per_unit *= IMSM_4K_DIV;
split_ull((join_u32(migr_rec->post_migr_vol_cap,
migr_rec->post_migr_vol_cap_hi) * IMSM_4K_DIV),
&migr_rec->post_migr_vol_cap,
&migr_rec->post_migr_vol_cap_hi);
+ set_migr_chkp_area_pba(migr_rec,
+ migr_chkp_area_pba(migr_rec) * IMSM_4K_DIV);
+ set_migr_dest_1st_member_lba(migr_rec,
+ migr_dest_1st_member_lba(migr_rec) * IMSM_4K_DIV);
}
void convert_from_4k(struct intel_super *super)
strncpy(str, (char *)mpb->sig, MPB_SIG_LEN);
str[MPB_SIG_LEN-1] = '\0';
printf(" Magic : %s\n", str);
- snprintf(str, strlen(MPB_VERSION_RAID0), "%s", get_imsm_version(mpb));
printf(" Version : %s\n", get_imsm_version(mpb));
printf(" Orig Family : %08x\n", __le32_to_cpu(mpb->orig_family_num));
printf(" Family : %08x\n", __le32_to_cpu(mpb->family_num));
}
}
+static unsigned long long calc_component_size(struct imsm_map *map,
+ struct imsm_dev *dev)
+{
+ unsigned long long component_size;
+ unsigned long long dev_size = imsm_dev_size(dev);
+ long long calc_dev_size = 0;
+ unsigned int member_disks = imsm_num_data_members(map);
+
+ if (member_disks == 0)
+ return 0;
+
+ component_size = per_dev_array_size(map);
+ calc_dev_size = component_size * member_disks;
+
+ /* Component size is rounded to 1MB, so the difference between the size
+ * from metadata and the size calculated from num_data_stripes can be up
+ * to 2048 blocks per device. A larger difference means the array size
+ * was expanded and num_data_stripes was not updated.
+ */
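+ /* e.g. with member_disks = 4 the tolerated difference is
+ * 4 * 2048 = 8192 blocks, i.e. 1 MiB of rounding per member
+ */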
+ if (llabs(calc_dev_size - (long long)dev_size) >
+ (1 << SECT_PER_MB_SHIFT) * member_disks) {
+ component_size = dev_size / member_disks;
+ dprintf("Invalid num_data_stripes in metadata; expected=%llu, found=%llu\n",
+ component_size / map->blocks_per_strip,
+ num_data_stripes(map));
+ }
+
+ return component_size;
+}
+
static __u32 parity_segment_depth(struct imsm_dev *dev)
{
struct imsm_map *map = get_imsm_map(dev, MAP_0);
return 0;
}
(*u)->type = update_general_migration_checkpoint;
- (*u)->curr_migr_unit = __le32_to_cpu(super->migr_rec->curr_migr_unit);
+ (*u)->curr_migr_unit = current_migr_unit(super->migr_rec);
dprintf("prepared for %u\n", (*u)->curr_migr_unit);
return update_memory_size;
}
return rv;
}
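+ /* Example (hypothetical numbers): chunk_size = 131072 and sector_size = 512
+ * give a 256-sector alignment unit; a component_size of 1000 sectors has
+ * remainder 1000 % 256 = 232 and is trimmed to 768 sectors.
+ */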
-static unsigned long long imsm_component_size_aligment_check(int level,
+static unsigned long long imsm_component_size_alignment_check(int level,
int chunk_size,
unsigned int sector_size,
unsigned long long component_size)
{
- unsigned int component_size_alligment;
+ unsigned int component_size_alignment;
- /* check component size aligment
+ /* check component size alignment
*/
- component_size_alligment = component_size % (chunk_size/sector_size);
+ component_size_alignment = component_size % (chunk_size/sector_size);
- dprintf("(Level: %i, chunk_size = %i, component_size = %llu), component_size_alligment = %u\n",
+ dprintf("(Level: %i, chunk_size = %i, component_size = %llu), component_size_alignment = %u\n",
level, chunk_size, component_size,
- component_size_alligment);
+ component_size_alignment);
- if (component_size_alligment && (level != 1) && (level != UnSet)) {
- dprintf("imsm: reported component size alligned from %llu ",
+ if (component_size_alignment && (level != 1) && (level != UnSet)) {
+ dprintf("imsm: reported component size aligned from %llu ",
component_size);
- component_size -= component_size_alligment;
+ component_size -= component_size_alignment;
dprintf_cont("to %llu (%i).\n",
- component_size, component_size_alligment);
+ component_size, component_size_alignment);
}
return component_size;
}
info->data_offset = pba_of_lba0(map_to_analyse);
-
- if (info->array.level == 5) {
- info->component_size = num_data_stripes(map_to_analyse) *
- map_to_analyse->blocks_per_strip;
- } else {
- info->component_size = blocks_per_member(map_to_analyse);
- }
-
- info->component_size = imsm_component_size_aligment_check(
+ info->component_size = calc_component_size(map, dev);
+ info->component_size = imsm_component_size_alignment_check(
info->array.level,
info->array.chunk_size,
super->sector_size,
case MIGR_GEN_MIGR: {
__u64 blocks_per_unit = blocks_per_migr_unit(super,
dev);
- __u64 units = __le32_to_cpu(migr_rec->curr_migr_unit);
+ __u64 units = current_migr_unit(migr_rec);
unsigned long long array_blocks;
int used_disks;
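+ /* an ascending migration whose current unit's source data already
+ * sits in the checkpoint copy area counts that unit as completed
+ */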
if (__le32_to_cpu(migr_rec->ascending_migr) &&
(units <
- (__le32_to_cpu(migr_rec->num_migr_units)-1)) &&
+ (get_num_migr_units(migr_rec)-1)) &&
(super->migr_rec->rec_status ==
__cpu_to_le32(UNIT_SRC_IN_CP_AREA)))
units++;
used_disks = imsm_num_data_members(prev_map);
if (used_disks > 0) {
- array_blocks = blocks_per_member(map) *
+ array_blocks = per_dev_array_size(map) *
used_disks;
info->custom_array_size =
round_size_to_mb(array_blocks,
struct sys_dev *hba_name;
int rv = 0;
+ if (fd >= 0 && test_partition(fd)) {
+ pr_err("imsm: %s is a partition, cannot be used in IMSM\n",
+ devname);
+ return 1;
+ }
if (fd < 0 || check_env("IMSM_NO_PLATFORM")) {
super->orom = NULL;
super->hba = NULL;
{
struct imsm_super *mpb = super->anchor;
char *reason = NULL;
+ char *start = name;
+ size_t len = strlen(name);
int i;
- if (strlen(name) > MAX_RAID_SERIAL_LEN)
+ if (len > 0) {
+ while (len && isspace(start[len - 1]))
+ start[--len] = 0;
+ while (*start && isspace(*start))
+ ++start, --len;
+ memmove(name, start, len + 1);
+ }
+
+ if (len > MAX_RAID_SERIAL_LEN)
reason = "must be 16 characters or less";
+ else if (len == 0)
+ reason = "must be a non-empty string";
for (i = 0; i < mpb->num_raid_devs; i++) {
struct imsm_dev *dev = get_imsm_dev(super, i);
struct imsm_map *map;
int idx = mpb->num_raid_devs;
int i;
+ int namelen;
unsigned long long array_blocks;
size_t size_old, size_new;
unsigned long long num_data_stripes;
return 0;
dv = xmalloc(sizeof(*dv));
dev = xcalloc(1, sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1));
- strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
+ /*
+ * Explicitly allow truncating to not confuse gcc's
+ * -Werror=stringop-truncation
+ */
+ namelen = min((int) strlen(name), MAX_RAID_SERIAL_LEN);
+ memcpy(dev->volume, name, namelen);
array_blocks = calc_array_size(info->level, info->raid_disks,
info->layout, info->chunk_size,
s->size * BLOCKS_PER_KB);
vol->curr_migr_unit = 0;
map = get_imsm_map(dev, MAP_0);
set_pba_of_lba0(map, super->create_offset);
- set_blocks_per_member(map, info_to_blocks_per_member(info,
- size_per_member /
- BLOCKS_PER_KB));
map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
map->failed_disk_num = ~0;
if (info->level > 0)
num_data_stripes /= map->num_domains;
set_num_data_stripes(map, num_data_stripes);
+ size_per_member += NUM_BLOCKS_DIRTY_STRIPE_REGION;
+ set_blocks_per_member(map, info_to_blocks_per_member(info,
+ size_per_member /
+ BLOCKS_PER_KB));
+
map->num_members = info->raid_disks;
for (i = 0; i < map->num_members; i++) {
/* initialized in add_to_super */
return 1;
}
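+ /* no disk is recorded in the anchor yet, so take the sector size
+ * from the disk being added
+ */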
+ if (mpb->num_disks == 0)
+ if (!get_dev_sector_size(dl->fd, dl->devname,
+ &super->sector_size))
+ return 1;
+
if (!drive_validate_sector_size(super, dl)) {
pr_err("Combining drives of different sector size in one volume is not allowed\n");
return 1;
mpb = super->anchor;
if (!validate_geometry_imsm_orom(super, level, layout, raiddisks, chunk, size, verbose)) {
- pr_err("RAID gemetry validation failed. Cannot proceed with the action(s).\n");
+ pr_err("RAID geometry validation failed. Cannot proceed with the action(s).\n");
return 0;
}
if (!dev) {
pos = 0;
i = 0;
- e = get_extents(super, dl);
+ e = get_extents(super, dl, 0);
if (!e) continue;
do {
unsigned long long esize;
}
/* retrieve the largest free space block */
- e = get_extents(super, dl);
+ e = get_extents(super, dl, 0);
maxsize = 0;
i = 0;
if (e) {
if (super->orom && dl->index < 0 && mpb->num_raid_devs)
continue;
- e = get_extents(super, dl);
+ e = get_extents(super, dl, 0);
if (!e)
continue;
for (i = 1; e[i-1].size; i++)
verbose);
}
+ if (size && (size < 1024)) {
+ pr_err("Given size must be at least 1M.\n");
+ /* Depends on the algorithm in Create.c:
+ * if a container was given (dev == NULL), return -1;
+ * if a block device was given (dev != NULL), return 0.
+ */
+ return dev ? -1 : 0;
+ }
+
if (!dev) {
if (st->sb) {
struct intel_super *super = st->sb;
append_metadata_update(st, u, sizeof(*u));
} else {
struct imsm_dev *dev;
- int i;
+ int i, namelen;
dev = get_imsm_dev(super, vol);
- strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
- dev->volume[MAX_RAID_SERIAL_LEN-1] = '\0';
+ memset(dev->volume, '\0', MAX_RAID_SERIAL_LEN);
+ namelen = min((int)strlen(name), MAX_RAID_SERIAL_LEN);
+ memcpy(dev->volume, name, namelen);
for (i = 0; i < mpb->num_raid_devs; i++) {
dev = get_imsm_dev(super, i);
handle_missing(super, dev);
info_d->events = __le32_to_cpu(mpb->generation_num);
info_d->data_offset = pba_of_lba0(map);
+ info_d->component_size = calc_component_size(map, dev);
if (map->raid_level == 5) {
- info_d->component_size =
- num_data_stripes(map) *
- map->blocks_per_strip;
info_d->ppl_sector = this->ppl_sector;
info_d->ppl_size = this->ppl_size;
if (this->consistency_policy == CONSISTENCY_POLICY_PPL &&
recovery_start == 0)
this->resync_start = 0;
- } else {
- info_d->component_size = blocks_per_member(map);
}
info_d->bb.supported = 1;
strcat(buf, ":0");
if ((len = strlen(buf)) >= MAX_RAID_SERIAL_LEN)
shift = len - MAX_RAID_SERIAL_LEN + 1;
- strncpy((char *)disk->serial, &buf[shift], MAX_RAID_SERIAL_LEN);
+ memcpy(disk->serial, &buf[shift], len + 1 - shift);
disk->status |= FAILED_DISK;
set_imsm_ord_tbl_ent(map, slot, idx | IMSM_ORD_REBUILD);
set_imsm_ord_tbl_ent(map2, slot2,
idx | IMSM_ORD_REBUILD);
}
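+ /* record this slot when no failure is recorded yet, or keep the
+ * lowest failed slot while not rebuilding
+ */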
- if (map->failed_disk_num == 0xff)
+ if (map->failed_disk_num == 0xff ||
+ (!is_rebuilding(dev) && map->failed_disk_num > slot))
map->failed_disk_num = slot;
clear_disk_badblocks(super->bbm_log, ord_to_idx(ord));
if (new_size <= 0)
/* OLCE size change is caused by added disks
*/
- array_blocks = blocks_per_member(map) * used_disks;
+ array_blocks = per_dev_array_size(map) * used_disks;
else
/* Online Volume Size Change
* Using available free space
used_disks = imsm_num_data_members(map);
if (used_disks > 0) {
array_blocks =
- blocks_per_member(map) *
+ per_dev_array_size(map) *
used_disks;
array_blocks =
round_size_to_mb(array_blocks,
disk = get_imsm_disk(super, ord_to_idx(ord));
/* check for new failures */
- if (state & DS_FAULTY) {
+ if (disk && (state & DS_FAULTY)) {
if (mark_failure(super, dev, disk, ord_to_idx(ord)))
super->updates_pending++;
}
break;
}
if (is_rebuilding(dev)) {
- dprintf_cont("while rebuilding.");
- if (map->map_state != map_state) {
- dprintf_cont(" Map state change");
- end_migration(dev, super, map_state);
+ dprintf_cont("while rebuilding ");
+ if (state & DS_FAULTY) {
+ dprintf_cont("removing failed drive ");
+ if (n == map->failed_disk_num) {
+ dprintf_cont("end migration");
+ end_migration(dev, super, map_state);
+ a->last_checkpoint = 0;
+ } else {
+ dprintf_cont("fail detected during rebuild, changing map state");
+ map->map_state = map_state;
+ }
super->updates_pending++;
- } else if (!rebuild_done) {
- break;
}
+ if (!rebuild_done)
+ break;
+
/* check if recovery is really finished */
for (mdi = a->info.devs; mdi ; mdi = mdi->next)
if (mdi->recovery_start != MaxSector) {
}
if (recovery_not_finished) {
dprintf_cont("\n");
- dprintf("Rebuild has not finished yet, state not changed");
+ dprintf_cont("Rebuild has not finished yet");
if (a->last_checkpoint < mdi->recovery_start) {
a->last_checkpoint =
mdi->recovery_start;
}
dprintf_cont(" Rebuild done, still degraded");
- dev->vol.migr_state = 0;
- set_migr_type(dev, 0);
- dev->vol.curr_migr_unit = 0;
+ end_migration(dev, super, map_state);
+ a->last_checkpoint = 0;
+ super->updates_pending++;
for (i = 0; i < map->num_members; i++) {
int idx = get_imsm_ord_tbl_ent(dev, i, MAP_0);
/* Does this unused device have the requisite free space?
* It needs to be able to cover all member volumes
*/
- ex = get_extents(super, dl);
+ ex = get_extents(super, dl, 1);
if (!ex) {
dprintf("cannot get extents\n");
continue;
pos = 0;
array_start = pba_of_lba0(map);
array_end = array_start +
- blocks_per_member(map) - 1;
+ per_dev_array_size(map) - 1;
do {
/* check that we can start at pba_of_lba0 with
- * blocks_per_member of space
+ * num_data_stripes*blocks_per_stripe of space
*/
if (array_start >= pos && array_end < ex[j].start) {
found = 1;
set_num_data_stripes(map, num_data_stripes);
}
+ /* ensure blocks_per_member has a valid value
+ */
+ set_blocks_per_member(map,
+ per_dev_array_size(map) +
+ NUM_BLOCKS_DIRTY_STRIPE_REGION);
+
/* add disk
*/
if (u->new_level != 5 || migr_map->raid_level != 0 ||
int used_disks = imsm_num_data_members(map);
unsigned long long blocks_per_member;
unsigned long long num_data_stripes;
+ unsigned long long new_size_per_disk;
+
+ if (used_disks == 0)
+ return 0;
/* calculate new size
*/
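+ /* blocks_per_member is the per-disk data area plus the dirty stripe
+ * journal reserve */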
- blocks_per_member = u->new_size / used_disks;
- num_data_stripes = blocks_per_member /
+ new_size_per_disk = u->new_size / used_disks;
+ blocks_per_member = new_size_per_disk +
+ NUM_BLOCKS_DIRTY_STRIPE_REGION;
+ num_data_stripes = new_size_per_disk /
map->blocks_per_strip;
num_data_stripes /= map->num_domains;
dprintf("(size: %llu, blocks per member: %llu, num_data_stipes: %llu)\n",
- u->new_size, blocks_per_member,
+ u->new_size, new_size_per_disk,
num_data_stripes);
set_blocks_per_member(map, blocks_per_member);
set_num_data_stripes(map, num_data_stripes);
if (u->direction == R10_TO_R0) {
unsigned long long num_data_stripes;
- map->num_domains = 1;
- num_data_stripes = blocks_per_member(map);
- num_data_stripes /= map->blocks_per_strip;
- num_data_stripes /= map->num_domains;
- set_num_data_stripes(map, num_data_stripes);
-
/* Number of failed disks must be half of initial disk number */
if (imsm_count_failed(super, dev, MAP_0) !=
(map->num_members / 2))
map->num_domains = 1;
map->raid_level = 0;
map->failed_disk_num = -1;
+ num_data_stripes = imsm_dev_size(dev) / 2;
+ num_data_stripes /= map->blocks_per_strip;
+ set_num_data_stripes(map, num_data_stripes);
}
if (u->direction == R0_TO_R10) {
void **space;
+ unsigned long long num_data_stripes;
+
/* update slots in current disk list */
for (dm = super->disks; dm; dm = dm->next) {
if (dm->index >= 0)
map->map_state = IMSM_T_STATE_DEGRADED;
map->num_domains = 2;
map->raid_level = 1;
+ num_data_stripes = imsm_dev_size(dev) / 2;
+ num_data_stripes /= map->blocks_per_strip;
+ num_data_stripes /= map->num_domains;
+ set_num_data_stripes(map, num_data_stripes);
+
/* replace dev<->dev_new */
dv->dev = dev_new;
}
new_map = get_imsm_map(&u->dev, MAP_0);
new_start = pba_of_lba0(new_map);
- new_end = new_start + blocks_per_member(new_map);
+ new_end = new_start + per_dev_array_size(new_map);
inf = get_disk_info(u);
/* handle activate_spare versus create race:
dev = get_imsm_dev(super, i);
map = get_imsm_map(dev, MAP_0);
start = pba_of_lba0(map);
- end = start + blocks_per_member(map);
+ end = start + per_dev_array_size(map);
if ((new_start >= start && new_start <= end) ||
(start >= new_start && start <= new_end))
/* overlap */;
/* sanity check that we are not affecting the uuid of
* an active array
*/
+ memset(name, 0, sizeof(name));
snprintf(name, MAX_RAID_SERIAL_LEN, "%s", (char *) u->name);
name[MAX_RAID_SERIAL_LEN] = '\0';
for (a = st->arrays; a; a = a->next)
break;
}
- snprintf((char *) dev->volume, MAX_RAID_SERIAL_LEN, "%s", name);
+ memcpy(dev->volume, name, MAX_RAID_SERIAL_LEN);
super->updates_pending++;
break;
}
break;
}
default:
- pr_err("error: unsuported process update type:(type: %d)\n", type);
+ pr_err("error: unsupported process update type:(type: %d)\n", type);
}
}
return NULL;
get_volume_badblocks(super->bbm_log, ord_to_idx(ord), pba_of_lba0(map),
- blocks_per_member(map), &super->bb);
+ per_dev_array_size(map), &super->bb);
return &super->bb;
}
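+ /* round up so that a final partial unit is counted */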
if (array_blocks % __le32_to_cpu(migr_rec->blocks_per_unit))
num_migr_units++;
- migr_rec->num_migr_units = __cpu_to_le32(num_migr_units);
+ set_num_migr_units(migr_rec, num_migr_units);
migr_rec->post_migr_vol_cap = dev->size_low;
migr_rec->post_migr_vol_cap_hi = dev->size_high;
min_dev_sectors = dev_sectors;
close(fd);
}
- migr_rec->ckpt_area_pba = __cpu_to_le32(min_dev_sectors -
+ set_migr_chkp_area_pba(migr_rec, min_dev_sectors -
RAID_DISK_RESERVED_BLOCKS_IMSM_HI);
write_imsm_migr_rec(st);
start = info->reshape_progress * 512;
for (i = 0; i < new_disks; i++) {
- target_offsets[i] = (unsigned long long)
- __le32_to_cpu(super->migr_rec->ckpt_area_pba) * 512;
+ target_offsets[i] = migr_chkp_area_pba(super->migr_rec) * 512;
/* move back the copy area address; it will be moved forward
* in restore_stripes() using the start input variable
*/
if (info->reshape_progress % blocks_per_unit)
curr_migr_unit++;
- super->migr_rec->curr_migr_unit =
- __cpu_to_le32(curr_migr_unit);
+ set_current_migr_unit(super->migr_rec, curr_migr_unit);
super->migr_rec->rec_status = __cpu_to_le32(state);
- super->migr_rec->dest_1st_member_lba =
- __cpu_to_le32(curr_migr_unit *
- __le32_to_cpu(super->migr_rec->dest_depth_per_unit));
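+ /* the destination's first member LBA advances by dest_depth_per_unit
+ * blocks for every completed migration unit
+ */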
+ set_migr_dest_1st_member_lba(super->migr_rec,
+ __le32_to_cpu(super->migr_rec->dest_depth_per_unit) * curr_migr_unit);
+
if (write_imsm_migr_rec(st) < 0) {
dprintf("imsm: Cannot write migration record outside backup area\n");
return 1;
char *buf = NULL;
int retval = 1;
unsigned int sector_size = super->sector_size;
- unsigned long curr_migr_unit = __le32_to_cpu(migr_rec->curr_migr_unit);
- unsigned long num_migr_units = __le32_to_cpu(migr_rec->num_migr_units);
+ unsigned long long curr_migr_unit = current_migr_unit(migr_rec);
+ unsigned long long num_migr_units = get_num_migr_units(migr_rec);
char buffer[20];
int skipped_disks = 0;
map_dest = get_imsm_map(id->dev, MAP_0);
new_disks = map_dest->num_members;
- read_offset = (unsigned long long)
- __le32_to_cpu(migr_rec->ckpt_area_pba) * 512;
+ read_offset = migr_chkp_area_pba(migr_rec) * 512;
- write_offset = ((unsigned long long)
- __le32_to_cpu(migr_rec->dest_1st_member_lba) +
+ write_offset = (migr_dest_1st_member_lba(migr_rec) +
pba_of_lba0(map_dest)) * 512;
unit_len = __le32_to_cpu(migr_rec->dest_depth_per_unit) * 512;
if (geo->size > 0 && geo->size != MAX_SIZE) {
/* align component size
*/
- geo->size = imsm_component_size_aligment_check(
+ geo->size = imsm_component_size_alignment_check(
get_imsm_raid_level(dev->vol.map),
chunk * 1024, super->sector_size,
geo->size * 2);
max_size = free_size + current_size;
/* align component size
*/
- max_size = imsm_component_size_aligment_check(
+ max_size = imsm_component_size_alignment_check(
get_imsm_raid_level(dev->vol.map),
chunk * 1024, super->sector_size,
max_size);
dprintf("for level : %i\n", geo.level);
dprintf("for raid_disks : %i\n", geo.raid_disks);
- if (experimental() == 0)
- return ret_val;
-
if (strcmp(st->container_devnm, st->devnm) == 0) {
/* On container level we can only increase number of devices. */
dprintf("imsm: info: Container operation\n");
buf_size = __le32_to_cpu(migr_rec->blocks_per_unit) * 512;
/* extend buffer size for parity disk */
buf_size += __le32_to_cpu(migr_rec->dest_depth_per_unit) * 512;
- /* add space for stripe aligment */
+ /* add space for stripe alignment */
buf_size += old_data_stripe_length;
if (posix_memalign((void **)&buf, MAX_SECTOR_SIZE, buf_size)) {
dprintf("imsm: Cannot allocate checkpoint buffer\n");
max_position = sra->component_size * ndata;
source_layout = imsm_level_to_layout(map_src->raid_level);
- while (__le32_to_cpu(migr_rec->curr_migr_unit) <
- __le32_to_cpu(migr_rec->num_migr_units)) {
+ while (current_migr_unit(migr_rec) <
+ get_num_migr_units(migr_rec)) {
/* current reshape position [blocks] */
unsigned long long current_position =
__le32_to_cpu(migr_rec->blocks_per_unit)
- * __le32_to_cpu(migr_rec->curr_migr_unit);
+ * current_migr_unit(migr_rec);
unsigned long long border;
/* Check that array hasn't become failed.