mdmon: fix wrong array state when disk fails during mdmon startup

diff --git a/super-intel.c b/super-intel.c
index bbe7bc71c8da22aeda29222d5e61875a5e19046a..4fd5e84d0bc785e8cca476be2b8083000e0b0367 100644
--- a/super-intel.c
+++ b/super-intel.c
 #define MPB_ATTRIB_IGNORED             (MPB_ATTRIB_NEVER_USE)
 
 #define MPB_SECTOR_CNT 2210
-#define IMSM_RESERVED_SECTORS 4096
-#define NUM_BLOCKS_DIRTY_STRIPE_REGION 2056
+#define IMSM_RESERVED_SECTORS 8192
+#define NUM_BLOCKS_DIRTY_STRIPE_REGION 2048
 #define SECT_PER_MB_SHIFT 11
 #define MAX_SECTOR_SIZE 4096
+#define MULTIPLE_PPL_AREA_SIZE_IMSM (1024 * 1024) /* Size of the whole
+                                                  * multiple PPL area
+                                                  */
 
 /* Disk configuration info. */
 #define IMSM_MAX_DEVICES 255
@@ -207,6 +210,9 @@ struct imsm_dev {
 #define RWH_OFF 0
 #define RWH_DISTRIBUTED 1
 #define RWH_JOURNALING_DRIVE 2
+#define RWH_MULTIPLE_DISTRIBUTED 3
+#define RWH_MULTIPLE_PPLS_JOURNALING_DRIVE 4
+#define RWH_MULTIPLE_OFF 5
        __u8  rwh_policy; /* Raid Write Hole Policy */
        __u8  jd_serial[MAX_RAID_SERIAL_LEN]; /* Journal Drive serial number */
        __u8  filler1;
@@ -284,13 +290,13 @@ static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed"
                                 *  already been migrated and must
                                 *  be recovered from checkpoint area */
 
-#define PPL_ENTRY_SPACE (128 * 1024) /* Size of the PPL, without the header */
+#define PPL_ENTRY_SPACE (128 * 1024) /* Size of single PPL, without the header */
 
 struct migr_record {
        __u32 rec_status;           /* Status used to determine how to restart
                                     * migration in case it aborts
                                     * in some fashion */
-       __u32 curr_migr_unit;       /* 0..numMigrUnits-1 */
+       __u32 curr_migr_unit_lo;    /* 0..numMigrUnits-1 */
        __u32 family_num;           /* Family number of MPB
                                     * containing the RaidDev
                                     * that is migrating */
@@ -300,16 +306,23 @@ struct migr_record {
        __u32 dest_depth_per_unit;  /* Num member blocks each destMap
                                     * member disk
                                     * advances per unit-of-operation */
-       __u32 ckpt_area_pba;        /* Pba of first block of ckpt copy area */
-       __u32 dest_1st_member_lba;  /* First member lba on first
-                                    * stripe of destination */
-       __u32 num_migr_units;       /* Total num migration units-of-op */
+       __u32 ckpt_area_pba_lo;     /* Pba of first block of ckpt copy area */
+       __u32 dest_1st_member_lba_lo;   /* First member lba on first
+                                        * stripe of destination */
+       __u32 num_migr_units_lo;    /* Total num migration units-of-op */
        __u32 post_migr_vol_cap;    /* Size of volume after
                                     * migration completes */
        __u32 post_migr_vol_cap_hi; /* Expansion space for LBA64 */
        __u32 ckpt_read_disk_num;   /* Which member disk in destSubMap[0] the
                                     * migration ckpt record was read from
                                     * (for recovered migrations) */
+       __u32 curr_migr_unit_hi;    /* 0..numMigrUnits-1 high order 32 bits */
+       __u32 ckpt_area_pba_hi;     /* Pba of first block of ckpt copy area
+                                    * high order 32 bits */
+       __u32 dest_1st_member_lba_hi; /* First member lba on first stripe of
+                                      * destination - high order 32 bits */
+       __u32 num_migr_units_hi;      /* Total num migration units-of-op
+                                      * high order 32 bits */
 } __attribute__ ((__packed__));
 
 struct md_list {
@@ -1195,6 +1208,45 @@ static unsigned long long num_data_stripes(struct imsm_map *map)
        return join_u32(map->num_data_stripes_lo, map->num_data_stripes_hi);
 }
 
+static unsigned long long imsm_dev_size(struct imsm_dev *dev)
+{
+       if (dev == NULL)
+               return 0;
+       return join_u32(dev->size_low, dev->size_high);
+}
+
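+/* The migr_record accessors below join the _lo/_hi field pairs into 64-bit
+ * values; they return 0 when no migration record is given.
+ */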
+static unsigned long long migr_chkp_area_pba(struct migr_record *migr_rec)
+{
+       if (migr_rec == NULL)
+               return 0;
+       return join_u32(migr_rec->ckpt_area_pba_lo,
+                       migr_rec->ckpt_area_pba_hi);
+}
+
+static unsigned long long current_migr_unit(struct migr_record *migr_rec)
+{
+       if (migr_rec == NULL)
+               return 0;
+       return join_u32(migr_rec->curr_migr_unit_lo,
+                       migr_rec->curr_migr_unit_hi);
+}
+
+static unsigned long long migr_dest_1st_member_lba(struct migr_record *migr_rec)
+{
+       if (migr_rec == NULL)
+               return 0;
+       return join_u32(migr_rec->dest_1st_member_lba_lo,
+                       migr_rec->dest_1st_member_lba_hi);
+}
+
+static unsigned long long get_num_migr_units(struct migr_record *migr_rec)
+{
+       if (migr_rec == NULL)
+               return 0;
+       return join_u32(migr_rec->num_migr_units_lo,
+                       migr_rec->num_migr_units_hi);
+}
+
 static void set_total_blocks(struct imsm_disk *disk, unsigned long long n)
 {
        split_ull(n, &disk->total_blocks_lo, &disk->total_blocks_hi);
@@ -1215,7 +1267,54 @@ static void set_num_data_stripes(struct imsm_map *map, unsigned long long n)
        split_ull(n, &map->num_data_stripes_lo, &map->num_data_stripes_hi);
 }
 
-static struct extent *get_extents(struct intel_super *super, struct dl *dl)
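+/* Setters mirroring the accessors above: split a 64-bit value back into the
+ * 32-bit lo/hi fields stored in the metadata.
+ */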
+static void set_imsm_dev_size(struct imsm_dev *dev, unsigned long long n)
+{
+       split_ull(n, &dev->size_low, &dev->size_high);
+}
+
+static void set_migr_chkp_area_pba(struct migr_record *migr_rec,
+                                  unsigned long long n)
+{
+       split_ull(n, &migr_rec->ckpt_area_pba_lo, &migr_rec->ckpt_area_pba_hi);
+}
+
+static void set_current_migr_unit(struct migr_record *migr_rec,
+                                 unsigned long long n)
+{
+       split_ull(n, &migr_rec->curr_migr_unit_lo,
+                 &migr_rec->curr_migr_unit_hi);
+}
+
+static void set_migr_dest_1st_member_lba(struct migr_record *migr_rec,
+                                        unsigned long long n)
+{
+       split_ull(n, &migr_rec->dest_1st_member_lba_lo,
+                 &migr_rec->dest_1st_member_lba_hi);
+}
+
+static void set_num_migr_units(struct migr_record *migr_rec,
+                              unsigned long long n)
+{
+       split_ull(n, &migr_rec->num_migr_units_lo,
+                 &migr_rec->num_migr_units_hi);
+}
+
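+/* Blocks a single member disk contributes to the volume: data stripes times
+ * strip size, doubled for the mirrored levels (RAID1 and RAID10).
+ */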
+static unsigned long long per_dev_array_size(struct imsm_map *map)
+{
+       unsigned long long array_size = 0;
+
+       if (map == NULL)
+               return array_size;
+
+       array_size = num_data_stripes(map) * map->blocks_per_strip;
+       if (get_imsm_raid_level(map) == 1 || get_imsm_raid_level(map) == 10)
+               array_size *= 2;
+
+       return array_size;
+}
+
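+/* When get_minimal_reservation is set, only the minimal metadata reservation
+ * is assumed (as for spares) instead of MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS.
+ */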
+static struct extent *get_extents(struct intel_super *super, struct dl *dl,
+                                 int get_minimal_reservation)
 {
        /* find a list of used extents on the given physical device */
        struct extent *rv, *e;
@@ -1227,7 +1326,7 @@ static struct extent *get_extents(struct intel_super *super, struct dl *dl)
         * regardless of whether the OROM has assigned sectors from the
         * IMSM_RESERVED_SECTORS region
         */
-       if (dl->index == -1)
+       if (dl->index == -1 || get_minimal_reservation)
                reservation = imsm_min_reserved_sectors(super);
        else
                reservation = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
@@ -1241,7 +1340,7 @@ static struct extent *get_extents(struct intel_super *super, struct dl *dl)
 
                if (get_imsm_disk_slot(map, dl->index) >= 0) {
                        e->start = pba_of_lba0(map);
-                       e->size = blocks_per_member(map);
+                       e->size = per_dev_array_size(map);
                        e++;
                }
        }
@@ -1288,7 +1387,7 @@ static __u32 imsm_reserved_sectors(struct intel_super *super, struct dl *dl)
        if (dl->index == -1)
                return MPB_SECTOR_CNT;
 
-       e = get_extents(super, dl);
+       e = get_extents(super, dl, 0);
        if (!e)
                return MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
 
@@ -1336,6 +1435,20 @@ static unsigned long long round_size_to_mb(unsigned long long size, unsigned int
        return size;
 }
 
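+/* Return true when resync can proceed with the given number of missing
+ * disks; only RAID10 tolerates a single missing member here.
+ */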
+static int able_to_resync(int raid_level, int missing_disks)
+{
+       int max_missing_disks = 0;
+
+       switch (raid_level) {
+       case 10:
+               max_missing_disks = 1;
+               break;
+       default:
+               max_missing_disks = 0;
+       }
+       return missing_disks <= max_missing_disks;
+}
+
 /* try to determine how much space is reserved for metadata from
  * the last get_extents() entry on the smallest active disk,
  * otherwise fallback to the default
@@ -1366,7 +1479,7 @@ static __u32 imsm_min_reserved_sectors(struct intel_super *super)
                return rv;
 
        /* find last lba used by subarrays on the smallest active disk */
-       e = get_extents(super, dl_min);
+       e = get_extents(super, dl_min, 0);
        if (!e)
                return rv;
        for (i = 0; e[i].size; i++)
@@ -1407,7 +1520,7 @@ int get_spare_criteria_imsm(struct supertype *st, struct spare_criteria *c)
        if (!dl)
                return -EINVAL;
        /* find last lba used by subarrays */
-       e = get_extents(super, dl);
+       e = get_extents(super, dl, 0);
        if (!e)
                return -EINVAL;
        for (i = 0; e[i].size; i++)
@@ -1483,9 +1596,7 @@ static void print_imsm_dev(struct intel_super *super,
        } else
                printf("      This Slot : ?\n");
        printf("    Sector Size : %u\n", super->sector_size);
-       sz = __le32_to_cpu(dev->size_high);
-       sz <<= 32;
-       sz += __le32_to_cpu(dev->size_low);
+       sz = imsm_dev_size(dev);
        printf("     Array Size : %llu%s\n",
                   (unsigned long long)sz * 512 / super->sector_size,
               human_size(sz * 512));
@@ -1539,12 +1650,16 @@ static void print_imsm_dev(struct intel_super *super,
        printf("    Dirty State : %s\n", (dev->vol.dirty & RAIDVOL_DIRTY) ?
                                         "dirty" : "clean");
        printf("     RWH Policy : ");
-       if (dev->rwh_policy == RWH_OFF)
+       if (dev->rwh_policy == RWH_OFF || dev->rwh_policy == RWH_MULTIPLE_OFF)
                printf("off\n");
        else if (dev->rwh_policy == RWH_DISTRIBUTED)
                printf("PPL distributed\n");
        else if (dev->rwh_policy == RWH_JOURNALING_DRIVE)
                printf("PPL journaling drive\n");
+       else if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
+               printf("Multiple distributed PPLs\n");
+       else if (dev->rwh_policy == RWH_MULTIPLE_PPLS_JOURNALING_DRIVE)
+               printf("Multiple PPLs on journaling drive\n");
        else
                printf("<unknown:%d>\n", dev->rwh_policy);
 }
@@ -1581,12 +1696,14 @@ void convert_to_4k_imsm_migr_rec(struct intel_super *super)
        struct migr_record *migr_rec = super->migr_rec;
 
        migr_rec->blocks_per_unit /= IMSM_4K_DIV;
-       migr_rec->ckpt_area_pba /= IMSM_4K_DIV;
-       migr_rec->dest_1st_member_lba /= IMSM_4K_DIV;
        migr_rec->dest_depth_per_unit /= IMSM_4K_DIV;
        split_ull((join_u32(migr_rec->post_migr_vol_cap,
                 migr_rec->post_migr_vol_cap_hi) / IMSM_4K_DIV),
                 &migr_rec->post_migr_vol_cap, &migr_rec->post_migr_vol_cap_hi);
+       set_migr_chkp_area_pba(migr_rec,
+                migr_chkp_area_pba(migr_rec) / IMSM_4K_DIV);
+       set_migr_dest_1st_member_lba(migr_rec,
+                migr_dest_1st_member_lba(migr_rec) / IMSM_4K_DIV);
 }
 
 void convert_to_4k_imsm_disk(struct imsm_disk *disk)
@@ -1610,8 +1727,7 @@ void convert_to_4k(struct intel_super *super)
                struct imsm_dev *dev = __get_imsm_dev(mpb, i);
                struct imsm_map *map = get_imsm_map(dev, MAP_0);
                /* dev */
-               split_ull((join_u32(dev->size_low, dev->size_high)/IMSM_4K_DIV),
-                                &dev->size_low, &dev->size_high);
+               set_imsm_dev_size(dev, imsm_dev_size(dev)/IMSM_4K_DIV);
                dev->vol.curr_migr_unit /= IMSM_4K_DIV;
 
                /* map0 */
@@ -1680,8 +1796,8 @@ void examine_migr_rec_imsm(struct intel_super *super)
                        printf("Normal\n");
                else
                        printf("Contains Data\n");
-               printf("               Current Unit : %u\n",
-                      __le32_to_cpu(migr_rec->curr_migr_unit));
+               printf("               Current Unit : %llu\n",
+                      current_migr_unit(migr_rec));
                printf("                     Family : %u\n",
                       __le32_to_cpu(migr_rec->family_num));
                printf("                  Ascending : %u\n",
@@ -1690,16 +1806,15 @@ void examine_migr_rec_imsm(struct intel_super *super)
                       __le32_to_cpu(migr_rec->blocks_per_unit));
                printf("       Dest. Depth Per Unit : %u\n",
                       __le32_to_cpu(migr_rec->dest_depth_per_unit));
-               printf("        Checkpoint Area pba : %u\n",
-                      __le32_to_cpu(migr_rec->ckpt_area_pba));
-               printf("           First member lba : %u\n",
-                      __le32_to_cpu(migr_rec->dest_1st_member_lba));
-               printf("      Total Number of Units : %u\n",
-                      __le32_to_cpu(migr_rec->num_migr_units));
-               printf("             Size of volume : %u\n",
-                      __le32_to_cpu(migr_rec->post_migr_vol_cap));
-               printf("  Expansion space for LBA64 : %u\n",
-                      __le32_to_cpu(migr_rec->post_migr_vol_cap_hi));
+               printf("        Checkpoint Area pba : %llu\n",
+                      migr_chkp_area_pba(migr_rec));
+               printf("           First member lba : %llu\n",
+                      migr_dest_1st_member_lba(migr_rec));
+               printf("      Total Number of Units : %llu\n",
+                      get_num_migr_units(migr_rec));
+               printf("             Size of volume : %llu\n",
+                      join_u32(migr_rec->post_migr_vol_cap,
+                               migr_rec->post_migr_vol_cap_hi));
                printf("       Record was read from : %u\n",
                       __le32_to_cpu(migr_rec->ckpt_read_disk_num));
 
@@ -1712,13 +1827,15 @@ void convert_from_4k_imsm_migr_rec(struct intel_super *super)
        struct migr_record *migr_rec = super->migr_rec;
 
        migr_rec->blocks_per_unit *= IMSM_4K_DIV;
-       migr_rec->ckpt_area_pba *= IMSM_4K_DIV;
-       migr_rec->dest_1st_member_lba *= IMSM_4K_DIV;
        migr_rec->dest_depth_per_unit *= IMSM_4K_DIV;
        split_ull((join_u32(migr_rec->post_migr_vol_cap,
                 migr_rec->post_migr_vol_cap_hi) * IMSM_4K_DIV),
                 &migr_rec->post_migr_vol_cap,
                 &migr_rec->post_migr_vol_cap_hi);
+       set_migr_chkp_area_pba(migr_rec,
+                migr_chkp_area_pba(migr_rec) * IMSM_4K_DIV);
+       set_migr_dest_1st_member_lba(migr_rec,
+                migr_dest_1st_member_lba(migr_rec) * IMSM_4K_DIV);
 }
 
 void convert_from_4k(struct intel_super *super)
@@ -1738,8 +1855,7 @@ void convert_from_4k(struct intel_super *super)
                struct imsm_dev *dev = __get_imsm_dev(mpb, i);
                struct imsm_map *map = get_imsm_map(dev, MAP_0);
                /* dev */
-               split_ull((join_u32(dev->size_low, dev->size_high)*IMSM_4K_DIV),
-                                &dev->size_low, &dev->size_high);
+               set_imsm_dev_size(dev, imsm_dev_size(dev)*IMSM_4K_DIV);
                dev->vol.curr_migr_unit *= IMSM_4K_DIV;
 
                /* map0 */
@@ -1882,7 +1998,6 @@ static void examine_super_imsm(struct supertype *st, char *homehost)
        strncpy(str, (char *)mpb->sig, MPB_SIG_LEN);
        str[MPB_SIG_LEN-1] = '\0';
        printf("          Magic : %s\n", str);
-       snprintf(str, strlen(MPB_VERSION_RAID0), "%s", get_imsm_version(mpb));
        printf("        Version : %s\n", get_imsm_version(mpb));
        printf("    Orig Family : %08x\n", __le32_to_cpu(mpb->orig_family_num));
        printf("         Family : %08x\n", __le32_to_cpu(mpb->family_num));
@@ -2345,6 +2460,8 @@ static void print_imsm_capability(const struct imsm_orom *orom)
        printf("       Platform : Intel(R) ");
        if (orom->capabilities == 0 && orom->driver_features == 0)
                printf("Matrix Storage Manager\n");
+       else if (imsm_orom_is_enterprise(orom) && orom->major_ver >= 6)
+               printf("Virtual RAID on CPU\n");
        else
                printf("Rapid Storage Technology%s\n",
                        imsm_orom_is_enterprise(orom) ? " enterprise" : "");
@@ -2733,13 +2850,11 @@ static __u32 num_stripes_per_unit_rebuild(struct imsm_dev *dev)
                return num_stripes_per_unit_resync(dev);
 }
 
-static __u8 imsm_num_data_members(struct imsm_dev *dev, int second_map)
+static __u8 imsm_num_data_members(struct imsm_map *map)
 {
        /* named 'imsm_' because raid0, raid1 and raid10
         * counter-intuitively have the same number of data disks
         */
-       struct imsm_map *map = get_imsm_map(dev, second_map);
-
        switch (get_imsm_raid_level(map)) {
        case 0:
                return map->num_members;
@@ -2755,6 +2870,36 @@ static __u8 imsm_num_data_members(struct imsm_dev *dev, int second_map)
        }
 }
 
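+/* Per-disk component size: derived from num_data_stripes, falling back to
+ * dev_size / data disks when the metadata value looks stale (see below).
+ */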
+static unsigned long long calc_component_size(struct imsm_map *map,
+                                             struct imsm_dev *dev)
+{
+       unsigned long long component_size;
+       unsigned long long dev_size = imsm_dev_size(dev);
+       unsigned long long calc_dev_size = 0;
+       unsigned int member_disks = imsm_num_data_members(map);
+
+       if (member_disks == 0)
+               return 0;
+
+       component_size = per_dev_array_size(map);
+       calc_dev_size = component_size * member_disks;
+
+       /* Component size is rounded to 1MB, so the difference between the
+        * size from metadata and the size calculated from num_data_stripes
+        * can be up to 2048 blocks per device. If the difference is larger,
+        * the array size was expanded and num_data_stripes was not updated.
+        */
+       if ((unsigned int)abs(calc_dev_size - dev_size) >
+           (1 << SECT_PER_MB_SHIFT) * member_disks) {
+               component_size = dev_size / member_disks;
+               dprintf("Invalid num_data_stripes in metadata; expected=%llu, found=%llu\n",
+                       component_size / map->blocks_per_strip,
+                       num_data_stripes(map));
+       }
+
+       return component_size;
+}
+
 static __u32 parity_segment_depth(struct imsm_dev *dev)
 {
        struct imsm_map *map = get_imsm_map(dev, MAP_0);
@@ -2828,7 +2973,7 @@ static __u64 blocks_per_migr_unit(struct intel_super *super,
                 */
                stripes_per_unit = num_stripes_per_unit_resync(dev);
                migr_chunk = migr_strip_blocks_resync(dev);
-               disks = imsm_num_data_members(dev, MAP_0);
+               disks = imsm_num_data_members(map);
                blocks_per_unit = stripes_per_unit * migr_chunk * disks;
                stripe = __le16_to_cpu(map->blocks_per_strip) * disks;
                segment = blocks_per_unit / stripe;
@@ -3021,7 +3166,7 @@ static int imsm_create_metadata_checkpoint_update(
                return 0;
        }
        (*u)->type = update_general_migration_checkpoint;
-       (*u)->curr_migr_unit = __le32_to_cpu(super->migr_rec->curr_migr_unit);
+       (*u)->curr_migr_unit = current_migr_unit(super->migr_rec);
        dprintf("prepared for %u\n", (*u)->curr_migr_unit);
 
        return update_memory_size;
@@ -3152,27 +3297,27 @@ int imsm_reshape_blocks_arrays_changes(struct intel_super *super)
        }
        return rv;
 }
-static unsigned long long imsm_component_size_aligment_check(int level,
+static unsigned long long imsm_component_size_alignment_check(int level,
                                              int chunk_size,
                                              unsigned int sector_size,
                                              unsigned long long component_size)
 {
-       unsigned int component_size_alligment;
+       unsigned int component_size_alignment;
 
-       /* check component size aligment
+       /* check component size alignment
        */
-       component_size_alligment = component_size % (chunk_size/sector_size);
+       component_size_alignment = component_size % (chunk_size/sector_size);
 
-       dprintf("(Level: %i, chunk_size = %i, component_size = %llu), component_size_alligment = %u\n",
+       dprintf("(Level: %i, chunk_size = %i, component_size = %llu), component_size_alignment = %u\n",
                level, chunk_size, component_size,
-               component_size_alligment);
+               component_size_alignment);
 
-       if (component_size_alligment && (level != 1) && (level != UnSet)) {
-               dprintf("imsm: reported component size alligned from %llu ",
+       if (component_size_alignment && (level != 1) && (level != UnSet)) {
+               dprintf("imsm: reported component size aligned from %llu ",
                        component_size);
-               component_size -= component_size_alligment;
+               component_size -= component_size_alignment;
                dprintf_cont("to %llu (%i).\n",
-                       component_size, component_size_alligment);
+                       component_size, component_size_alignment);
        }
 
        return component_size;
@@ -3214,9 +3359,7 @@ static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info,
        info->array.chunk_size    =
                __le16_to_cpu(map_to_analyse->blocks_per_strip) << 9;
        info->array.state         = !(dev->vol.dirty & RAIDVOL_DIRTY);
-       info->custom_array_size   = __le32_to_cpu(dev->size_high);
-       info->custom_array_size   <<= 32;
-       info->custom_array_size   |= __le32_to_cpu(dev->size_low);
+       info->custom_array_size   = imsm_dev_size(dev);
        info->recovery_blocked = imsm_reshape_blocks_arrays_changes(st->sb);
 
        if (is_gen_migration(dev)) {
@@ -3276,15 +3419,8 @@ static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info,
        }
 
        info->data_offset         = pba_of_lba0(map_to_analyse);
-
-       if (info->array.level == 5) {
-               info->component_size = num_data_stripes(map_to_analyse) *
-                                      map_to_analyse->blocks_per_strip;
-       } else {
-               info->component_size = blocks_per_member(map_to_analyse);
-       }
-
-       info->component_size = imsm_component_size_aligment_check(
+       info->component_size = calc_component_size(map, dev);
+       info->component_size = imsm_component_size_alignment_check(
                                                        info->array.level,
                                                        info->array.chunk_size,
                                                        super->sector_size,
@@ -3294,10 +3430,16 @@ static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info,
        memset(info->uuid, 0, sizeof(info->uuid));
        info->recovery_start = MaxSector;
 
-       if (info->array.level == 5 && dev->rwh_policy == RWH_DISTRIBUTED) {
+       if (info->array.level == 5 &&
+           (dev->rwh_policy == RWH_DISTRIBUTED ||
+            dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)) {
                info->consistency_policy = CONSISTENCY_POLICY_PPL;
                info->ppl_sector = get_ppl_sector(super, super->current_vol);
-               info->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE) >> 9;
+               if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
+                       info->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
+               else
+                       info->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE)
+                                         >> 9;
        } else if (info->array.level <= 0) {
                info->consistency_policy = CONSISTENCY_POLICY_NONE;
        } else {
@@ -3325,13 +3467,13 @@ static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info,
                case MIGR_GEN_MIGR: {
                        __u64 blocks_per_unit = blocks_per_migr_unit(super,
                                                                     dev);
-                       __u64 units = __le32_to_cpu(migr_rec->curr_migr_unit);
+                       __u64 units = current_migr_unit(migr_rec);
                        unsigned long long array_blocks;
                        int used_disks;
 
                        if (__le32_to_cpu(migr_rec->ascending_migr) &&
                            (units <
-                               (__le32_to_cpu(migr_rec->num_migr_units)-1)) &&
+                               (get_num_migr_units(migr_rec)-1)) &&
                            (super->migr_rec->rec_status ==
                                        __cpu_to_le32(UNIT_SRC_IN_CP_AREA)))
                                units++;
@@ -3343,9 +3485,9 @@ static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info,
                                (unsigned long long)blocks_per_unit,
                                info->reshape_progress);
 
-                       used_disks = imsm_num_data_members(dev, MAP_1);
+                       used_disks = imsm_num_data_members(prev_map);
                        if (used_disks > 0) {
-                               array_blocks = blocks_per_member(map) *
+                               array_blocks = per_dev_array_size(map) *
                                        used_disks;
                                info->custom_array_size =
                                        round_size_to_mb(array_blocks,
@@ -3486,6 +3628,9 @@ static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info, char *
                        __u32 ord = get_imsm_ord_tbl_ent(dev, j, MAP_0);
                        __u32 idx = ord_to_idx(ord);
 
+                       if (super->disks && super->disks->index == (int)idx)
+                               info->disk.raid_disk = j;
+
                        if (!(ord & IMSM_ORD_REBUILD) &&
                            get_imsm_missing(super, idx)) {
                                missing = 1;
@@ -4448,6 +4593,11 @@ static int find_intel_hba_capability(int fd, struct intel_super *super, char *de
        struct sys_dev *hba_name;
        int rv = 0;
 
+       if (fd >= 0 && test_partition(fd)) {
+               pr_err("imsm: %s is a partition, cannot be used in IMSM\n",
+                      devname);
+               return 1;
+       }
        if (fd < 0 || check_env("IMSM_NO_PLATFORM")) {
                super->orom = NULL;
                super->hba = NULL;
@@ -4481,8 +4631,7 @@ static int find_intel_hba_capability(int fd, struct intel_super *super, char *de
                                hba = hba->next;
                        }
                        fprintf(stderr, ").\n"
-                               "    Mixing devices attached to different %s is not allowed.\n",
-                               hba_name->type == SYS_DEV_VMD ? "VMD domains" : "controllers");
+                               "    Mixing devices attached to different controllers is not allowed.\n");
                }
                return 2;
        }
@@ -5211,10 +5360,22 @@ static int check_name(struct intel_super *super, char *name, int quiet)
 {
        struct imsm_super *mpb = super->anchor;
        char *reason = NULL;
+       char *start = name;
+       size_t len = strlen(name);
        int i;
 
-       if (strlen(name) > MAX_RAID_SERIAL_LEN)
+       if (len > 0) {
+               while (isspace(start[len - 1]))
+                       start[--len] = 0;
+               while (*start && isspace(*start))
+                       ++start, --len;
+               memmove(name, start, len + 1);
+       }
+
+       if (len > MAX_RAID_SERIAL_LEN)
                reason = "must be 16 characters or less";
+       else if (len == 0)
+               reason = "must be a non-empty string";
 
        for (i = 0; i < mpb->num_raid_devs; i++) {
                struct imsm_dev *dev = get_imsm_dev(super, i);
@@ -5248,6 +5409,7 @@ static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
        struct imsm_map *map;
        int idx = mpb->num_raid_devs;
        int i;
+       int namelen;
        unsigned long long array_blocks;
        size_t size_old, size_new;
        unsigned long long num_data_stripes;
@@ -5327,7 +5489,12 @@ static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
                return 0;
        dv = xmalloc(sizeof(*dv));
        dev = xcalloc(1, sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1));
-       strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
+       /*
+        * Explicitly allow truncating to not confuse gcc's
+        * -Werror=stringop-truncation
+        */
+       namelen = min((int) strlen(name), MAX_RAID_SERIAL_LEN);
+       memcpy(dev->volume, name, namelen);
        array_blocks = calc_array_size(info->level, info->raid_disks,
                                               info->layout, info->chunk_size,
                                               s->size * BLOCKS_PER_KB);
@@ -5336,8 +5503,7 @@ static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
        array_blocks = round_size_to_mb(array_blocks, data_disks);
        size_per_member = array_blocks / data_disks;
 
-       dev->size_low = __cpu_to_le32((__u32) array_blocks);
-       dev->size_high = __cpu_to_le32((__u32) (array_blocks >> 32));
+       set_imsm_dev_size(dev, array_blocks);
        dev->status = (DEV_READ_COALESCING | DEV_WRITE_COALESCING);
        vol = &dev->vol;
        vol->migr_state = 0;
@@ -5346,9 +5512,6 @@ static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
        vol->curr_migr_unit = 0;
        map = get_imsm_map(dev, MAP_0);
        set_pba_of_lba0(map, super->create_offset);
-       set_blocks_per_member(map, info_to_blocks_per_member(info,
-                                                            size_per_member /
-                                                            BLOCKS_PER_KB));
        map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
        map->failed_disk_num = ~0;
        if (info->level > 0)
@@ -5380,6 +5543,11 @@ static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
        num_data_stripes /= map->num_domains;
        set_num_data_stripes(map, num_data_stripes);
 
+       size_per_member += NUM_BLOCKS_DIRTY_STRIPE_REGION;
+       set_blocks_per_member(map, info_to_blocks_per_member(info,
+                                                            size_per_member /
+                                                            BLOCKS_PER_KB));
+
        map->num_members = info->raid_disks;
        for (i = 0; i < map->num_members; i++) {
                /* initialized in add_to_super */
@@ -5390,9 +5558,9 @@ static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
        dev->my_vol_raid_dev_num = mpb->num_raid_devs_created;
 
        if (s->consistency_policy <= CONSISTENCY_POLICY_RESYNC) {
-               dev->rwh_policy = RWH_OFF;
+               dev->rwh_policy = RWH_MULTIPLE_OFF;
        } else if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
-               dev->rwh_policy = RWH_DISTRIBUTED;
+               dev->rwh_policy = RWH_MULTIPLE_DISTRIBUTED;
        } else {
                free(dev);
                free(dv);
@@ -5535,6 +5703,11 @@ static int add_to_super_imsm_volume(struct supertype *st, mdu_disk_info_t *dk,
                return 1;
        }
 
+       if (mpb->num_disks == 0)
+               if (!get_dev_sector_size(dl->fd, dl->devname,
+                                        &super->sector_size))
+                       return 1;
+
        if (!drive_validate_sector_size(super, dl)) {
                pr_err("Combining drives of different sector size in one volume is not allowed\n");
                return 1;
@@ -5737,7 +5910,7 @@ static int add_to_super_imsm(struct supertype *st, mdu_disk_info_t *dk,
                } else if (super->hba->type == SYS_DEV_VMD && super->orom &&
                    !imsm_orom_has_tpv_support(super->orom)) {
                        pr_err("\tPlatform configuration does not support non-Intel NVMe drives.\n"
-                              "\tPlease refer to Intel(R) RSTe user guide.\n");
+                              "\tPlease refer to Intel(R) RSTe/VROC user guide.\n");
                        free(dd->devname);
                        free(dd);
                        return 1;
@@ -6042,6 +6215,30 @@ static int mgmt_disk(struct supertype *st)
 
 __u32 crc32c_le(__u32 crc, unsigned char const *p, size_t len);
 
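+/* Checksum the prepared PPL header in buf and write it at ppl_sector;
+ * returns 0 on success or a negative errno on failure.
+ */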
+static int write_ppl_header(unsigned long long ppl_sector, int fd, void *buf)
+{
+       struct ppl_header *ppl_hdr = buf;
+       int ret;
+
+       ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));
+
+       if (lseek64(fd, ppl_sector * 512, SEEK_SET) < 0) {
+               ret = -errno;
+               perror("Failed to seek to PPL header location");
+               return ret;
+       }
+
+       if (write(fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
+               ret = -errno;
+               perror("Write PPL header failed");
+               return ret;
+       }
+
+       fsync(fd);
+
+       return 0;
+}
+
 static int write_init_ppl_imsm(struct supertype *st, struct mdinfo *info, int fd)
 {
        struct intel_super *super = st->sb;
@@ -6049,99 +6246,189 @@ static int write_init_ppl_imsm(struct supertype *st, struct mdinfo *info, int fd
        struct ppl_header *ppl_hdr;
        int ret;
 
-       ret = posix_memalign(&buf, 4096, PPL_HEADER_SIZE);
+       /* first clear entire ppl space */
+       ret = zero_disk_range(fd, info->ppl_sector, info->ppl_size);
+       if (ret)
+               return ret;
+
+       ret = posix_memalign(&buf, MAX_SECTOR_SIZE, PPL_HEADER_SIZE);
        if (ret) {
                pr_err("Failed to allocate PPL header buffer\n");
-               return ret;
+               return -ret;
        }
 
        memset(buf, 0, PPL_HEADER_SIZE);
        ppl_hdr = buf;
        memset(ppl_hdr->reserved, 0xff, PPL_HDR_RESERVED);
        ppl_hdr->signature = __cpu_to_le32(super->anchor->orig_family_num);
-       ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));
-
-       if (lseek64(fd, info->ppl_sector * 512, SEEK_SET) < 0) {
-               ret = errno;
-               perror("Failed to seek to PPL header location");
-       }
 
-       if (!ret && write(fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
-               ret = errno;
-               perror("Write PPL header failed");
+       if (info->mismatch_cnt) {
+               /*
+                * We are overwriting an invalid ppl. Make one entry with wrong
+                * checksum to prevent the kernel from skipping resync.
+                */
+               ppl_hdr->entries_count = __cpu_to_le32(1);
+               ppl_hdr->entries[0].checksum = ~0;
        }
 
-       if (!ret)
-               fsync(fd);
+       ret = write_ppl_header(info->ppl_sector, fd, buf);
 
        free(buf);
        return ret;
 }
 
+static int is_rebuilding(struct imsm_dev *dev);
+
 static int validate_ppl_imsm(struct supertype *st, struct mdinfo *info,
                             struct mdinfo *disk)
 {
        struct intel_super *super = st->sb;
        struct dl *d;
-       void *buf;
+       void *buf_orig, *buf, *buf_prev = NULL;
        int ret = 0;
-       struct ppl_header *ppl_hdr;
+       struct ppl_header *ppl_hdr = NULL;
        __u32 crc;
        struct imsm_dev *dev;
-       struct imsm_map *map;
        __u32 idx;
+       unsigned int i;
+       unsigned long long ppl_offset = 0;
+       unsigned long long prev_gen_num = 0;
 
        if (disk->disk.raid_disk < 0)
                return 0;
 
-       if (posix_memalign(&buf, 4096, PPL_HEADER_SIZE)) {
+       dev = get_imsm_dev(super, info->container_member);
+       idx = get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_0);
+       d = get_imsm_dl_disk(super, idx);
+
+       if (!d || d->index < 0 || is_failed(&d->disk))
+               return 0;
+
+       if (posix_memalign(&buf_orig, MAX_SECTOR_SIZE, PPL_HEADER_SIZE * 2)) {
                pr_err("Failed to allocate PPL header buffer\n");
                return -1;
        }
+       buf = buf_orig;
 
-       dev = get_imsm_dev(super, info->container_member);
-       map = get_imsm_map(dev, MAP_X);
-       idx = get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_X);
-       d = get_imsm_dl_disk(super, idx);
+       ret = 1;
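+       /* Walk the multiple-PPL area header by header, keeping the header
+        * with the highest generation number; ret stays 1 until a valid,
+        * matching header is found.
+        */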
+       while (ppl_offset < MULTIPLE_PPL_AREA_SIZE_IMSM) {
+               void *tmp;
 
-       if (!d || d->index < 0 || is_failed(&d->disk))
-               goto out;
+               dprintf("Checking potential PPL at offset: %llu\n", ppl_offset);
 
-       if (lseek64(d->fd, info->ppl_sector * 512, SEEK_SET) < 0) {
-               perror("Failed to seek to PPL header location");
-               ret = -1;
-               goto out;
+               if (lseek64(d->fd, info->ppl_sector * 512 + ppl_offset,
+                           SEEK_SET) < 0) {
+                       perror("Failed to seek to PPL header location");
+                       ret = -1;
+                       break;
+               }
+
+               if (read(d->fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
+                       perror("Read PPL header failed");
+                       ret = -1;
+                       break;
+               }
+
+               ppl_hdr = buf;
+
+               crc = __le32_to_cpu(ppl_hdr->checksum);
+               ppl_hdr->checksum = 0;
+
+               if (crc != ~crc32c_le(~0, buf, PPL_HEADER_SIZE)) {
+                       dprintf("Wrong PPL header checksum on %s\n",
+                               d->devname);
+                       break;
+               }
+
+               if (prev_gen_num > __le64_to_cpu(ppl_hdr->generation)) {
+                       /* previous was newest, it was already checked */
+                       break;
+               }
+
+               if ((__le32_to_cpu(ppl_hdr->signature) !=
+                             super->anchor->orig_family_num)) {
+                       dprintf("Wrong PPL header signature on %s\n",
+                               d->devname);
+                       ret = 1;
+                       break;
+               }
+
+               ret = 0;
+               prev_gen_num = __le64_to_cpu(ppl_hdr->generation);
+
+               ppl_offset += PPL_HEADER_SIZE;
+               for (i = 0; i < __le32_to_cpu(ppl_hdr->entries_count); i++)
+                       ppl_offset +=
+                                  __le32_to_cpu(ppl_hdr->entries[i].pp_size);
+
+               if (!buf_prev)
+                       buf_prev = buf + PPL_HEADER_SIZE;
+               tmp = buf_prev;
+               buf_prev = buf;
+               buf = tmp;
        }
 
-       if (read(d->fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
-               perror("Read PPL header failed");
-               ret = -1;
-               goto out;
+       if (buf_prev) {
+               buf = buf_prev;
+               ppl_hdr = buf_prev;
        }
 
-       ppl_hdr = buf;
+       /*
+        * Update the metadata to use the multiple PPL area (1MB).
+        * This is done once for all RAID members.
+        */
+       if (info->consistency_policy == CONSISTENCY_POLICY_PPL &&
+           info->ppl_size != (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9)) {
+               char subarray[20];
+               struct mdinfo *member_dev;
 
-       crc = __le32_to_cpu(ppl_hdr->checksum);
-       ppl_hdr->checksum = 0;
+               sprintf(subarray, "%d", info->container_member);
 
-       if (crc != ~crc32c_le(~0, buf, PPL_HEADER_SIZE)) {
-               dprintf("Wrong PPL header checksum on %s\n",
-                       d->devname);
-               ret = 1;
-       }
+               if (mdmon_running(st->container_devnm))
+                       st->update_tail = &st->updates;
 
-       if (!ret && (__le32_to_cpu(ppl_hdr->signature) !=
-                     super->anchor->orig_family_num)) {
-               dprintf("Wrong PPL header signature on %s\n",
-                       d->devname);
-               ret = 1;
+               if (st->ss->update_subarray(st, subarray, "ppl", NULL)) {
+                       pr_err("Failed to update subarray %s\n",
+                             subarray);
+               } else {
+                       if (st->update_tail)
+                               flush_metadata_updates(st);
+                       else
+                               st->ss->sync_metadata(st);
+                       info->ppl_size = (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9);
+                       for (member_dev = info->devs; member_dev;
+                            member_dev = member_dev->next)
+                               member_dev->ppl_size =
+                                   (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9);
+               }
        }
 
-out:
-       free(buf);
+       if (ret == 1) {
+               struct imsm_map *map = get_imsm_map(dev, MAP_X);
 
-       if (ret == 1 && map->map_state == IMSM_T_STATE_UNINITIALIZED)
-               return st->ss->write_init_ppl(st, info, d->fd);
+               if (map->map_state == IMSM_T_STATE_UNINITIALIZED ||
+                  (map->map_state == IMSM_T_STATE_NORMAL &&
+                  !(dev->vol.dirty & RAIDVOL_DIRTY)) ||
+                  (is_rebuilding(dev) &&
+                   dev->vol.curr_migr_unit == 0 &&
+                   get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_1) != idx))
+                       ret = st->ss->write_init_ppl(st, info, d->fd);
+               else
+                       info->mismatch_cnt++;
+       } else if (ret == 0 &&
+                  ppl_hdr->entries_count == 0 &&
+                  is_rebuilding(dev) &&
+                  info->resync_start == 0) {
+               /*
+                * The header has no entries - add a single empty entry and
+                * rewrite the header to prevent the kernel from going into
+                * resync after an interrupted rebuild.
+                */
+               ppl_hdr->entries_count = __cpu_to_le32(1);
+               ret = write_ppl_header(info->ppl_sector, d->fd, buf);
+       }
+
+       free(buf_orig);
 
        return ret;
 }
@@ -6899,7 +7186,7 @@ static int validate_geometry_imsm_volume(struct supertype *st, int level,
        mpb = super->anchor;
 
        if (!validate_geometry_imsm_orom(super, level, layout, raiddisks, chunk, size, verbose)) {
-               pr_err("RAID gemetry validation failed. Cannot proceed with the action(s).\n");
+               pr_err("RAID geometry validation failed. Cannot proceed with the action(s).\n");
                return 0;
        }
        if (!dev) {
@@ -6917,7 +7204,7 @@ static int validate_geometry_imsm_volume(struct supertype *st, int level,
 
                        pos = 0;
                        i = 0;
-                       e = get_extents(super, dl);
+                       e = get_extents(super, dl, 0);
                        if (!e) continue;
                        do {
                                unsigned long long esize;
@@ -6975,7 +7262,7 @@ static int validate_geometry_imsm_volume(struct supertype *st, int level,
        }
 
        /* retrieve the largest free space block */
-       e = get_extents(super, dl);
+       e = get_extents(super, dl, 0);
        maxsize = 0;
        i = 0;
        if (e) {
@@ -7073,7 +7360,7 @@ static int imsm_get_free_size(struct supertype *st, int raiddisks,
                if (super->orom && dl->index < 0 && mpb->num_raid_devs)
                        continue;
 
-               e = get_extents(super, dl);
+               e = get_extents(super, dl, 0);
                if (!e)
                        continue;
                for (i = 1; e[i-1].size; i++)
@@ -7168,6 +7455,15 @@ static int validate_geometry_imsm(struct supertype *st, int level, int layout,
                                                        verbose);
        }
 
+       if (size && (size < 1024)) {
+               pr_err("Given size must be greater than 1M.\n");
+               /* Depends on the algorithm in Create.c:
+                * if a container was given (dev == NULL), return -1;
+                * if a block device was given (dev != NULL), return 0.
+                */
+               return dev ? -1 : 0;
+       }
+
        if (!dev) {
                if (st->sb) {
                        struct intel_super *super = st->sb;
@@ -7382,11 +7678,12 @@ static int update_subarray_imsm(struct supertype *st, char *subarray,
                        append_metadata_update(st, u, sizeof(*u));
                } else {
                        struct imsm_dev *dev;
-                       int i;
+                       int i, namelen;
 
                        dev = get_imsm_dev(super, vol);
-                       strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
-                       dev->volume[MAX_RAID_SERIAL_LEN-1] = '\0';
+                       memset(dev->volume, '\0', MAX_RAID_SERIAL_LEN);
+                       namelen = min((int)strlen(name), MAX_RAID_SERIAL_LEN);
+                       memcpy(dev->volume, name, namelen);
                        for (i = 0; i < mpb->num_raid_devs; i++) {
                                dev = get_imsm_dev(super, i);
                                handle_missing(super, dev);
@@ -7403,9 +7700,9 @@ static int update_subarray_imsm(struct supertype *st, char *subarray,
                        return 2;
 
                if (strcmp(update, "ppl") == 0)
-                       new_policy = RWH_DISTRIBUTED;
+                       new_policy = RWH_MULTIPLE_DISTRIBUTED;
                else
-                       new_policy = RWH_OFF;
+                       new_policy = RWH_MULTIPLE_OFF;
 
                if (st->update_tail) {
                        struct imsm_update_rwh_policy *u = xmalloc(sizeof(*u));
@@ -7550,6 +7847,7 @@ static struct mdinfo *container_content_imsm(struct supertype *st, char *subarra
                int slot;
                int chunk;
                char *ep;
+               int level;
 
                if (subarray &&
                    (i != strtoul(subarray, &ep, 10) || *ep != '\0'))
@@ -7558,6 +7856,7 @@ static struct mdinfo *container_content_imsm(struct supertype *st, char *subarra
                dev = get_imsm_dev(super, i);
                map = get_imsm_map(dev, MAP_0);
                map2 = get_imsm_map(dev, MAP_1);
+               level = get_imsm_raid_level(map);
 
                /* do not publish arrays that are in the middle of an
                 * unsupported migration
@@ -7580,10 +7879,10 @@ static struct mdinfo *container_content_imsm(struct supertype *st, char *subarra
                chunk = __le16_to_cpu(map->blocks_per_strip) >> 1;
                /* mdadm does not support all metadata features- set the bit in all arrays state */
                if (!validate_geometry_imsm_orom(super,
-                                                get_imsm_raid_level(map), /* RAID level */
-                                                imsm_level_to_layout(get_imsm_raid_level(map)),
+                                                level, /* RAID level */
+                                                imsm_level_to_layout(level),
                                                 map->num_members, /* raid disks */
-                                                &chunk, join_u32(dev->size_low, dev->size_high),
+                                                &chunk, imsm_dev_size(dev),
                                                 1 /* verbose */)) {
                        pr_err("IMSM RAID geometry validation failed.  Array %s activation is blocked.\n",
                                dev->volume);
@@ -7605,6 +7904,7 @@ static struct mdinfo *container_content_imsm(struct supertype *st, char *subarra
                        int idx;
                        int skip;
                        __u32 ord;
+                       int missing = 0;
 
                        skip = 0;
                        idx = get_imsm_disk_idx(dev, slot, MAP_0);
@@ -7618,19 +7918,27 @@ static struct mdinfo *container_content_imsm(struct supertype *st, char *subarra
                                skip = 1;
                        if (d && is_failed(&d->disk))
                                skip = 1;
-                       if (ord & IMSM_ORD_REBUILD)
+                       if (!skip && (ord & IMSM_ORD_REBUILD))
                                recovery_start = 0;
 
                        /*
                         * if we skip some disks the array will be assembled degraded;
                         * reset resync start to avoid a dirty-degraded
                         * situation when performing the initial sync
-                        *
-                        * FIXME handle dirty degraded
                         */
-                       if ((skip || recovery_start == 0) &&
-                           !(dev->vol.dirty & RAIDVOL_DIRTY))
-                               this->resync_start = MaxSector;
+                       if (skip)
+                               missing++;
+
+                       if (!(dev->vol.dirty & RAIDVOL_DIRTY)) {
+                               if ((!able_to_resync(level, missing) ||
+                                    recovery_start == 0))
+                                       this->resync_start = MaxSector;
+                       } else {
+                               /*
+                                * FIXME handle dirty degraded
+                                */
+                       }
+
                        if (skip)
                                continue;
 
@@ -7659,17 +7967,15 @@ static struct mdinfo *container_content_imsm(struct supertype *st, char *subarra
 
                        info_d->events = __le32_to_cpu(mpb->generation_num);
                        info_d->data_offset = pba_of_lba0(map);
+                       info_d->component_size = calc_component_size(map, dev);
 
                        if (map->raid_level == 5) {
-                               info_d->component_size =
-                                               num_data_stripes(map) *
-                                               map->blocks_per_strip;
                                info_d->ppl_sector = this->ppl_sector;
                                info_d->ppl_size = this->ppl_size;
-                       } else {
-                               info_d->component_size = blocks_per_member(map);
+                               if (this->consistency_policy == CONSISTENCY_POLICY_PPL &&
+                                   recovery_start == 0)
+                                       this->resync_start = 0;
                        }
-                       info_d->consistency_policy = this->consistency_policy;
 
                        info_d->bb.supported = 1;
                        get_volume_badblocks(super->bbm_log, ord_to_idx(ord),
@@ -7883,7 +8189,7 @@ static int mark_failure(struct intel_super *super,
        strcat(buf, ":0");
        if ((len = strlen(buf)) >= MAX_RAID_SERIAL_LEN)
                shift = len - MAX_RAID_SERIAL_LEN + 1;
-       strncpy((char *)disk->serial, &buf[shift], MAX_RAID_SERIAL_LEN);
+       memcpy(disk->serial, &buf[shift], len + 1 - shift);
 
        disk->status |= FAILED_DISK;
        set_imsm_ord_tbl_ent(map, slot, idx | IMSM_ORD_REBUILD);
@@ -7899,7 +8205,8 @@ static int mark_failure(struct intel_super *super,
                        set_imsm_ord_tbl_ent(map2, slot2,
                                             idx | IMSM_ORD_REBUILD);
        }
-       if (map->failed_disk_num == 0xff)
+       if (map->failed_disk_num == 0xff ||
+               (!is_rebuilding(dev) && map->failed_disk_num > slot))
                map->failed_disk_num = slot;
 
        clear_disk_badblocks(super->bbm_log, ord_to_idx(ord));
@@ -7974,38 +8281,33 @@ static void handle_missing(struct intel_super *super, struct imsm_dev *dev)
 static unsigned long long imsm_set_array_size(struct imsm_dev *dev,
                                              long long new_size)
 {
-       int used_disks = imsm_num_data_members(dev, MAP_0);
        unsigned long long array_blocks;
-       struct imsm_map *map;
+       struct imsm_map *map = get_imsm_map(dev, MAP_0);
+       int used_disks = imsm_num_data_members(map);
 
        if (used_disks == 0) {
                /* when problems occur
                 * return current array_blocks value
                 */
-               array_blocks = __le32_to_cpu(dev->size_high);
-               array_blocks = array_blocks << 32;
-               array_blocks += __le32_to_cpu(dev->size_low);
+               array_blocks = imsm_dev_size(dev);
 
                return array_blocks;
        }
 
        /* set array size in metadata
         */
-       if (new_size <= 0) {
+       if (new_size <= 0)
                /* OLCE size change is caused by added disks
                 */
-               map = get_imsm_map(dev, MAP_0);
-               array_blocks = blocks_per_member(map) * used_disks;
-       } else {
+               array_blocks = per_dev_array_size(map) * used_disks;
+       else
                /* Online Volume Size Change
                 * Using  available free space
                 */
                array_blocks = new_size;
-       }
 
        array_blocks = round_size_to_mb(array_blocks, used_disks);
-       dev->size_low = __cpu_to_le32((__u32)array_blocks);
-       dev->size_high = __cpu_to_le32((__u32)(array_blocks >> 32));
+       set_imsm_dev_size(dev, array_blocks);
 
        return array_blocks;
 }
@@ -8111,10 +8413,10 @@ static int imsm_set_array_state(struct active_array *a, int consistent)
                                int used_disks;
                                struct mdinfo *mdi;
 
-                               used_disks = imsm_num_data_members(dev, MAP_0);
+                               used_disks = imsm_num_data_members(map);
                                if (used_disks > 0) {
                                        array_blocks =
-                                               blocks_per_member(map) *
+                                               per_dev_array_size(map) *
                                                used_disks;
                                        array_blocks =
                                                round_size_to_mb(array_blocks,
@@ -8206,7 +8508,8 @@ skip_mark_checkpoint:
                        dev->vol.dirty = RAIDVOL_CLEAN;
                } else {
                        dev->vol.dirty = RAIDVOL_DIRTY;
-                       if (dev->rwh_policy == RWH_DISTRIBUTED)
+                       if (dev->rwh_policy == RWH_DISTRIBUTED ||
+                           dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
                                dev->vol.dirty |= RAIDVOL_DSRECORD_VALID;
                }
                super->updates_pending++;
@@ -8257,7 +8560,7 @@ static void imsm_set_disk(struct active_array *a, int n, int state)
        disk = get_imsm_disk(super, ord_to_idx(ord));
 
        /* check for new failures */
-       if (state & DS_FAULTY) {
+       if (disk && (state & DS_FAULTY)) {
                if (mark_failure(super, dev, disk, ord_to_idx(ord)))
                        super->updates_pending++;
        }
@@ -8325,15 +8628,23 @@ static void imsm_set_disk(struct active_array *a, int n, int state)
                        break;
                }
                if (is_rebuilding(dev)) {
-                       dprintf_cont("while rebuilding.");
-                       if (map->map_state != map_state)  {
-                               dprintf_cont(" Map state change");
-                               end_migration(dev, super, map_state);
+                       dprintf_cont("while rebuilding ");
+                       if (state & DS_FAULTY)  {
+                               dprintf_cont("removing failed drive ");
+                               if (n == map->failed_disk_num) {
+                                       dprintf_cont("end migration");
+                                       end_migration(dev, super, map_state);
+                                       a->last_checkpoint = 0;
+                               } else {
+                                       dprintf_cont("fail detected during rebuild, changing map state");
+                                       map->map_state = map_state;
+                               }
                                super->updates_pending++;
-                       } else if (!rebuild_done) {
-                               break;
                        }
 
+                       if (!rebuild_done)
+                               break;
+
                        /* check if recovery is really finished */
                        for (mdi = a->info.devs; mdi ; mdi = mdi->next)
                                if (mdi->recovery_start != MaxSector) {
@@ -8342,7 +8653,7 @@ static void imsm_set_disk(struct active_array *a, int n, int state)
                                }
                        if (recovery_not_finished) {
                                dprintf_cont("\n");
-                               dprintf("Rebuild has not finished yet, state not changed");
+                               dprintf_cont("Rebuild has not finished yet");
                                if (a->last_checkpoint < mdi->recovery_start) {
                                        a->last_checkpoint =
                                                mdi->recovery_start;
@@ -8352,9 +8663,9 @@ static void imsm_set_disk(struct active_array *a, int n, int state)
                        }
 
                        dprintf_cont(" Rebuild done, still degraded");
-                       dev->vol.migr_state = 0;
-                       set_migr_type(dev, 0);
-                       dev->vol.curr_migr_unit = 0;
+                       end_migration(dev, super, map_state);
+                       a->last_checkpoint = 0;
+                       super->updates_pending++;
 
                        for (i = 0; i < map->num_members; i++) {
                                int idx = get_imsm_ord_tbl_ent(dev, i, MAP_0);
@@ -8535,7 +8846,7 @@ static struct dl *imsm_add_spare(struct intel_super *super, int slot,
                /* Does this unused device have the requisite free space?
                 * It needs to be able to cover all member volumes
                 */
-               ex = get_extents(super, dl);
+               ex = get_extents(super, dl, 1);
                if (!ex) {
                        dprintf("cannot get extents\n");
                        continue;
@@ -8555,11 +8866,11 @@ static struct dl *imsm_add_spare(struct intel_super *super, int slot,
                        pos = 0;
                        array_start = pba_of_lba0(map);
                        array_end = array_start +
-                                   blocks_per_member(map) - 1;
+                                   per_dev_array_size(map) - 1;
 
                        do {
                                /* check that we can start at pba_of_lba0 with
-                                * blocks_per_member of space
+                                * num_data_stripes*blocks_per_stripe of space
                                 */
                                if (array_start >= pos && array_end < ex[j].start) {
                                        found = 1;
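
The free-space test above requires the candidate per-disk extent [array_start, array_end] to sit entirely inside the gap between the end of the previously scanned used extent (pos) and the start of the next one (ex[j].start). A sketch of the same containment check, with an illustrative helper name not taken from the patch:

static int fits_in_gap(unsigned long long array_start,
                       unsigned long long array_end,
                       unsigned long long gap_start,
                       unsigned long long next_used_start)
{
        /* the whole extent must start at or after the gap and end
         * strictly before the next used extent begins */
        return array_start >= gap_start && array_end < next_used_start;
}
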
@@ -8758,10 +9069,9 @@ static struct mdinfo *imsm_activate_spare(struct active_array *a,
                di->component_size = a->info.component_size;
                di->container_member = inst;
                di->bb.supported = 1;
-               if (dev->rwh_policy == RWH_DISTRIBUTED) {
-                       di->consistency_policy = CONSISTENCY_POLICY_PPL;
+               if (a->info.consistency_policy == CONSISTENCY_POLICY_PPL) {
                        di->ppl_sector = get_ppl_sector(super, inst);
-                       di->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE) >> 9;
+                       di->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
                }
                super->random = random32();
                di->next = rv;
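
For the exported PPL size, the ">> 9" converts a byte count into 512-byte sectors; assuming MULTIPLE_PPL_AREA_SIZE_IMSM names the byte size of the whole multi-PPL area, di->ppl_size is that area expressed in sectors. As a hypothetical illustration (value for the example only, not quoted from the patch), a 1 MiB area would give (1024 * 1024) >> 9 == 2048 sectors.
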
@@ -8969,8 +9279,10 @@ static int apply_reshape_migration_update(struct imsm_update_reshape_migration *
                         */
                        if (u->new_chunksize > 0) {
                                unsigned long long num_data_stripes;
+                               struct imsm_map *dest_map =
+                                       get_imsm_map(dev, MAP_0);
                                int used_disks =
-                                       imsm_num_data_members(dev, MAP_0);
+                                       imsm_num_data_members(dest_map);
 
                                if (used_disks == 0)
                                        return ret_val;
@@ -8978,13 +9290,18 @@ static int apply_reshape_migration_update(struct imsm_update_reshape_migration *
                                map->blocks_per_strip =
                                        __cpu_to_le16(u->new_chunksize * 2);
                                num_data_stripes =
-                                       (join_u32(dev->size_low, dev->size_high)
-                                       / used_disks);
+                                       imsm_dev_size(dev) / used_disks;
                                num_data_stripes /= map->blocks_per_strip;
                                num_data_stripes /= map->num_domains;
                                set_num_data_stripes(map, num_data_stripes);
                        }
 
+                       /* ensure blocks_per_member has valid value
+                        */
+                       set_blocks_per_member(map,
+                                             per_dev_array_size(map) +
+                                             NUM_BLOCKS_DIRTY_STRIPE_REGION);
+
                        /* add disk
                         */
                        if (u->new_level != 5 || migr_map->raid_level != 0 ||
@@ -9048,18 +9365,24 @@ static int apply_size_change_update(struct imsm_update_size_change *u,
                if (id->index == (unsigned)u->subdev) {
                        struct imsm_dev *dev = get_imsm_dev(super, u->subdev);
                        struct imsm_map *map = get_imsm_map(dev, MAP_0);
-                       int used_disks = imsm_num_data_members(dev, MAP_0);
+                       int used_disks = imsm_num_data_members(map);
                        unsigned long long blocks_per_member;
                        unsigned long long num_data_stripes;
+                       unsigned long long new_size_per_disk;
+
+                       if (used_disks == 0)
+                               return 0;
 
                        /* calculate new size
                         */
-                       blocks_per_member = u->new_size / used_disks;
-                       num_data_stripes = blocks_per_member /
+                       new_size_per_disk = u->new_size / used_disks;
+                       blocks_per_member = new_size_per_disk +
+                                           NUM_BLOCKS_DIRTY_STRIPE_REGION;
+                       num_data_stripes = new_size_per_disk /
                                           map->blocks_per_strip;
                        num_data_stripes /= map->num_domains;
                        dprintf("(size: %llu, blocks per member: %llu, num_data_stipes: %llu)\n",
-                               u->new_size, blocks_per_member,
+                               u->new_size, new_size_per_disk,
                                num_data_stripes);
                        set_blocks_per_member(map, blocks_per_member);
                        set_num_data_stripes(map, num_data_stripes);
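
The size-change arithmetic above splits the requested total across the data members and then re-derives the per-disk metadata fields. A worked example with hypothetical numbers (none of them come from this patch), assuming three data members, a 128 KiB chunk (blocks_per_strip == 256) and num_domains == 1:

        /* u->new_size       = 586067968 sectors (requested total)
         * new_size_per_disk = 586067968 / 3       = 195355989 (truncated)
         * blocks_per_member = 195355989 + NUM_BLOCKS_DIRTY_STRIPE_REGION
         * num_data_stripes  = 195355989 / 256 / 1 = 763109    (truncated)
         */

Both divisions truncate, matching the integer arithmetic in the update handler.
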
@@ -9315,12 +9638,6 @@ static int apply_takeover_update(struct imsm_update_takeover *u,
        if (u->direction == R10_TO_R0) {
                unsigned long long num_data_stripes;
 
-               map->num_domains = 1;
-               num_data_stripes = blocks_per_member(map);
-               num_data_stripes /= map->blocks_per_strip;
-               num_data_stripes /= map->num_domains;
-               set_num_data_stripes(map, num_data_stripes);
-
                /* Number of failed disks must be half of initial disk number */
                if (imsm_count_failed(super, dev, MAP_0) !=
                                (map->num_members / 2))
@@ -9346,10 +9663,15 @@ static int apply_takeover_update(struct imsm_update_takeover *u,
                map->num_domains = 1;
                map->raid_level = 0;
                map->failed_disk_num = -1;
+               num_data_stripes = imsm_dev_size(dev) / 2;
+               num_data_stripes /= map->blocks_per_strip;
+               set_num_data_stripes(map, num_data_stripes);
        }
 
        if (u->direction == R0_TO_R10) {
                void **space;
+               unsigned long long num_data_stripes;
+
                /* update slots in current disk list */
                for (dm = super->disks; dm; dm = dm->next) {
                        if (dm->index >= 0)
@@ -9387,6 +9709,11 @@ static int apply_takeover_update(struct imsm_update_takeover *u,
                map->map_state = IMSM_T_STATE_DEGRADED;
                map->num_domains = 2;
                map->raid_level = 1;
+               num_data_stripes = imsm_dev_size(dev) / 2;
+               num_data_stripes /= map->blocks_per_strip;
+               num_data_stripes /= map->num_domains;
+               set_num_data_stripes(map, num_data_stripes);
+
                /* replace dev<->dev_new */
                dv->dev = dev_new;
        }
@@ -9532,7 +9859,7 @@ static void imsm_process_update(struct supertype *st,
 
                new_map = get_imsm_map(&u->dev, MAP_0);
                new_start = pba_of_lba0(new_map);
-               new_end = new_start + blocks_per_member(new_map);
+               new_end = new_start + per_dev_array_size(new_map);
                inf = get_disk_info(u);
 
                /* handle activate_spare versus create race:
@@ -9543,7 +9870,7 @@ static void imsm_process_update(struct supertype *st,
                        dev = get_imsm_dev(super, i);
                        map = get_imsm_map(dev, MAP_0);
                        start = pba_of_lba0(map);
-                       end = start + blocks_per_member(map);
+                       end = start + per_dev_array_size(map);
                        if ((new_start >= start && new_start <= end) ||
                            (start >= new_start && start <= new_end))
                                /* overlap */;
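
The activate-spare versus create race is resolved with a closed-interval overlap test between the new volume's member extent and each existing one. Assuming start <= end holds for both extents, the condition above is equivalent to the usual two-comparison form, sketched here with an illustrative helper name:

static int extents_overlap(unsigned long long a_start, unsigned long long a_end,
                           unsigned long long b_start, unsigned long long b_end)
{
        /* two closed intervals overlap iff each one starts at or
         * before the point where the other one ends */
        return a_start <= b_end && b_start <= a_end;
}
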
@@ -9659,6 +9986,7 @@ static void imsm_process_update(struct supertype *st,
                /* sanity check that we are not affecting the uuid of
                 * an active array
                 */
+               memset(name, 0, sizeof(name));
                snprintf(name, MAX_RAID_SERIAL_LEN, "%s", (char *) u->name);
                name[MAX_RAID_SERIAL_LEN] = '\0';
                for (a = st->arrays; a; a = a->next)
@@ -9670,7 +9998,7 @@ static void imsm_process_update(struct supertype *st,
                        break;
                }
 
-               snprintf((char *) dev->volume, MAX_RAID_SERIAL_LEN, "%s", name);
+               memcpy(dev->volume, name, MAX_RAID_SERIAL_LEN);
                super->updates_pending++;
                break;
        }
@@ -9706,7 +10034,7 @@ static void imsm_process_update(struct supertype *st,
                break;
        }
        default:
-               pr_err("error: unsuported process update type:(type: %d)\n",    type);
+               pr_err("error: unsupported process update type:(type: %d)\n",   type);
        }
 }
 
@@ -10332,7 +10660,7 @@ static struct md_bb *imsm_get_badblocks(struct active_array *a, int slot)
                return NULL;
 
        get_volume_badblocks(super->bbm_log, ord_to_idx(ord), pba_of_lba0(map),
-                            blocks_per_member(map), &super->bb);
+                            per_dev_array_size(map), &super->bb);
 
        return &super->bb;
 }
@@ -10427,7 +10755,7 @@ void init_migr_record_imsm(struct supertype *st, struct imsm_dev *dev,
                max(map_dest->blocks_per_strip, map_src->blocks_per_strip);
        migr_rec->dest_depth_per_unit *=
                max(map_dest->blocks_per_strip, map_src->blocks_per_strip);
-       new_data_disks = imsm_num_data_members(dev, MAP_0);
+       new_data_disks = imsm_num_data_members(map_dest);
        migr_rec->blocks_per_unit =
                __cpu_to_le32(migr_rec->dest_depth_per_unit * new_data_disks);
        migr_rec->dest_depth_per_unit =
@@ -10438,7 +10766,7 @@ void init_migr_record_imsm(struct supertype *st, struct imsm_dev *dev,
 
        if (array_blocks % __le32_to_cpu(migr_rec->blocks_per_unit))
                num_migr_units++;
-       migr_rec->num_migr_units = __cpu_to_le32(num_migr_units);
+       set_num_migr_units(migr_rec, num_migr_units);
 
        migr_rec->post_migr_vol_cap =  dev->size_low;
        migr_rec->post_migr_vol_cap_hi = dev->size_high;
@@ -10455,7 +10783,7 @@ void init_migr_record_imsm(struct supertype *st, struct imsm_dev *dev,
                        min_dev_sectors = dev_sectors;
                close(fd);
        }
-       migr_rec->ckpt_area_pba = __cpu_to_le32(min_dev_sectors -
+       set_migr_chkp_area_pba(migr_rec, min_dev_sectors -
                                        RAID_DISK_RESERVED_BLOCKS_IMSM_HI);
 
        write_imsm_migr_rec(st);
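
num_migr_units is a plain ceiling division: the array size divided by blocks_per_unit, rounded up when there is a remainder, then stored through set_num_migr_units(), which presumably splits the 64-bit count across the metadata's low/high words. An equivalent one-step form, as a sketch (helper name is illustrative):

static unsigned long long ceil_div(unsigned long long n, unsigned long long d)
{
        return (n + d - 1) / d;   /* assumes d != 0; same result as
                                   * "divide, then bump on remainder" */
}
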
@@ -10495,7 +10823,7 @@ int save_backup_imsm(struct supertype *st,
        int dest_layout = 0;
        int dest_chunk;
        unsigned long long start;
-       int data_disks = imsm_num_data_members(dev, MAP_0);
+       int data_disks = imsm_num_data_members(map_dest);
 
        targets = xmalloc(new_disks * sizeof(int));
 
@@ -10506,8 +10834,7 @@ int save_backup_imsm(struct supertype *st,
 
        start = info->reshape_progress * 512;
        for (i = 0; i < new_disks; i++) {
-               target_offsets[i] = (unsigned long long)
-                 __le32_to_cpu(super->migr_rec->ckpt_area_pba) * 512;
+               target_offsets[i] = migr_chkp_area_pba(super->migr_rec) * 512;
                /* move back copy area address, it will be moved forward
                 * in restore_stripes() using start input variable
                 */
@@ -10586,12 +10913,11 @@ int save_checkpoint_imsm(struct supertype *st, struct mdinfo *info, int state)
        if (info->reshape_progress % blocks_per_unit)
                curr_migr_unit++;
 
-       super->migr_rec->curr_migr_unit =
-               __cpu_to_le32(curr_migr_unit);
+       set_current_migr_unit(super->migr_rec, curr_migr_unit);
        super->migr_rec->rec_status = __cpu_to_le32(state);
-       super->migr_rec->dest_1st_member_lba =
-               __cpu_to_le32(curr_migr_unit *
-                             __le32_to_cpu(super->migr_rec->dest_depth_per_unit));
+       set_migr_dest_1st_member_lba(super->migr_rec,
+                       super->migr_rec->dest_depth_per_unit * curr_migr_unit);
+
        if (write_imsm_migr_rec(st) < 0) {
                dprintf("imsm: Cannot write migration record outside backup area\n");
                return 1;
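
The checkpoint unit follows the same pattern: reshape_progress divided by blocks_per_unit, rounded up for a partial unit, with the destination LBA being that unit count scaled by dest_depth_per_unit. A worked example with hypothetical numbers (not taken from the patch):

        /* blocks_per_unit        = 2048
         * info->reshape_progress = 10752  (5 full units + 512 blocks)
         * curr_migr_unit         = 10752 / 2048 = 5, bumped to 6 (remainder)
         * dest_1st_member_lba    = 6 * dest_depth_per_unit
         */
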
@@ -10625,8 +10951,8 @@ int recover_backup_imsm(struct supertype *st, struct mdinfo *info)
        char *buf = NULL;
        int retval = 1;
        unsigned int sector_size = super->sector_size;
-       unsigned long curr_migr_unit = __le32_to_cpu(migr_rec->curr_migr_unit);
-       unsigned long num_migr_units = __le32_to_cpu(migr_rec->num_migr_units);
+       unsigned long curr_migr_unit = current_migr_unit(migr_rec);
+       unsigned long num_migr_units = get_num_migr_units(migr_rec);
        char buffer[20];
        int skipped_disks = 0;
 
@@ -10653,11 +10979,9 @@ int recover_backup_imsm(struct supertype *st, struct mdinfo *info)
        map_dest = get_imsm_map(id->dev, MAP_0);
        new_disks = map_dest->num_members;
 
-       read_offset = (unsigned long long)
-                       __le32_to_cpu(migr_rec->ckpt_area_pba) * 512;
+       read_offset = migr_chkp_area_pba(migr_rec) * 512;
 
-       write_offset = ((unsigned long long)
-                       __le32_to_cpu(migr_rec->dest_1st_member_lba) +
+       write_offset = (migr_dest_1st_member_lba(migr_rec) +
                        pba_of_lba0(map_dest)) * 512;
 
        unit_len = __le32_to_cpu(migr_rec->dest_depth_per_unit) * 512;
@@ -11117,6 +11441,7 @@ enum imsm_reshape_type imsm_analyze_change(struct supertype *st,
        int imsm_layout = -1;
        int data_disks;
        struct imsm_dev *dev;
+       struct imsm_map *map;
        struct intel_super *super;
        unsigned long long current_size;
        unsigned long long free_size;
@@ -11207,7 +11532,8 @@ enum imsm_reshape_type imsm_analyze_change(struct supertype *st,
 
        super = st->sb;
        dev = get_imsm_dev(super, super->current_vol);
-       data_disks = imsm_num_data_members(dev , MAP_0);
+       map = get_imsm_map(dev, MAP_0);
+       data_disks = imsm_num_data_members(map);
        /* compute current size per disk member
         */
        current_size = info.custom_array_size / data_disks;
@@ -11215,7 +11541,7 @@ enum imsm_reshape_type imsm_analyze_change(struct supertype *st,
        if (geo->size > 0 && geo->size != MAX_SIZE) {
                /* align component size
                 */
-               geo->size = imsm_component_size_aligment_check(
+               geo->size = imsm_component_size_alignment_check(
                                    get_imsm_raid_level(dev->vol.map),
                                    chunk * 1024, super->sector_size,
                                    geo->size * 2);
@@ -11249,7 +11575,7 @@ enum imsm_reshape_type imsm_analyze_change(struct supertype *st,
                        max_size = free_size + current_size;
                        /* align component size
                         */
-                       max_size = imsm_component_size_aligment_check(
+                       max_size = imsm_component_size_alignment_check(
                                        get_imsm_raid_level(dev->vol.map),
                                        chunk * 1024, super->sector_size,
                                        max_size);
@@ -11376,9 +11702,6 @@ static int imsm_reshape_super(struct supertype *st, unsigned long long size,
        dprintf("for level      : %i\n", geo.level);
        dprintf("for raid_disks : %i\n", geo.raid_disks);
 
-       if (experimental() == 0)
-               return ret_val;
-
        if (strcmp(st->container_devnm, st->devnm) == 0) {
                /* On container level we can only increase number of devices. */
                dprintf("imsm: info: Container operation\n");
@@ -11676,7 +11999,7 @@ static int imsm_manage_reshape(
        struct intel_dev *dv;
        unsigned int sector_size = super->sector_size;
        struct imsm_dev *dev = NULL;
-       struct imsm_map *map_src;
+       struct imsm_map *map_src, *map_dest;
        int migr_vol_qan = 0;
        int ndata, odata; /* [bytes] */
        int chunk; /* [bytes] */
@@ -11714,12 +12037,13 @@ static int imsm_manage_reshape(
                goto abort;
        }
 
+       map_dest = get_imsm_map(dev, MAP_0);
        map_src = get_imsm_map(dev, MAP_1);
        if (map_src == NULL)
                goto abort;
 
-       ndata = imsm_num_data_members(dev, MAP_0);
-       odata = imsm_num_data_members(dev, MAP_1);
+       ndata = imsm_num_data_members(map_dest);
+       odata = imsm_num_data_members(map_src);
 
        chunk = __le16_to_cpu(map_src->blocks_per_strip) * 512;
        old_data_stripe_length = odata * chunk;
@@ -11750,7 +12074,7 @@ static int imsm_manage_reshape(
        buf_size = __le32_to_cpu(migr_rec->blocks_per_unit) * 512;
        /* extend  buffer size for parity disk */
        buf_size += __le32_to_cpu(migr_rec->dest_depth_per_unit) * 512;
-       /* add space for stripe aligment */
+       /* add space for stripe alignment */
        buf_size += old_data_stripe_length;
        if (posix_memalign((void **)&buf, MAX_SECTOR_SIZE, buf_size)) {
                dprintf("imsm: Cannot allocate checkpoint buffer\n");
@@ -11760,12 +12084,12 @@ static int imsm_manage_reshape(
        max_position = sra->component_size * ndata;
        source_layout = imsm_level_to_layout(map_src->raid_level);
 
-       while (__le32_to_cpu(migr_rec->curr_migr_unit) <
-              __le32_to_cpu(migr_rec->num_migr_units)) {
+       while (current_migr_unit(migr_rec) <
+              get_num_migr_units(migr_rec)) {
                /* current reshape position [blocks] */
                unsigned long long current_position =
                        __le32_to_cpu(migr_rec->blocks_per_unit)
-                       * __le32_to_cpu(migr_rec->curr_migr_unit);
+                       * current_migr_unit(migr_rec);
                unsigned long long border;
 
                /* Check that array hasn't become failed.