]> git.ipfire.org Git - thirdparty/mdadm.git/blobdiff - super-intel.c
imsm: use correct map when validating ppl
[thirdparty/mdadm.git] / super-intel.c
index be973f8b9e07c1caa778780b6f911a10424cea58..cf5d8226996a7a5274e5c229387ed8695aa61ee7 100644 (file)
@@ -92,6 +92,9 @@
 #define NUM_BLOCKS_DIRTY_STRIPE_REGION 2056
 #define SECT_PER_MB_SHIFT 11
 #define MAX_SECTOR_SIZE 4096
+#define MULTIPLE_PPL_AREA_SIZE_IMSM (1024 * 1024) /* Size of the whole
+                                                  * multiple PPL area
+                                                  */
 
 /* Disk configuration info. */
 #define IMSM_MAX_DEVICES 255
@@ -207,6 +210,9 @@ struct imsm_dev {
 #define RWH_OFF 0
 #define RWH_DISTRIBUTED 1
 #define RWH_JOURNALING_DRIVE 2
+#define RWH_MULTIPLE_DISTRIBUTED 3
+#define RWH_MULTIPLE_PPLS_JOURNALING_DRIVE 4
+#define RWH_MULTIPLE_OFF 5
        __u8  rwh_policy; /* Raid Write Hole Policy */
        __u8  jd_serial[MAX_RAID_SERIAL_LEN]; /* Journal Drive serial number */
        __u8  filler1;
@@ -284,7 +290,7 @@ static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed"
                                 *  already been migrated and must
                                 *  be recovered from checkpoint area */
 
-#define PPL_ENTRY_SPACE (128 * 1024) /* Size of the PPL, without the header */
+#define PPL_ENTRY_SPACE (128 * 1024) /* Size of single PPL, without the header */
 
 struct migr_record {
        __u32 rec_status;           /* Status used to determine how to restart
@@ -1396,6 +1402,7 @@ int get_spare_criteria_imsm(struct supertype *st, struct spare_criteria *c)
        unsigned long long size = 0;
 
        c->min_size = 0;
+       c->sector_size = 0;
 
        if (!super)
                return -EINVAL;
@@ -1419,6 +1426,7 @@ int get_spare_criteria_imsm(struct supertype *st, struct spare_criteria *c)
        size += imsm_min_reserved_sectors(super);
 
        c->min_size = size * 512;
+       c->sector_size = super->sector_size;
 
        return 0;
 }
@@ -1480,13 +1488,16 @@ static void print_imsm_dev(struct intel_super *super,
                       ord & IMSM_ORD_REBUILD ? " (out-of-sync)" : "");
        } else
                printf("      This Slot : ?\n");
+       printf("    Sector Size : %u\n", super->sector_size);
        sz = __le32_to_cpu(dev->size_high);
        sz <<= 32;
        sz += __le32_to_cpu(dev->size_low);
-       printf("     Array Size : %llu%s\n", (unsigned long long)sz,
+       printf("     Array Size : %llu%s\n",
+                  (unsigned long long)sz * 512 / super->sector_size,
               human_size(sz * 512));
        sz = blocks_per_member(map);
-       printf("   Per Dev Size : %llu%s\n", (unsigned long long)sz,
+       printf("   Per Dev Size : %llu%s\n",
+                  (unsigned long long)sz * 512 / super->sector_size,
               human_size(sz * 512));
        printf("  Sector Offset : %llu\n",
                pba_of_lba0(map));
@@ -1534,12 +1545,16 @@ static void print_imsm_dev(struct intel_super *super,
        printf("    Dirty State : %s\n", (dev->vol.dirty & RAIDVOL_DIRTY) ?
                                         "dirty" : "clean");
        printf("     RWH Policy : ");
-       if (dev->rwh_policy == RWH_OFF)
+       if (dev->rwh_policy == RWH_OFF || dev->rwh_policy == RWH_MULTIPLE_OFF)
                printf("off\n");
        else if (dev->rwh_policy == RWH_DISTRIBUTED)
                printf("PPL distributed\n");
        else if (dev->rwh_policy == RWH_JOURNALING_DRIVE)
                printf("PPL journaling drive\n");
+       else if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
+               printf("Multiple distributed PPLs\n");
+       else if (dev->rwh_policy == RWH_MULTIPLE_PPLS_JOURNALING_DRIVE)
+               printf("Multiple PPLs on journaling drive\n");
        else
                printf("<unknown:%d>\n", dev->rwh_policy);
 }
@@ -3289,10 +3304,16 @@ static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info,
        memset(info->uuid, 0, sizeof(info->uuid));
        info->recovery_start = MaxSector;
 
-       if (info->array.level == 5 && dev->rwh_policy == RWH_DISTRIBUTED) {
+       if (info->array.level == 5 &&
+           (dev->rwh_policy == RWH_DISTRIBUTED ||
+            dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)) {
                info->consistency_policy = CONSISTENCY_POLICY_PPL;
                info->ppl_sector = get_ppl_sector(super, super->current_vol);
-               info->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE) >> 9;
+               if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
+                       info->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
+               else
+                       info->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE)
+                                         >> 9;
        } else if (info->array.level <= 0) {
                info->consistency_policy = CONSISTENCY_POLICY_NONE;
        } else {
@@ -4018,7 +4039,7 @@ static void migrate(struct imsm_dev *dev, struct intel_super *super,
 
        /* duplicate and then set the target end state in map[0] */
        memcpy(dest, src, sizeof_imsm_map(src));
-       if (migr_type == MIGR_REBUILD || migr_type ==  MIGR_GEN_MIGR) {
+       if (migr_type == MIGR_GEN_MIGR) {
                __u32 ord;
                int i;
 
@@ -4160,8 +4181,8 @@ int check_mpb_migr_compatibility(struct intel_super *super)
                        if (pba_of_lba0(map0) != pba_of_lba0(map1))
                                /* migration optimization area was used */
                                return -1;
-                       if (migr_rec->ascending_migr == 0
-                               && migr_rec->dest_depth_per_unit > 0)
+                       if (migr_rec->ascending_migr == 0 &&
+                           migr_rec->dest_depth_per_unit > 0)
                                /* descending reshape not supported yet */
                                return -1;
                }
@@ -5385,9 +5406,9 @@ static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
        dev->my_vol_raid_dev_num = mpb->num_raid_devs_created;
 
        if (s->consistency_policy <= CONSISTENCY_POLICY_RESYNC) {
-               dev->rwh_policy = RWH_OFF;
+               dev->rwh_policy = RWH_MULTIPLE_OFF;
        } else if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
-               dev->rwh_policy = RWH_DISTRIBUTED;
+               dev->rwh_policy = RWH_MULTIPLE_DISTRIBUTED;
        } else {
                free(dev);
                free(dv);
@@ -6044,7 +6065,12 @@ static int write_init_ppl_imsm(struct supertype *st, struct mdinfo *info, int fd
        struct ppl_header *ppl_hdr;
        int ret;
 
-       ret = posix_memalign(&buf, 4096, PPL_HEADER_SIZE);
+       /* first clear entire ppl space */
+       ret = zero_disk_range(fd, info->ppl_sector, info->ppl_size);
+       if (ret)
+               return ret;
+
+       ret = posix_memalign(&buf, MAX_SECTOR_SIZE, PPL_HEADER_SIZE);
        if (ret) {
                pr_err("Failed to allocate PPL header buffer\n");
                return ret;
@@ -6054,6 +6080,16 @@ static int write_init_ppl_imsm(struct supertype *st, struct mdinfo *info, int fd
        ppl_hdr = buf;
        memset(ppl_hdr->reserved, 0xff, PPL_HDR_RESERVED);
        ppl_hdr->signature = __cpu_to_le32(super->anchor->orig_family_num);
+
+       if (info->mismatch_cnt) {
+               /*
+                * We are overwriting an invalid ppl. Make one entry with wrong
+                * checksum to prevent the kernel from skipping resync.
+                */
+               ppl_hdr->entries_count = __cpu_to_le32(1);
+               ppl_hdr->entries[0].checksum = ~0;
+       }
+
        ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));
 
        if (lseek64(fd, info->ppl_sector * 512, SEEK_SET) < 0) {
@@ -6083,60 +6119,119 @@ static int validate_ppl_imsm(struct supertype *st, struct mdinfo *info,
        struct ppl_header *ppl_hdr;
        __u32 crc;
        struct imsm_dev *dev;
-       struct imsm_map *map;
        __u32 idx;
+       unsigned int i;
+       unsigned long long ppl_offset = 0;
+       unsigned long long prev_gen_num = 0;
 
        if (disk->disk.raid_disk < 0)
                return 0;
 
-       if (posix_memalign(&buf, 4096, PPL_HEADER_SIZE)) {
+       if (posix_memalign(&buf, MAX_SECTOR_SIZE, PPL_HEADER_SIZE)) {
                pr_err("Failed to allocate PPL header buffer\n");
                return -1;
        }
 
        dev = get_imsm_dev(super, info->container_member);
-       map = get_imsm_map(dev, MAP_X);
-       idx = get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_X);
+       idx = get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_0);
        d = get_imsm_dl_disk(super, idx);
 
        if (!d || d->index < 0 || is_failed(&d->disk))
                goto out;
 
-       if (lseek64(d->fd, info->ppl_sector * 512, SEEK_SET) < 0) {
-               perror("Failed to seek to PPL header location");
-               ret = -1;
-               goto out;
-       }
+       ret = 1;
+       while (ppl_offset < MULTIPLE_PPL_AREA_SIZE_IMSM) {
+               dprintf("Checking potential PPL at offset: %llu\n", ppl_offset);
 
-       if (read(d->fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
-               perror("Read PPL header failed");
-               ret = -1;
-               goto out;
-       }
+               if (lseek64(d->fd, info->ppl_sector * 512 + ppl_offset,
+                           SEEK_SET) < 0) {
+                       perror("Failed to seek to PPL header location");
+                       ret = -1;
+                       goto out;
+               }
 
-       ppl_hdr = buf;
+               if (read(d->fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
+                       perror("Read PPL header failed");
+                       ret = -1;
+                       goto out;
+               }
 
-       crc = __le32_to_cpu(ppl_hdr->checksum);
-       ppl_hdr->checksum = 0;
+               ppl_hdr = buf;
 
-       if (crc != ~crc32c_le(~0, buf, PPL_HEADER_SIZE)) {
-               dprintf("Wrong PPL header checksum on %s\n",
-                       d->devname);
-               ret = 1;
-       }
+               crc = __le32_to_cpu(ppl_hdr->checksum);
+               ppl_hdr->checksum = 0;
+
+               if (crc != ~crc32c_le(~0, buf, PPL_HEADER_SIZE)) {
+                       dprintf("Wrong PPL header checksum on %s\n",
+                               d->devname);
+                       goto out;
+               }
+
+               if (prev_gen_num > __le64_to_cpu(ppl_hdr->generation)) {
+                       /* previous was newest, it was already checked */
+                       goto out;
+               }
+
+               if ((__le32_to_cpu(ppl_hdr->signature) !=
+                             super->anchor->orig_family_num)) {
+                       dprintf("Wrong PPL header signature on %s\n",
+                               d->devname);
+                       ret = 1;
+                       goto out;
+               }
 
-       if (!ret && (__le32_to_cpu(ppl_hdr->signature) !=
-                     super->anchor->orig_family_num)) {
-               dprintf("Wrong PPL header signature on %s\n",
-                       d->devname);
-               ret = 1;
+               ret = 0;
+               prev_gen_num = __le64_to_cpu(ppl_hdr->generation);
+
+               ppl_offset += PPL_HEADER_SIZE;
+               for (i = 0; i < __le32_to_cpu(ppl_hdr->entries_count); i++)
+                       ppl_offset +=
+                                  __le32_to_cpu(ppl_hdr->entries[i].pp_size);
        }
 
 out:
        free(buf);
 
-       if (ret == 1 && map->map_state == IMSM_T_STATE_UNINITIALIZED)
-               return st->ss->write_init_ppl(st, info, d->fd);
+       /*
+        * Update metadata to use multiple PPLs area (1MB).
+        * This is done once for all RAID members
+        */
+       if (info->consistency_policy == CONSISTENCY_POLICY_PPL &&
+           info->ppl_size != (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9)) {
+               char subarray[20];
+               struct mdinfo *member_dev;
+
+               sprintf(subarray, "%d", info->container_member);
+
+               if (mdmon_running(st->container_devnm))
+                       st->update_tail = &st->updates;
+
+               if (st->ss->update_subarray(st, subarray, "ppl", NULL)) {
+                       pr_err("Failed to update subarray %s\n",
+                             subarray);
+               } else {
+                       if (st->update_tail)
+                               flush_metadata_updates(st);
+                       else
+                               st->ss->sync_metadata(st);
+                       info->ppl_size = (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9);
+                       for (member_dev = info->devs; member_dev;
+                            member_dev = member_dev->next)
+                               member_dev->ppl_size =
+                                   (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9);
+               }
+       }
+
+       if (ret == 1) {
+               struct imsm_map *map = get_imsm_map(dev, MAP_X);
+
+               if (map->map_state == IMSM_T_STATE_UNINITIALIZED ||
+                  (map->map_state == IMSM_T_STATE_NORMAL &&
+                  !(dev->vol.dirty & RAIDVOL_DIRTY)))
+                       ret = st->ss->write_init_ppl(st, info, d->fd);
+               else
+                       info->mismatch_cnt++;
+       }
 
        return ret;
 }
@@ -6437,7 +6532,7 @@ active_arrays_by_format(char *name, char* hba, struct md_list **devlist,
 
        for (memb = mdstat ; memb ; memb = memb->next) {
                if (memb->metadata_version &&
-                   (strncmp(memb->metadata_version, "external:", 9) == 0)  &&
+                   (strncmp(memb->metadata_version, "external:", 9) == 0) &&
                    (strcmp(&memb->metadata_version[9], name) == 0) &&
                    !is_subarray(memb->metadata_version+9) &&
                    memb->members) {
@@ -7398,9 +7493,9 @@ static int update_subarray_imsm(struct supertype *st, char *subarray,
                        return 2;
 
                if (strcmp(update, "ppl") == 0)
-                       new_policy = RWH_DISTRIBUTED;
+                       new_policy = RWH_MULTIPLE_DISTRIBUTED;
                else
-                       new_policy = RWH_OFF;
+                       new_policy = RWH_MULTIPLE_OFF;
 
                if (st->update_tail) {
                        struct imsm_update_rwh_policy *u = xmalloc(sizeof(*u));
@@ -7661,10 +7756,12 @@ static struct mdinfo *container_content_imsm(struct supertype *st, char *subarra
                                                map->blocks_per_strip;
                                info_d->ppl_sector = this->ppl_sector;
                                info_d->ppl_size = this->ppl_size;
+                               if (this->consistency_policy == CONSISTENCY_POLICY_PPL &&
+                                   recovery_start == 0)
+                                       this->resync_start = 0;
                        } else {
                                info_d->component_size = blocks_per_member(map);
                        }
-                       info_d->consistency_policy = this->consistency_policy;
 
                        info_d->bb.supported = 1;
                        get_volume_badblocks(super->bbm_log, ord_to_idx(ord),
@@ -7931,14 +8028,35 @@ static void handle_missing(struct intel_super *super, struct imsm_dev *dev)
        /* end process for initialization and rebuild only
         */
        if (is_gen_migration(dev) == 0) {
-               __u8 map_state;
-               int failed;
+               int failed = imsm_count_failed(super, dev, MAP_0);
 
-               failed = imsm_count_failed(super, dev, MAP_0);
-               map_state = imsm_check_degraded(super, dev, failed, MAP_0);
+               if (failed) {
+                       __u8 map_state;
+                       struct imsm_map *map = get_imsm_map(dev, MAP_0);
+                       struct imsm_map *map1;
+                       int i, ord, ord_map1;
+                       int rebuilt = 1;
 
-               if (failed)
-                       end_migration(dev, super, map_state);
+                       for (i = 0; i < map->num_members; i++) {
+                               ord = get_imsm_ord_tbl_ent(dev, i, MAP_0);
+                               if (!(ord & IMSM_ORD_REBUILD))
+                                       continue;
+
+                               map1 = get_imsm_map(dev, MAP_1);
+                               if (!map1)
+                                       continue;
+
+                               ord_map1 = __le32_to_cpu(map1->disk_ord_tbl[i]);
+                               if (ord_map1 & IMSM_ORD_REBUILD)
+                                       rebuilt = 0;
+                       }
+
+                       if (rebuilt) {
+                               map_state = imsm_check_degraded(super, dev,
+                                                               failed, MAP_0);
+                               end_migration(dev, super, map_state);
+                       }
+               }
        }
        for (dl = super->missing; dl; dl = dl->next)
                mark_missing(super, dev, &dl->disk, dl->index);
@@ -8180,7 +8298,8 @@ skip_mark_checkpoint:
                        dev->vol.dirty = RAIDVOL_CLEAN;
                } else {
                        dev->vol.dirty = RAIDVOL_DIRTY;
-                       if (dev->rwh_policy == RWH_DISTRIBUTED)
+                       if (dev->rwh_policy == RWH_DISTRIBUTED ||
+                           dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
                                dev->vol.dirty |= RAIDVOL_DSRECORD_VALID;
                }
                super->updates_pending++;
@@ -8220,8 +8339,10 @@ static void imsm_set_disk(struct active_array *a, int n, int state)
        int failed;
        int ord;
        __u8 map_state;
+       int rebuild_done = 0;
+       int i;
 
-       ord = imsm_disk_slot_to_ord(a, n);
+       ord = get_imsm_ord_tbl_ent(dev, n, MAP_X);
        if (ord < 0)
                return;
 
@@ -8239,6 +8360,7 @@ static void imsm_set_disk(struct active_array *a, int n, int state)
                struct imsm_map *migr_map = get_imsm_map(dev, MAP_1);
 
                set_imsm_ord_tbl_ent(migr_map, n, ord_to_idx(ord));
+               rebuild_done = 1;
                super->updates_pending++;
        }
 
@@ -8301,7 +8423,39 @@ static void imsm_set_disk(struct active_array *a, int n, int state)
                                dprintf_cont(" Map state change");
                                end_migration(dev, super, map_state);
                                super->updates_pending++;
+                       } else if (!rebuild_done) {
+                               break;
                        }
+
+                       /* check if recovery is really finished */
+                       for (mdi = a->info.devs; mdi ; mdi = mdi->next)
+                               if (mdi->recovery_start != MaxSector) {
+                                       recovery_not_finished = 1;
+                                       break;
+                               }
+                       if (recovery_not_finished) {
+                               dprintf_cont("\n");
+                               dprintf("Rebuild has not finished yet, state not changed");
+                               if (a->last_checkpoint < mdi->recovery_start) {
+                                       a->last_checkpoint =
+                                               mdi->recovery_start;
+                                       super->updates_pending++;
+                               }
+                               break;
+                       }
+
+                       dprintf_cont(" Rebuild done, still degraded");
+                       dev->vol.migr_state = 0;
+                       set_migr_type(dev, 0);
+                       dev->vol.curr_migr_unit = 0;
+
+                       for (i = 0; i < map->num_members; i++) {
+                               int idx = get_imsm_ord_tbl_ent(dev, i, MAP_0);
+
+                               if (idx & IMSM_ORD_REBUILD)
+                                       map->failed_disk_num = i;
+                       }
+                       super->updates_pending++;
                        break;
                }
                if (is_gen_migration(dev)) {
@@ -8697,10 +8851,9 @@ static struct mdinfo *imsm_activate_spare(struct active_array *a,
                di->component_size = a->info.component_size;
                di->container_member = inst;
                di->bb.supported = 1;
-               if (dev->rwh_policy == RWH_DISTRIBUTED) {
-                       di->consistency_policy = CONSISTENCY_POLICY_PPL;
+               if (a->info.consistency_policy == CONSISTENCY_POLICY_PPL) {
                        di->ppl_sector = get_ppl_sector(super, inst);
-                       di->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE) >> 9;
+                       di->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
                }
                super->random = random32();
                di->next = rv;
@@ -9931,7 +10084,7 @@ static void imsm_delete(struct intel_super *super, struct dl **dlp, unsigned ind
        struct imsm_dev *dev;
        struct imsm_map *map;
        unsigned int i, j, num_members;
-       __u32 ord;
+       __u32 ord, ord_map0;
        struct bbm_log *log = super->bbm_log;
 
        dprintf("deleting device[%d] from imsm_super\n", index);
@@ -9953,12 +10106,13 @@ static void imsm_delete(struct intel_super *super, struct dl **dlp, unsigned ind
                         * ord-flags to the first map
                         */
                        ord = get_imsm_ord_tbl_ent(dev, j, MAP_X);
+                       ord_map0 = get_imsm_ord_tbl_ent(dev, j, MAP_0);
 
                        if (ord_to_idx(ord) <= index)
                                continue;
 
                        map = get_imsm_map(dev, MAP_0);
-                       set_imsm_ord_tbl_ent(map, j, ord_to_idx(ord - 1));
+                       set_imsm_ord_tbl_ent(map, j, ord_map0 - 1);
                        map = get_imsm_map(dev, MAP_1);
                        if (map)
                                set_imsm_ord_tbl_ent(map, j, ord - 1);
@@ -10687,6 +10841,10 @@ static const char *imsm_get_disk_controller_domain(const char *path)
                        drv = "isci";
                else if (hba && hba->type == SYS_DEV_SATA)
                        drv = "ahci";
+               else if (hba && hba->type == SYS_DEV_VMD)
+                       drv = "vmd";
+               else if (hba && hba->type == SYS_DEV_NVME)
+                       drv = "nvme";
                else
                        drv = "unknown";
                dprintf("path: %s hba: %s attached: %s\n",
@@ -10869,8 +11027,7 @@ static int imsm_create_metadata_update_for_reshape(
         */
        spares = get_spares_for_grow(st);
 
-       if (spares == NULL
-           || delta_disks > spares->array.spare_disks) {
+       if (spares == NULL || delta_disks > spares->array.spare_disks) {
                pr_err("imsm: ERROR: Cannot get spare devices for %s.\n", geo->dev_name);
                i = -1;
                goto abort;
@@ -11635,8 +11792,8 @@ static int imsm_manage_reshape(
 
        /* Find volume during the reshape */
        for (dv = super->devlist; dv; dv = dv->next) {
-               if (dv->dev->vol.migr_type == MIGR_GEN_MIGR
-                   && dv->dev->vol.migr_state == 1) {
+               if (dv->dev->vol.migr_type == MIGR_GEN_MIGR &&
+                   dv->dev->vol.migr_state == 1) {
                        dev = dv->dev;
                        migr_vol_qan++;
                }