const unsigned char *buf,
unsigned len);
+#define DDF_NOTFOUND (~0U)
+#define DDF_CONTAINER (DDF_NOTFOUND-1)
+
/* The DDF metadata handling.
* DDF metadata lives at the end of the device.
* The last 512 byte block provides an 'anchor' which is used to locate
/*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
bvd are always the same size */
};
+#define LBA_OFFSET(ddf, vd) ((__u64 *) &(vd)->phys_refnum[(ddf)->mppe])
/* vd_config.cache_pol[7] is a bitmap */
#define DDF_cache_writeback 1 /* else writethrough */
char space[512];
struct {
struct vcl *next;
- __u64 *lba_offset; /* location in 'conf' of
- * the lba table */
unsigned int vcnum; /* index into ->virt */
+ struct vd_config **other_bvds;
__u64 *block_sizes; /* NULL if all the same */
};
};
char *devname;
int fd;
unsigned long long size; /* sectors */
+ unsigned long long primary_lba; /* sectors */
+ unsigned long long secondary_lba; /* sectors */
+ unsigned long long workspace_lba; /* sectors */
int pdnum; /* index in ->phys */
struct spare_assign *spare;
void *mdupdate; /* hold metadata update */
#define offsetof(t,f) ((size_t)&(((t*)0)->f))
#endif
+#if DEBUG
+static int all_ff(const char *guid);
+/* Debug helper: dump state/init_state for every in-use VD entry. */
+static void pr_state(struct ddf_super *ddf, const char *msg)
+{
+	unsigned int i;
+	dprintf("%s/%s: ", __func__, msg);
+	for (i = 0; i < __be16_to_cpu(ddf->active->max_vd_entries); i++) {
+		/* entries with an all-0xff GUID are unused slots */
+		if (all_ff(ddf->virt->entries[i].guid))
+			continue;
+		dprintf("%u(s=%02x i=%02x) ", i,
+			ddf->virt->entries[i].state,
+			ddf->virt->entries[i].init_state);
+	}
+	dprintf("\n");
+}
+#else
+static void pr_state(const struct ddf_super *ddf, const char *msg) {}
+#endif
+
+/* Mark the metadata dirty and (in debug builds) log current VD states. */
+#define ddf_set_updates_pending(x) \
+	do { (x)->updates_pending = 1; pr_state(x, __func__); } while (0)
+
+static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
+ __u32 refnum, unsigned int nmax,
+ const struct vd_config **bvd,
+ unsigned int *idx);
static unsigned int calc_crc(void *buf, int len)
{
return __cpu_to_be32(newcrc);
}
+/* Sentinel PRL value: no valid DDF RAID level could be derived. */
+#define DDF_INVALID_LEVEL 0xff
+/* Sentinel SRL value: the array has no secondary RAID level. */
+#define DDF_NO_SECONDARY 0xff
+/* Report an md level/layout/disk-count combination that DDF cannot
+ * express.  Always returns DDF_INVALID_LEVEL so callers can write
+ * "return err_bad_md_layout(array);". */
+static int err_bad_md_layout(const mdu_array_info_t *array)
+{
+	pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
+	       array->level, array->layout, array->raid_disks);
+	return DDF_INVALID_LEVEL;
+}
+
+/*
+ * Map an md level/layout/raid_disks triple to the DDF PRL/RLQ/SRL
+ * fields of 'conf'.  Also sets prim_elmnt_count and sec_elmnt_count.
+ * Returns 0 on success, or DDF_INVALID_LEVEL (via err_bad_md_layout())
+ * when the combination cannot be expressed in DDF.
+ */
+static int layout_md2ddf(const mdu_array_info_t *array,
+			 struct vd_config *conf)
+{
+	__u16 prim_elmnt_count = __cpu_to_be16(array->raid_disks);
+	__u8 prl = DDF_INVALID_LEVEL, rlq = 0;
+	__u8 sec_elmnt_count = 1;
+	__u8 srl = DDF_NO_SECONDARY;
+
+	switch (array->level) {
+	case LEVEL_LINEAR:
+		prl = DDF_CONCAT;
+		break;
+	case 0:
+		rlq = DDF_RAID0_SIMPLE;
+		prl = DDF_RAID0;
+		break;
+	case 1:
+		/* DDF only knows 2-way and 3-way mirrors */
+		switch (array->raid_disks) {
+		case 2:
+			rlq = DDF_RAID1_SIMPLE;
+			break;
+		case 3:
+			rlq = DDF_RAID1_MULTI;
+			break;
+		default:
+			return err_bad_md_layout(array);
+		}
+		prl = DDF_RAID1;
+		break;
+	case 4:
+		if (array->layout != 0)
+			return err_bad_md_layout(array);
+		rlq = DDF_RAID4_N;
+		prl = DDF_RAID4;
+		break;
+	case 5:
+		switch (array->layout) {
+		case ALGORITHM_LEFT_ASYMMETRIC:
+			rlq = DDF_RAID5_N_RESTART;
+			break;
+		case ALGORITHM_RIGHT_ASYMMETRIC:
+			rlq = DDF_RAID5_0_RESTART;
+			break;
+		case ALGORITHM_LEFT_SYMMETRIC:
+			rlq = DDF_RAID5_N_CONTINUE;
+			break;
+		case ALGORITHM_RIGHT_SYMMETRIC:
+			/* not mentioned in standard */
+		default:
+			return err_bad_md_layout(array);
+		}
+		prl = DDF_RAID5;
+		break;
+	case 6:
+		/* DDF reuses the RAID5 N_RESTART/N_CONTINUE RLQ values
+		 * for RAID6 (cf. layout_ddf2md()) */
+		switch (array->layout) {
+		case ALGORITHM_ROTATING_N_RESTART:
+			rlq = DDF_RAID5_N_RESTART;
+			break;
+		case ALGORITHM_ROTATING_ZERO_RESTART:
+			rlq = DDF_RAID6_0_RESTART;
+			break;
+		case ALGORITHM_ROTATING_N_CONTINUE:
+			rlq = DDF_RAID5_N_CONTINUE;
+			break;
+		default:
+			return err_bad_md_layout(array);
+		}
+		prl = DDF_RAID6;
+		break;
+	case 10:
+		/* md RAID10 maps to DDF RAID1 plus a spanned secondary
+		 * level: near-2 (layout 0x102) -> mirrored pairs,
+		 * near-3 (0x103) -> mirrored triples.  Other RAID10
+		 * layouts have no DDF equivalent. */
+		if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
+			rlq = DDF_RAID1_SIMPLE;
+			prim_elmnt_count = __cpu_to_be16(2);
+			sec_elmnt_count = array->raid_disks / 2;
+		} else if (array->raid_disks % 3 == 0
+			   && array->layout == 0x103) {
+			rlq = DDF_RAID1_MULTI;
+			prim_elmnt_count = __cpu_to_be16(3);
+			sec_elmnt_count = array->raid_disks / 3;
+		} else
+			return err_bad_md_layout(array);
+		srl = DDF_2SPANNED;
+		prl = DDF_RAID1;
+		break;
+	default:
+		return err_bad_md_layout(array);
+	}
+	conf->prl = prl;
+	conf->prim_elmnt_count = prim_elmnt_count;
+	conf->rlq = rlq;
+	conf->srl = srl;
+	conf->sec_elmnt_count = sec_elmnt_count;
+	return 0;
+}
+
+/* Report a DDF PRL/RLQ/disk-count combination that md cannot express.
+ * Always returns -1 so callers can write
+ * "return err_bad_ddf_layout(conf);". */
+static int err_bad_ddf_layout(const struct vd_config *conf)
+{
+	pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
+	       conf->prl, conf->rlq, __be16_to_cpu(conf->prim_elmnt_count));
+	return -1;
+}
+
+/*
+ * Inverse of layout_md2ddf(): derive md level/layout/raid_disks from a
+ * DDF vd_config.  Returns 0 on success, -1 for unsupported layouts.
+ */
+static int layout_ddf2md(const struct vd_config *conf,
+			 mdu_array_info_t *array)
+{
+	int level = LEVEL_UNSUPPORTED;
+	int layout = 0;
+	int raiddisks = __be16_to_cpu(conf->prim_elmnt_count);
+
+	if (conf->sec_elmnt_count > 1) {
+		/* see also check_secondary() */
+		/* Only spanned/striped RAID1 secondary levels are
+		 * supported; they map back to md RAID10 near-2/near-3. */
+		if (conf->prl != DDF_RAID1 ||
+		    (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
+			pr_err("Unsupported secondary RAID level %u/%u\n",
+			       conf->prl, conf->srl);
+			return -1;
+		}
+		if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
+			layout = 0x102;
+		else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
+			layout = 0x103;
+		else
+			return err_bad_ddf_layout(conf);
+		/* total disks = disks per BVD times number of BVDs */
+		raiddisks *= conf->sec_elmnt_count;
+		level = 10;
+		goto good;
+	}
+
+	switch (conf->prl) {
+	case DDF_CONCAT:
+		level = LEVEL_LINEAR;
+		break;
+	case DDF_RAID0:
+		if (conf->rlq != DDF_RAID0_SIMPLE)
+			return err_bad_ddf_layout(conf);
+		level = 0;
+		break;
+	case DDF_RAID1:
+		if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
+		      (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
+			return err_bad_ddf_layout(conf);
+		level = 1;
+		break;
+	case DDF_RAID4:
+		if (conf->rlq != DDF_RAID4_N)
+			return err_bad_ddf_layout(conf);
+		level = 4;
+		break;
+	case DDF_RAID5:
+		switch (conf->rlq) {
+		case DDF_RAID5_N_RESTART:
+			layout = ALGORITHM_LEFT_ASYMMETRIC;
+			break;
+		case DDF_RAID5_0_RESTART:
+			layout = ALGORITHM_RIGHT_ASYMMETRIC;
+			break;
+		case DDF_RAID5_N_CONTINUE:
+			layout = ALGORITHM_LEFT_SYMMETRIC;
+			break;
+		default:
+			return err_bad_ddf_layout(conf);
+		}
+		level = 5;
+		break;
+	case DDF_RAID6:
+		/* the RAID5 RLQ values double as RAID6 qualifiers,
+		 * see layout_md2ddf() */
+		switch (conf->rlq) {
+		case DDF_RAID5_N_RESTART:
+			layout = ALGORITHM_ROTATING_N_RESTART;
+			break;
+		case DDF_RAID6_0_RESTART:
+			layout = ALGORITHM_ROTATING_ZERO_RESTART;
+			break;
+		case DDF_RAID5_N_CONTINUE:
+			layout = ALGORITHM_ROTATING_N_CONTINUE;
+			break;
+		default:
+			return err_bad_ddf_layout(conf);
+		}
+		level = 6;
+		break;
+	default:
+		return err_bad_ddf_layout(conf);
+	};
+
+good:
+	array->level = level;
+	array->layout = layout;
+	array->raid_disks = raiddisks;
+	return 0;
+}
+
static int load_ddf_header(int fd, unsigned long long lba,
unsigned long long size,
int type,
if (lseek64(fd, dsize-512, 0) < 0) {
if (devname)
- fprintf(stderr,
- Name": Cannot seek to anchor block on %s: %s\n",
- devname, strerror(errno));
+ pr_err("Cannot seek to anchor block on %s: %s\n",
+ devname, strerror(errno));
return 1;
}
if (read(fd, &super->anchor, 512) != 512) {
if (devname)
- fprintf(stderr,
- Name ": Cannot read anchor block on %s: %s\n",
- devname, strerror(errno));
+ pr_err("Cannot read anchor block on %s: %s\n",
+ devname, strerror(errno));
return 1;
}
if (super->anchor.magic != DDF_HEADER_MAGIC) {
if (devname)
- fprintf(stderr, Name ": no DDF anchor found on %s\n",
+ pr_err("no DDF anchor found on %s\n",
devname);
return 2;
}
if (calc_crc(&super->anchor, 512) != super->anchor.crc) {
if (devname)
- fprintf(stderr, Name ": bad CRC on anchor on %s\n",
+ pr_err("bad CRC on anchor on %s\n",
devname);
return 2;
}
if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
if (devname)
- fprintf(stderr, Name ": can only support super revision"
+ pr_err("can only support super revision"
" %.8s and earlier, not %.8s on %s\n",
DDF_REVISION_2, super->anchor.revision,devname);
return 2;
}
+ super->active = NULL;
if (load_ddf_header(fd, __be64_to_cpu(super->anchor.primary_lba),
dsize >> 9, 1,
&super->primary, &super->anchor) == 0) {
if (devname)
- fprintf(stderr,
- Name ": Failed to load primary DDF header "
- "on %s\n", devname);
- return 2;
- }
- super->active = &super->primary;
+ pr_err("Failed to load primary DDF header "
+ "on %s\n", devname);
+ } else
+ super->active = &super->primary;
if (load_ddf_header(fd, __be64_to_cpu(super->anchor.secondary_lba),
dsize >> 9, 2,
&super->secondary, &super->anchor)) {
|| (__be32_to_cpu(super->primary.seq)
== __be32_to_cpu(super->secondary.seq) &&
super->primary.openflag && !super->secondary.openflag)
+ || super->active == NULL
)
super->active = &super->secondary;
- }
+ } else if (devname)
+ pr_err("Failed to load secondary DDF header on %s\n",
+ devname);
+ if (super->active == NULL)
+ return 2;
return 0;
}
return 0;
}
+/* sec_elmnt_seq marker for an other_bvds slot that holds no record yet */
+#define DDF_UNUSED_BVD 0xff
+/* Allocate vcl->other_bvds for a secondary-level (multi-BVD) VC.
+ * One contiguous buffer holds the n_vds config records followed by the
+ * pointer array, so other_bvds[0] points at the start of the buffer and
+ * a single free(other_bvds[0]) releases everything (see the teardown
+ * code that frees v->other_bvds[0]).
+ * Returns 0 on success, -1 on allocation failure.
+ * Note: arithmetic on void* below relies on the GCC extension. */
+static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
+{
+	unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
+	unsigned int i, vdsize;
+	void *p;
+	if (n_vds == 0) {
+		vcl->other_bvds = NULL;
+		return 0;
+	}
+	vdsize = ddf->conf_rec_len * 512;
+	if (posix_memalign(&p, 512, n_vds *
+			   (vdsize +  sizeof(struct vd_config *))) != 0)
+		return -1;
+	vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
+	for (i = 0; i < n_vds; i++) {
+		vcl->other_bvds[i] = p + i * vdsize;
+		memset(vcl->other_bvds[i], 0, vdsize);
+		/* mark every slot empty until add_other_bvd() fills it */
+		vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
+	}
+	return 0;
+}
+
+/*
+ * Record a secondary-level BVD configuration in vcl->other_bvds.
+ * If a record with the same sec_elmnt_seq already exists it is only
+ * replaced when 'vd' carries a newer sequence number; otherwise the
+ * record goes into the first unused slot.  'len' is the size of the
+ * config record in bytes.
+ */
+static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
+			  unsigned int len)
+{
+	int i;
+	for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
+		if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
+			break;
+
+	if (i < vcl->conf.sec_elmnt_count-1) {
+		/* seqnum is stored big-endian on disk: convert before
+		 * comparing, as the primary-BVD seqnum check in
+		 * load_ddf_local() does. */
+		if (__be32_to_cpu(vd->seqnum) <=
+		    __be32_to_cpu(vcl->other_bvds[i]->seqnum))
+			return;
+	} else {
+		/* sec_elmnt_seq not seen before: find an empty slot */
+		for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
+			if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
+				break;
+		if (i == vcl->conf.sec_elmnt_count-1) {
+			pr_err("no space for sec level config %u, count is %u\n",
+			       vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
+			return;
+		}
+	}
+	memcpy(vcl->other_bvds[i], vd, len);
+}
+
static int load_ddf_local(int fd, struct ddf_super *super,
char *devname, int keep)
{
if (posix_memalign((void**)&dl, 512,
sizeof(*dl) +
(super->max_part) * sizeof(dl->vlist[0])) != 0) {
- fprintf(stderr, Name ": %s could not allocate disk info buffer\n",
+ pr_err("%s could not allocate disk info buffer\n",
__func__);
return 1;
}
super->active->data_section_offset,
super->active->data_section_length,
0);
- dl->devname = devname ? strdup(devname) : NULL;
+ dl->devname = devname ? xstrdup(devname) : NULL;
fstat(fd, &stb);
dl->major = major(stb.st_rdev);
dl->size = 0;
if (get_dev_size(fd, devname, &dsize))
dl->size = dsize >> 9;
+ /* If the disks have different sizes, the LBAs will differ
+ * between phys disks.
+ * At this point here, the values in super->active must be valid
+ * for this phys disk. */
+ dl->primary_lba = super->active->primary_lba;
+ dl->secondary_lba = super->active->secondary_lba;
+ dl->workspace_lba = super->active->workspace_lba;
dl->spare = NULL;
for (i = 0 ; i < super->max_part ; i++)
dl->vlist[i] = NULL;
continue;
if (posix_memalign((void**)&dl->spare, 512,
super->conf_rec_len*512) != 0) {
- fprintf(stderr, Name
- ": %s could not allocate spare info buf\n",
- __func__);
+ pr_err("%s could not allocate spare info buf\n",
+ __func__);
return 1;
}
-
+
memcpy(dl->spare, vd, super->conf_rec_len*512);
continue;
}
if (vcl) {
dl->vlist[vnum++] = vcl;
+ if (vcl->other_bvds != NULL &&
+ vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
+ add_other_bvd(vcl, vd, super->conf_rec_len*512);
+ continue;
+ }
if (__be32_to_cpu(vd->seqnum) <=
__be32_to_cpu(vcl->conf.seqnum))
continue;
if (posix_memalign((void**)&vcl, 512,
(super->conf_rec_len*512 +
offsetof(struct vcl, conf))) != 0) {
- fprintf(stderr, Name
- ": %s could not allocate vcl buf\n",
- __func__);
+ pr_err("%s could not allocate vcl buf\n",
+ __func__);
return 1;
}
vcl->next = super->conflist;
vcl->block_sizes = NULL; /* FIXME not for CONCAT */
+ vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
+ if (alloc_other_bvds(super, vcl) != 0) {
+ pr_err("%s could not allocate other bvds\n",
+ __func__);
+ free(vcl);
+ return 1;
+ };
super->conflist = vcl;
dl->vlist[vnum++] = vcl;
}
memcpy(&vcl->conf, vd, super->conf_rec_len*512);
- vcl->lba_offset = (__u64*)
- &vcl->conf.phys_refnum[super->mppe];
-
for (i=0; i < max_virt_disks ; i++)
if (memcmp(super->virt->entries[i].guid,
vcl->conf.guid, DDF_GUID_LEN)==0)
#ifndef MDASSEMBLE
static int load_super_ddf_all(struct supertype *st, int fd,
- void **sbp, char *devname, int keep_fd);
+ void **sbp, char *devname);
#endif
static void free_super_ddf(struct supertype *st);
struct ddf_super *super;
int rv;
-#ifndef MDASSEMBLE
- /* if 'fd' is a container, load metadata from all the devices */
- if (load_super_ddf_all(st, fd, &st->sb, devname, 1) == 0)
- return 0;
-#endif
- if (st->subarray[0])
- return 1; /* FIXME Is this correct */
-
if (get_dev_size(fd, devname, &dsize) == 0)
return 1;
- if (test_partition(fd))
+ if (!st->ignore_hw_compat && test_partition(fd))
/* DDF is not allowed on partitions */
return 1;
/* 32M is a lower bound */
if (dsize <= 32*1024*1024) {
if (devname)
- fprintf(stderr,
- Name ": %s is too small for ddf: "
- "size is %llu sectors.\n",
- devname, dsize>>9);
+ pr_err("%s is too small for ddf: "
+ "size is %llu sectors.\n",
+ devname, dsize>>9);
return 1;
}
if (dsize & 511) {
if (devname)
- fprintf(stderr,
- Name ": %s is an odd size for ddf: "
- "size is %llu bytes.\n",
- devname, dsize);
+ pr_err("%s is an odd size for ddf: "
+ "size is %llu bytes.\n",
+ devname, dsize);
return 1;
}
free_super_ddf(st);
if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
- fprintf(stderr, Name ": malloc of %zu failed.\n",
+ pr_err("malloc of %zu failed.\n",
sizeof(*super));
return 1;
}
if (rv) {
if (devname)
- fprintf(stderr,
- Name ": Failed to load all information "
- "sections on %s\n", devname);
+ pr_err("Failed to load all information "
+ "sections on %s\n", devname);
free(super);
return rv;
}
if (rv) {
if (devname)
- fprintf(stderr,
- Name ": Failed to load all information "
- "sections on %s\n", devname);
+ pr_err("Failed to load all information "
+ "sections on %s\n", devname);
free(super);
return rv;
}
- if (st->subarray[0]) {
- unsigned long val;
- struct vcl *v;
- char *ep;
-
- val = strtoul(st->subarray, &ep, 10);
- if (*ep != '\0') {
- free(super);
- return 1;
- }
-
- for (v = super->conflist; v; v = v->next)
- if (v->vcnum == val)
- super->currentconf = v;
- if (!super->currentconf) {
- free(super);
- return 1;
- }
- }
-
/* Should possibly check the sections .... */
st->sb = super;
st->minor_version = 0;
st->max_devs = 512;
}
- st->loaded_container = 0;
return 0;
}
ddf->conflist = v->next;
if (v->block_sizes)
free(v->block_sizes);
+ if (v->other_bvds)
+ /*
+ v->other_bvds[0] points to beginning of buffer,
+ see alloc_other_bvds()
+ */
+ free(v->other_bvds[0]);
free(v);
}
while (ddf->dlist) {
)
return NULL;
- st = malloc(sizeof(*st));
- memset(st, 0, sizeof(*st));
+ st = xcalloc(1, sizeof(*st));
st->ss = &super_ddf;
st->max_devs = 512;
st->minor_version = 0;
return st;
}
-
#ifndef MDASSEMBLE
static mapping_t ddf_state[] = {
};
#endif
-struct num_mapping {
- int num1, num2;
-};
-static struct num_mapping ddf_level_num[] = {
- { DDF_RAID0, 0 },
- { DDF_RAID1, 1 },
- { DDF_RAID3, LEVEL_UNSUPPORTED },
- { DDF_RAID4, 4 },
- { DDF_RAID5, 5 },
- { DDF_RAID1E, LEVEL_UNSUPPORTED },
- { DDF_JBOD, LEVEL_UNSUPPORTED },
- { DDF_CONCAT, LEVEL_LINEAR },
- { DDF_RAID5E, LEVEL_UNSUPPORTED },
- { DDF_RAID5EE, LEVEL_UNSUPPORTED },
- { DDF_RAID6, 6},
- { MAXINT, MAXINT }
-};
-
-static int map_num1(struct num_mapping *map, int num)
-{
- int i;
- for (i=0 ; map[i].num1 != MAXINT; i++)
- if (map[i].num1 == num)
- break;
- return map[i].num2;
-}
-
-static int all_ff(char *guid)
+static int all_ff(const char *guid)
{
int i;
for (i = 0; i < DDF_GUID_LEN; i++)
printf(")");
}
+/* Render a DDF GUID as a lowercase hex string.
+ * Returns a pointer to a static buffer; not reentrant. */
+static const char *guid_str(const char *guid)
+{
+	static char buf[DDF_GUID_LEN*2+1];
+	int i;
+	for (i = 0; i < DDF_GUID_LEN; i++)
+		sprintf(buf + 2*i, "%02x", (unsigned char)guid[i]);
+	buf[2*i] = '\0';
+	return (const char *) buf;
+}
+
static void examine_vd(int n, struct ddf_super *sb, char *guid)
{
int crl = sb->conf_rec_len;
}
printf(")\n");
if (vc->chunk_shift != 255)
- printf(" Chunk Size[%d] : %d sectors\n", n,
- 1 << vc->chunk_shift);
+ printf(" Chunk Size[%d] : %d sectors\n", n,
+ 1 << vc->chunk_shift);
printf(" Raid Level[%d] : %s\n", n,
map_num(ddf_level, vc->prl)?:"-unknown-");
if (vc->sec_elmnt_count != 1) {
static void examine_vds(struct ddf_super *sb)
{
int cnt = __be16_to_cpu(sb->virt->populated_vdes);
- int i;
+ unsigned int i;
printf(" Virtual Disks : %d\n", cnt);
- for (i=0; i<cnt; i++) {
+ for (i = 0; i < __be16_to_cpu(sb->virt->max_vdes); i++) {
struct virtual_entry *ve = &sb->virt->entries[i];
+ if (all_ff(ve->guid))
+ continue;
printf("\n");
printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
printf("\n");
//printf("\n");
printf(" %3d %08x ", i,
__be32_to_cpu(pd->refnum));
- printf("%8lluK ",
+ printf("%8lluK ",
(unsigned long long)__be64_to_cpu(pd->config_size)>>1);
for (dl = sb->dlist; dl ; dl = dl->next) {
if (dl->disk.refnum == pd->refnum) {
examine_pds(sb);
}
-static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info);
+static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
+static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
+static unsigned int get_vd_num_of_subarray(struct supertype *st)
+{
+	/*
+	 * Figure out the VD number for this supertype.
+	 * Returns DDF_CONTAINER for the container itself,
+	 * and DDF_NOTFOUND on error.
+	 */
+	struct ddf_super *ddf = st->sb;
+	struct mdinfo *sra;
+	char *sub, *end;
+	unsigned int vcnum;
+
+	if (*st->container_devnm == '\0')
+		return DDF_CONTAINER;
+
+	/* NOTE(review): 'sra' is never freed on any path below —
+	 * presumably a leak; verify against the sysfs_free() convention. */
+	sra = sysfs_read(-1, st->devnm, GET_VERSION);
+	if (!sra || sra->array.major_version != -1 ||
+	    sra->array.minor_version != -2 ||
+	    !is_subarray(sra->text_version))
+		return DDF_NOTFOUND;
+
+	/* parse the VD number after the second '/' of text_version */
+	sub = strchr(sra->text_version + 1, '/');
+	if (sub != NULL)
+		vcnum = strtoul(sub + 1, &end, 10);
+	/* NOTE(review): when sub != NULL, *sub is '/', so the
+	 * "*sub == '\0'" test can never fire; a string ending in '/'
+	 * parses as vcnum 0 — *(sub+1) was probably intended. Confirm. */
+	if (sub == NULL || *sub == '\0' || *end != '\0' ||
+	    vcnum >= __be16_to_cpu(ddf->active->max_vd_entries))
+		return DDF_NOTFOUND;
+
+	return vcnum;
+}
+
static void brief_examine_super_ddf(struct supertype *st, int verbose)
{
/* We just write a generic DDF ARRAY entry
*/
struct mdinfo info;
char nbuf[64];
- getinfo_super_ddf(st, &info);
+ getinfo_super_ddf(st, &info, NULL);
fname_from_uuid(st, &info, nbuf, ':');
printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
struct mdinfo info;
unsigned int i;
char nbuf[64];
- getinfo_super_ddf(st, &info);
+ getinfo_super_ddf(st, &info, NULL);
fname_from_uuid(st, &info, nbuf, ':');
for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
{
struct mdinfo info;
char nbuf[64];
- getinfo_super_ddf(st, &info);
+ getinfo_super_ddf(st, &info, NULL);
fname_from_uuid(st, &info, nbuf, ':');
printf("MD_METADATA=ddf\n");
printf("MD_LEVEL=container\n");
printf("MD_UUID=%s\n", nbuf+5);
}
-
+
+/*
+ * Copy the DDF metadata region (headers and tables at the end of the
+ * device) verbatim from fd 'from' to fd 'to'.
+ * Returns 0 on success, 1 on any validation/read/write failure.
+ */
+static int copy_metadata_ddf(struct supertype *st, int from, int to)
+{
+	void *buf;
+	unsigned long long dsize, offset;
+	int bytes;
+	struct ddf_header *ddf;
+	int written = 0;
+
+	/* The metadata consists of an anchor, a primary and a secondary
+	 * header, all living at the end of the device.  So it is
+	 * easiest to find the earlier of primary and secondary and copy
+	 * everything from there to the end of the device.
+	 *
+	 * The anchor is the last 512-byte block.  It contains
+	 * primary_lba and secondary_lba; we start at the lower one.
+	 */
+
+	if (posix_memalign(&buf, 4096, 4096) != 0)
+		return 1;
+
+	if (!get_dev_size(from, NULL, &dsize))
+		goto err;
+
+	/* read and validate the anchor block */
+	if (lseek64(from, dsize-512, 0) < 0)
+		goto err;
+	if (read(from, buf, 512) != 512)
+		goto err;
+	ddf = buf;
+	if (ddf->magic != DDF_HEADER_MAGIC ||
+	    calc_crc(ddf, 512) != ddf->crc ||
+	    (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
+	     memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
+		goto err;
+
+	/* start at the lowest of: anchor, primary, secondary */
+	offset = dsize - 512;
+	if ((__be64_to_cpu(ddf->primary_lba) << 9) < offset)
+		offset = __be64_to_cpu(ddf->primary_lba) << 9;
+	if ((__be64_to_cpu(ddf->secondary_lba) << 9) < offset)
+		offset = __be64_to_cpu(ddf->secondary_lba) << 9;
+
+	bytes = dsize - offset;
+
+	if (lseek64(from, offset, 0) < 0 ||
+	    lseek64(to, offset, 0) < 0)
+		goto err;
+	/* copy in 4K chunks through the aligned bounce buffer */
+	while (written < bytes) {
+		int n = bytes - written;
+		if (n > 4096)
+			n = 4096;
+		if (read(from, buf, n) != n)
+			goto err;
+		if (write(to, buf, n) != n)
+			goto err;
+		written += n;
+	}
+	free(buf);
+	return 0;
+err:
+	free(buf);
+	return 1;
+}
static void detail_super_ddf(struct supertype *st, char *homehost)
{
static void brief_detail_super_ddf(struct supertype *st)
{
-	/* FIXME I really need to know which array we are detailing.
-	 * Can that be stored in ddf_super??
-	 */
-//	struct ddf_super *ddf = st->sb;
	struct mdinfo info;
	char nbuf[64];
-	getinfo_super_ddf(st, &info);
+	struct ddf_super *ddf = st->sb;
+	unsigned int vcnum = get_vd_num_of_subarray(st);
+	/* info is only partially filled: just the uuid.  Assumes
+	 * fname_from_uuid() reads nothing else — TODO confirm. */
+	if (vcnum == DDF_CONTAINER)
+		uuid_from_super_ddf(st, info.uuid);
+	else if (vcnum == DDF_NOTFOUND)
+		return;
+	else
+		uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, info.uuid);
	fname_from_uuid(st, &info, nbuf,':');
	printf(" UUID=%s", nbuf + 5);
}
}
#ifndef MDASSEMBLE
-static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst)
+static int find_index_in_bvd(const struct ddf_super *ddf,
+			     const struct vd_config *conf, unsigned int n,
+			     unsigned int *n_bvd)
+{
+	/*
+	 * Find the index of the n-th valid physical disk in this BVD.
+	 * Slots whose phys_refnum is all-0xff are unused and skipped.
+	 * On success store the slot index in *n_bvd and return 1;
+	 * return 0 if the BVD has fewer than n+1 populated slots.
+	 */
+	unsigned int i, j;
+	for (i = 0, j = 0; i < ddf->mppe &&
+	     j < __be16_to_cpu(conf->prim_elmnt_count); i++) {
+		if (conf->phys_refnum[i] != 0xffffffff) {
+			if (n == j) {
+				*n_bvd = i;
+				return 1;
+			}
+			j++;
+		}
+	}
+	dprintf("%s: couldn't find BVD member %u (total %u)\n",
+		__func__, n, __be16_to_cpu(conf->prim_elmnt_count));
+	return 0;
+}
+
+/*
+ * Locate the vd_config (BVD) holding the n-th member disk of
+ * subarray 'inst', counting across all BVDs of a secondary-level
+ * configuration.  On success set *n_bvd to the index inside the
+ * returned BVD and *vcl to the owning vcl; return the vd_config.
+ * Returns NULL when the disk cannot be found.
+ */
+static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
+				   unsigned int n,
+				   unsigned int *n_bvd, struct vcl **vcl)
{
	struct vcl *v;
-	for (v = ddf->conflist; v; v = v->next)
-		if (inst == v->vcnum)
-			return &v->conf;
+	for (v = ddf->conflist; v; v = v->next) {
+		unsigned int nsec, ibvd = 0;
+		struct vd_config *conf;
+		if (inst != v->vcnum)
+			continue;
+		conf = &v->conf;
+		/* simple case: no secondary level, just one BVD */
+		if (conf->sec_elmnt_count == 1) {
+			if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
+				*vcl = v;
+				return conf;
+			} else
+				goto bad;
+		}
+		if (v->other_bvds == NULL) {
+			pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
+			       __func__, conf->sec_elmnt_count);
+			goto bad;
+		}
+		/* nsec = which BVD holds member n; switch to that BVD's
+		 * config if it isn't the one cached in v->conf */
+		nsec = n / __be16_to_cpu(conf->prim_elmnt_count);
+		if (conf->sec_elmnt_seq != nsec) {
+			for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
+				if (v->other_bvds[ibvd-1]->sec_elmnt_seq
+				    == nsec)
+					break;
+			}
+			if (ibvd == conf->sec_elmnt_count)
+				goto bad;
+			conf = v->other_bvds[ibvd-1];
+		}
+		/* index within the BVD is n minus the members of the
+		 * preceding BVDs (prim_elmnt_count disks per BVD) */
+		if (!find_index_in_bvd(ddf, conf,
+				       n - nsec * __be16_to_cpu(conf->prim_elmnt_count),
+				       n_bvd))
+			goto bad;
+		dprintf("%s: found disk %u as member %u in bvd %u of array %u\n"
+			, __func__, n, *n_bvd, conf->sec_elmnt_seq, inst);
+		*vcl = v;
+		return conf;
+	}
+bad:
+	pr_err("%s: Couldn't find disk %u in array %u\n", __func__, n, inst);
	return NULL;
}
#endif
-static int find_phys(struct ddf_super *ddf, __u32 phys_refnum)
+static int find_phys(const struct ddf_super *ddf, __u32 phys_refnum)
{
/* Find the entry in phys_disk which has the given refnum
* and return it's index
return -1;
}
+/* Derive a 16-byte md uuid from a 24-byte DDF GUID by SHA1-hashing the
+ * GUID and keeping the first 16 bytes of the digest. */
+static void uuid_from_ddf_guid(const char *guid, int uuid[4])
+{
+	char buf[20];	/* SHA1 digest is 20 bytes; only 16 are used */
+	struct sha1_ctx ctx;
+	sha1_init_ctx(&ctx);
+	sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
+	sha1_finish_ctx(&ctx, buf);
+	memcpy(uuid, buf, 4*4);
+}
+
static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
{
/* The uuid returned here is used for:
* not the device-set.
* uuid to recognise same set when adding a missing device back
* to an array. This is a uuid for the device-set.
- *
+ *
* For each of these we can make do with a truncated
* or hashed uuid rather than the original, as long as
* everyone agrees.
struct ddf_super *ddf = st->sb;
struct vcl *vcl = ddf->currentconf;
char *guid;
- char buf[20];
- struct sha1_ctx ctx;
if (vcl)
guid = vcl->conf.guid;
else
guid = ddf->anchor.guid;
-
- sha1_init_ctx(&ctx);
- sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
- sha1_finish_ctx(&ctx, buf);
- memcpy(uuid, buf, 4*4);
+ uuid_from_ddf_guid(guid, uuid);
}
-static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info);
+static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
-static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info)
+/* Fill *info with container-level information.  When a subarray is
+ * selected (ddf->currentconf) this delegates to the BVD variant.
+ * 'map', if non-NULL, receives a per-disk online/failed byte map; its
+ * capacity is the raid_disks value the caller preloaded into info. */
+static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
{
	struct ddf_super *ddf = st->sb;
+	/* capture the caller-provided map size before info is cleared */
+	int map_disks = info->array.raid_disks;
+	__u32 *cptr;
	if (ddf->currentconf) {
-		getinfo_super_ddf_bvd(st, info);
+		getinfo_super_ddf_bvd(st, info, map);
		return;
	}
+	memset(info, 0, sizeof(*info));
	info->array.raid_disks = __be16_to_cpu(ddf->phys->used_pdes);
	info->array.level = LEVEL_CONTAINER;
	info->array.layout = 0;
	info->array.md_minor = -1;
-	info->array.ctime = DECADE + __be32_to_cpu(*(__u32*)
-					(ddf->anchor.guid+16));
+	/* guid+16 holds a big-endian creation timestamp, offset by DECADE */
+	cptr = (__u32 *)(ddf->anchor.guid + 16);
+	info->array.ctime = DECADE + __be32_to_cpu(*cptr);
+
	info->array.utime = 0;
	info->array.chunk_size = 0;
	info->container_enough = 1;
-
	info->disk.major = 0;
	info->disk.minor = 0;
	if (ddf->dlist) {
		info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
		info->data_offset = __be64_to_cpu(ddf->phys->
-					entries[info->disk.raid_disk].
-					config_size);
+						  entries[info->disk.raid_disk].
+						  config_size);
		info->component_size = ddf->dlist->size - info->data_offset;
	} else {
		info->disk.number = -1;
	}
	info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
-
	info->recovery_start = MaxSector;
	info->reshape_active = 0;
+	info->recovery_blocked = 0;
	info->name[0] = 0;
	info->array.major_version = -1;
	uuid_from_super_ddf(st, info->uuid);
+	if (map) {
+		int i;
+		/* mark each phys disk that is Online and not Failed */
+		for (i = 0 ; i < map_disks; i++) {
+			if (i < info->array.raid_disks &&
+			    (__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Online) &&
+			    !(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed))
+				map[i] = 1;
+			else
+				map[i] = 0;
+		}
+	}
}
-static int rlq_to_layout(int rlq, int prl, int raiddisks);
-
-static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info)
+/* Fill *info for the currently selected subarray (ddf->currentconf).
+ * 'map', if non-NULL, receives a per-member online byte map sized by
+ * the raid_disks value the caller preloaded into info. */
+static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
{
	struct ddf_super *ddf = st->sb;
	struct vcl *vc = ddf->currentconf;
	int cd = ddf->currentdev;
+	int n_prim;
	int j;
	struct dl *dl;
+	/* capture the caller-provided map size before info is cleared */
+	int map_disks = info->array.raid_disks;
+	__u32 *cptr;
+	struct vd_config *conf;
-	/* FIXME this returns BVD info - what if we want SVD ?? */
-
-	info->array.raid_disks = __be16_to_cpu(vc->conf.prim_elmnt_count);
-	info->array.level = map_num1(ddf_level_num, vc->conf.prl);
-	info->array.layout = rlq_to_layout(vc->conf.rlq, vc->conf.prl,
-					   info->array.raid_disks);
+	memset(info, 0, sizeof(*info));
+	if (layout_ddf2md(&vc->conf, &info->array) == -1)
+		return;
	info->array.md_minor = -1;
-	info->array.ctime = DECADE +
-		__be32_to_cpu(*(__u32*)(vc->conf.guid+16));
+	/* guid+16 holds a big-endian creation timestamp, offset by DECADE */
+	cptr = (__u32 *)(vc->conf.guid + 16);
+	info->array.ctime = DECADE + __be32_to_cpu(*cptr);
	info->array.utime = DECADE + __be32_to_cpu(vc->conf.timestamp);
	info->array.chunk_size = 512 << vc->conf.chunk_shift;
	info->custom_array_size = 0;
+	/* for secondary-level arrays, translate the global device index
+	 * cd into (BVD, index-within-BVD) */
+	conf = &vc->conf;
+	n_prim = __be16_to_cpu(conf->prim_elmnt_count);
+	if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
+		int ibvd = cd / n_prim - 1;
+		cd %= n_prim;
+		conf = vc->other_bvds[ibvd];
+	}
+
	if (cd >= 0 && (unsigned)cd < ddf->mppe) {
-		info->data_offset = __be64_to_cpu(vc->lba_offset[cd]);
+		/* NOTE(review): this uses &vc->conf rather than the
+		 * BVD-adjusted 'conf' computed above — verify whether
+		 * 'conf' was intended for members of other BVDs. */
+		info->data_offset =
+			__be64_to_cpu(LBA_OFFSET(ddf, &vc->conf)[cd]);
		if (vc->block_sizes)
			info->component_size = vc->block_sizes[cd];
		else
	}
	for (dl = ddf->dlist; dl ; dl = dl->next)
-		if (dl->raiddisk == info->disk.raid_disk)
+		if (dl->raiddisk == ddf->currentdev)
			break;
+
	info->disk.major = 0;
	info->disk.minor = 0;
+	info->disk.state = 0;
	if (dl) {
		info->disk.major = dl->major;
		info->disk.minor = dl->minor;
+		info->disk.raid_disk = dl->raiddisk;
+		info->disk.number = dl->pdnum;
+		info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
	}
-//	info->disk.number = __be32_to_cpu(ddf->disk.refnum);
-//	info->disk.raid_disk = find refnum in the table and use index;
-//	info->disk.state = ???;
	info->container_member = ddf->currentconf->vcnum;
	info->recovery_start = MaxSector;
	info->resync_start = 0;
	info->reshape_active = 0;
+	info->recovery_blocked = 0;
	if (!(ddf->virt->entries[info->container_member].state
	      & DDF_state_inconsistent)  &&
	    (ddf->virt->entries[info->container_member].init_state
	info->array.major_version = -1;
	info->array.minor_version = -2;
-	sprintf(info->text_version, "/%s/%s",
-		devnum2devname(st->container_dev),
-		st->subarray);
+	sprintf(info->text_version, "/%s/%d",
+		st->container_devnm,
+		info->container_member);
	info->safe_mode_delay = 200;
	memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
	for(j=0; j<16; j++)
		if (info->name[j] == ' ')
			info->name[j] = 0;
-}
+	if (map)
+		for (j = 0; j < map_disks; j++) {
+			map[j] = 0;
+			if (j < info->array.raid_disks) {
+				int i = find_phys(ddf, vc->conf.phys_refnum[j]);
+				/* NOTE(review): 'map[i] = 1' below looks
+				 * wrong — i is a phys-disk index that may
+				 * exceed map_disks (possible out-of-bounds
+				 * write); 'map[j] = 1' was probably
+				 * intended.  Confirm before relying on it. */
+				if (i >= 0 &&
+				    (__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Online) &&
+				    !(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed))
+					map[i] = 1;
+			}
+		}
+}
static int update_super_ddf(struct supertype *st, struct mdinfo *info,
char *update,
if (strcmp(update, "grow") == 0) {
/* FIXME */
- }
- if (strcmp(update, "resync") == 0) {
+ } else if (strcmp(update, "resync") == 0) {
// info->resync_checkpoint = 0;
- }
- /* We ignore UUID updates as they make even less sense
- * with DDF
- */
- if (strcmp(update, "homehost") == 0) {
+ } else if (strcmp(update, "homehost") == 0) {
/* homehost is stored in controller->vendor_data,
* or it is when we are the vendor
*/
// if (info->vendor_is_local)
// strcpy(ddf->controller.vendor_data, homehost);
- }
- if (strcmp(update, "name") == 0) {
+ rv = -1;
+ } else if (strcmp(update, "name") == 0) {
/* name is stored in virtual_entry->name */
// memset(ve->name, ' ', 16);
// strncpy(ve->name, info->name, 16);
- }
- if (strcmp(update, "_reshape_progress") == 0) {
+ rv = -1;
+ } else if (strcmp(update, "_reshape_progress") == 0) {
/* We don't support reshape yet */
- }
+ } else if (strcmp(update, "assemble") == 0 ) {
+ /* Do nothing, just succeed */
+ rv = 0;
+ } else
+ rv = -1;
// update_all_csum(ddf);
memcpy(guid+20, &stamp, 4);
}
-static int init_super_ddf_bvd(struct supertype *st,
- mdu_array_info_t *info,
- unsigned long long size,
- char *name, char *homehost,
- int *uuid);
-
-static int init_super_ddf(struct supertype *st,
- mdu_array_info_t *info,
- unsigned long long size, char *name, char *homehost,
- int *uuid)
+/* Return the index of the first unused virtual-disk entry (GUID all
+ * 0xff), or DDF_NOTFOUND if the table is full. */
+static unsigned int find_unused_vde(const struct ddf_super *ddf)
+{
+	unsigned int i;
+	for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
+		if (all_ff(ddf->virt->entries[i].guid))
+			return i;
+	}
+	return DDF_NOTFOUND;
+}
+
+/* Look up an in-use virtual-disk entry by name.
+ * Returns its index, or DDF_NOTFOUND if name is NULL or not present.
+ * The comparison is bounded by the fixed-width name field. */
+static unsigned int find_vde_by_name(const struct ddf_super *ddf,
+				     const char *name)
+{
+	unsigned int i;
+	if (name == NULL)
+		return DDF_NOTFOUND;
+	for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
+		if (all_ff(ddf->virt->entries[i].guid))
+			continue;
+		if (!strncmp(name, ddf->virt->entries[i].name,
+			     sizeof(ddf->virt->entries[i].name)))
+			return i;
+	}
+	return DDF_NOTFOUND;
+}
+
+/* Look up the virtual-disk entry whose GUID matches 'guid'.
+ * Returns its index, or DDF_NOTFOUND when guid is NULL, all-0xff
+ * (unset), or not present in the table. */
+static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
+				     const char *guid)
+{
+	unsigned int vdnum, nmax;
+	if (guid == NULL || all_ff(guid))
+		return DDF_NOTFOUND;
+	nmax = __be16_to_cpu(ddf->virt->max_vdes);
+	for (vdnum = 0; vdnum < nmax; vdnum++) {
+		if (memcmp(ddf->virt->entries[vdnum].guid, guid,
+			   DDF_GUID_LEN) == 0)
+			return vdnum;
+	}
+	return DDF_NOTFOUND;
+}
+
+static int init_super_ddf_bvd(struct supertype *st,
+ mdu_array_info_t *info,
+ unsigned long long size,
+ char *name, char *homehost,
+ int *uuid, unsigned long long data_offset);
+
+static int init_super_ddf(struct supertype *st,
+ mdu_array_info_t *info,
+ unsigned long long size, char *name, char *homehost,
+ int *uuid, unsigned long long data_offset)
+{
+ /* This is primarily called by Create when creating a new array.
+ * We will then get add_to_super called for each component, and then
* write_init_super called to write it out to each device.
* For DDF, Create can create on fresh devices or on a pre-existing
* array.
struct phys_disk *pd;
struct virtual_disk *vd;
+ if (data_offset != INVALID_SECTORS) {
+ pr_err("data-offset not supported by DDF\n");
+ return 0;
+ }
+
if (st->sb)
- return init_super_ddf_bvd(st, info, size, name, homehost, uuid);
+ return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
+ data_offset);
if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
- fprintf(stderr, Name ": %s could not allocate superblock\n", __func__);
+ pr_err("%s could not allocate superblock\n", __func__);
return 0;
}
memset(ddf, 0, sizeof(*ddf));
strcpy((char*)ddf->controller.vendor_data, homehost);
if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
- fprintf(stderr, Name ": %s could not allocate pd\n", __func__);
+ pr_err("%s could not allocate pd\n", __func__);
return 0;
}
ddf->phys = pd;
pd->used_pdes = __cpu_to_be16(0);
pd->max_pdes = __cpu_to_be16(max_phys_disks);
memset(pd->pad, 0xff, 52);
+ for (i = 0; i < max_phys_disks; i++)
+ memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
- fprintf(stderr, Name ": %s could not allocate vd\n", __func__);
+ pr_err("%s could not allocate vd\n", __func__);
return 0;
}
ddf->virt = vd;
memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
st->sb = ddf;
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
return 1;
}
return ffs(chunksize/512)-1;
}
-static int level_to_prl(int level)
-{
- switch (level) {
- case LEVEL_LINEAR: return DDF_CONCAT;
- case 0: return DDF_RAID0;
- case 1: return DDF_RAID1;
- case 4: return DDF_RAID4;
- case 5: return DDF_RAID5;
- case 6: return DDF_RAID6;
- default: return -1;
- }
-}
-static int layout_to_rlq(int level, int layout, int raiddisks)
-{
- switch(level) {
- case 0:
- return DDF_RAID0_SIMPLE;
- case 1:
- switch(raiddisks) {
- case 2: return DDF_RAID1_SIMPLE;
- case 3: return DDF_RAID1_MULTI;
- default: return -1;
- }
- case 4:
- switch(layout) {
- case 0: return DDF_RAID4_N;
- }
- break;
- case 5:
- switch(layout) {
- case ALGORITHM_LEFT_ASYMMETRIC:
- return DDF_RAID5_N_RESTART;
- case ALGORITHM_RIGHT_ASYMMETRIC:
- return DDF_RAID5_0_RESTART;
- case ALGORITHM_LEFT_SYMMETRIC:
- return DDF_RAID5_N_CONTINUE;
- case ALGORITHM_RIGHT_SYMMETRIC:
- return -1; /* not mentioned in standard */
- }
- case 6:
- switch(layout) {
- case ALGORITHM_ROTATING_N_RESTART:
- return DDF_RAID5_N_RESTART;
- case ALGORITHM_ROTATING_ZERO_RESTART:
- return DDF_RAID6_0_RESTART;
- case ALGORITHM_ROTATING_N_CONTINUE:
- return DDF_RAID5_N_CONTINUE;
- }
- }
- return -1;
-}
-
-static int rlq_to_layout(int rlq, int prl, int raiddisks)
-{
- switch(prl) {
- case DDF_RAID0:
- return 0; /* hopefully rlq == DDF_RAID0_SIMPLE */
- case DDF_RAID1:
- return 0; /* hopefully rlq == SIMPLE or MULTI depending
- on raiddisks*/
- case DDF_RAID4:
- switch(rlq) {
- case DDF_RAID4_N:
- return 0;
- default:
- /* not supported */
- return -1; /* FIXME this isn't checked */
- }
- case DDF_RAID5:
- switch(rlq) {
- case DDF_RAID5_N_RESTART:
- return ALGORITHM_LEFT_ASYMMETRIC;
- case DDF_RAID5_0_RESTART:
- return ALGORITHM_RIGHT_ASYMMETRIC;
- case DDF_RAID5_N_CONTINUE:
- return ALGORITHM_LEFT_SYMMETRIC;
- default:
- return -1;
- }
- case DDF_RAID6:
- switch(rlq) {
- case DDF_RAID5_N_RESTART:
- return ALGORITHM_ROTATING_N_RESTART;
- case DDF_RAID6_0_RESTART:
- return ALGORITHM_ROTATING_ZERO_RESTART;
- case DDF_RAID5_N_CONTINUE:
- return ALGORITHM_ROTATING_N_CONTINUE;
- default:
- return -1;
- }
- }
- return -1;
-}
-
#ifndef MDASSEMBLE
struct extent {
unsigned long long start, size;
* (dnum) of the given ddf.
* Return a malloced array of 'struct extent'
-FIXME ignore DDF_Legacy devices?
+ * FIXME ignore DDF_Legacy devices?
*/
struct extent *rv;
int n = 0;
- unsigned int i, j;
+ unsigned int i;
- rv = malloc(sizeof(struct extent) * (ddf->max_part + 2));
- if (!rv)
- return NULL;
+ rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
for (i = 0; i < ddf->max_part; i++) {
+ const struct vd_config *bvd;
+ unsigned int ibvd;
struct vcl *v = dl->vlist[i];
- if (v == NULL)
+ if (v == NULL ||
+ get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
+ &bvd, &ibvd) == DDF_NOTFOUND)
continue;
- for (j = 0; j < v->conf.prim_elmnt_count; j++)
- if (v->conf.phys_refnum[j] == dl->disk.refnum) {
- /* This device plays role 'j' in 'v'. */
- rv[n].start = __be64_to_cpu(v->lba_offset[j]);
- rv[n].size = __be64_to_cpu(v->conf.blocks);
- n++;
- break;
- }
+ rv[n].start = __be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
+ rv[n].size = __be64_to_cpu(bvd->blocks);
+ n++;
}
qsort(rv, n, sizeof(*rv), cmp_extent);
mdu_array_info_t *info,
unsigned long long size,
char *name, char *homehost,
- int *uuid)
+ int *uuid, unsigned long long data_offset)
{
/* We are creating a BVD inside a pre-existing container.
* so st->sb is already set.
* We need to create a new vd_config and a new virtual_entry
*/
struct ddf_super *ddf = st->sb;
- unsigned int venum;
+ unsigned int venum, i;
struct virtual_entry *ve;
struct vcl *vcl;
struct vd_config *vc;
- if (__be16_to_cpu(ddf->virt->populated_vdes)
- >= __be16_to_cpu(ddf->virt->max_vdes)) {
- fprintf(stderr, Name": This ddf already has the "
- "maximum of %d virtual devices\n",
- __be16_to_cpu(ddf->virt->max_vdes));
+ if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
+ pr_err("This ddf already has an array called %s\n", name);
return 0;
}
-
- if (name)
- for (venum = 0; venum < __be16_to_cpu(ddf->virt->max_vdes); venum++)
- if (!all_ff(ddf->virt->entries[venum].guid)) {
- char *n = ddf->virt->entries[venum].name;
-
- if (strncmp(name, n, 16) == 0) {
- fprintf(stderr, Name ": This ddf already"
- " has an array called %s\n",
- name);
- return 0;
- }
- }
-
- for (venum = 0; venum < __be16_to_cpu(ddf->virt->max_vdes); venum++)
- if (all_ff(ddf->virt->entries[venum].guid))
- break;
- if (venum == __be16_to_cpu(ddf->virt->max_vdes)) {
- fprintf(stderr, Name ": Cannot find spare slot for "
- "virtual disk - DDF is corrupt\n");
+ venum = find_unused_vde(ddf);
+ if (venum == DDF_NOTFOUND) {
+ pr_err("Cannot find spare slot for virtual disk\n");
return 0;
}
ve = &ddf->virt->entries[venum];
/* Now create a new vd_config */
if (posix_memalign((void**)&vcl, 512,
(offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
- fprintf(stderr, Name ": %s could not allocate vd_config\n", __func__);
+ pr_err("%s could not allocate vd_config\n", __func__);
return 0;
}
- vcl->lba_offset = (__u64*) &vcl->conf.phys_refnum[ddf->mppe];
vcl->vcnum = venum;
- sprintf(st->subarray, "%d", venum);
vcl->block_sizes = NULL; /* FIXME not for CONCAT */
-
vc = &vcl->conf;
vc->magic = DDF_VD_CONF_MAGIC;
vc->timestamp = __cpu_to_be32(time(0)-DECADE);
vc->seqnum = __cpu_to_be32(1);
memset(vc->pad0, 0xff, 24);
- vc->prim_elmnt_count = __cpu_to_be16(info->raid_disks);
vc->chunk_shift = chunk_to_shift(info->chunk_size);
- vc->prl = level_to_prl(info->level);
- vc->rlq = layout_to_rlq(info->level, info->layout, info->raid_disks);
- vc->sec_elmnt_count = 1;
+ if (layout_md2ddf(info, vc) == -1 ||
+ __be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
+ pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
+ __func__, info->level, info->layout, info->raid_disks);
+ free(vcl);
+ return 0;
+ }
vc->sec_elmnt_seq = 0;
- vc->srl = 0;
+ if (alloc_other_bvds(ddf, vcl) != 0) {
+ pr_err("%s could not allocate other bvds\n",
+ __func__);
+ free(vcl);
+ return 0;
+ }
vc->blocks = __cpu_to_be64(info->size * 2);
vc->array_blocks = __cpu_to_be64(
calc_array_size(info->level, info->raid_disks, info->layout,
memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
+ for (i = 1; i < vc->sec_elmnt_count; i++) {
+ memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
+ vcl->other_bvds[i-1]->sec_elmnt_seq = i;
+ }
+
vcl->next = ddf->conflist;
ddf->conflist = vcl;
ddf->currentconf = vcl;
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
return 1;
}
+static int get_svd_state(const struct ddf_super *, const struct vcl *);
+
#ifndef MDASSEMBLE
static void add_to_super_ddf_bvd(struct supertype *st,
mdu_disk_info_t *dk, int fd, char *devname)
struct dl *dl;
struct ddf_super *ddf = st->sb;
struct vd_config *vc;
- __u64 *lba_offset;
- unsigned int working;
unsigned int i;
unsigned long long blocks, pos, esize;
struct extent *ex;
+ unsigned int raid_disk = dk->raid_disk;
if (fd == -1) {
for (dl = ddf->dlist; dl ; dl = dl->next)
return;
vc = &ddf->currentconf->conf;
- lba_offset = ddf->currentconf->lba_offset;
+ if (vc->sec_elmnt_count > 1) {
+ unsigned int n = __be16_to_cpu(vc->prim_elmnt_count);
+ if (raid_disk >= n)
+ vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
+ raid_disk %= n;
+ }
ex = get_extents(ddf, dl);
if (!ex)
return;
ddf->currentdev = dk->raid_disk;
- vc->phys_refnum[dk->raid_disk] = dl->disk.refnum;
- lba_offset[dk->raid_disk] = __cpu_to_be64(pos);
+ vc->phys_refnum[raid_disk] = dl->disk.refnum;
+ LBA_OFFSET(ddf, vc)[raid_disk] = __cpu_to_be64(pos);
for (i = 0; i < ddf->max_part ; i++)
if (dl->vlist[i] == NULL)
if (devname)
dl->devname = devname;
- /* Check how many working raid_disks, and if we can mark
- * array as optimal yet
- */
- working = 0;
-
- for (i = 0; i < __be16_to_cpu(vc->prim_elmnt_count); i++)
- if (vc->phys_refnum[i] != 0xffffffff)
- working++;
-
- /* Find which virtual_entry */
+ /* Check if we can mark array as optimal yet */
i = ddf->currentconf->vcnum;
- if (working == __be16_to_cpu(vc->prim_elmnt_count))
- ddf->virt->entries[i].state =
- (ddf->virt->entries[i].state & ~DDF_state_mask)
- | DDF_state_optimal;
-
- if (vc->prl == DDF_RAID6 &&
- working+1 == __be16_to_cpu(vc->prim_elmnt_count))
- ddf->virt->entries[i].state =
- (ddf->virt->entries[i].state & ~DDF_state_mask)
- | DDF_state_part_optimal;
-
+ ddf->virt->entries[i].state =
+ (ddf->virt->entries[i].state & ~DDF_state_mask)
+ | get_svd_state(ddf, ddf->currentconf);
ddf->phys->entries[dl->pdnum].type &= ~__cpu_to_be16(DDF_Global_Spare);
ddf->phys->entries[dl->pdnum].type |= __cpu_to_be16(DDF_Active_in_VD);
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
+}
+
+static unsigned int find_unused_pde(const struct ddf_super *ddf)
+{
+ unsigned int i;
+ for (i = 0; i < __be16_to_cpu(ddf->phys->max_pdes); i++) {
+ if (all_ff(ddf->phys->entries[i].guid))
+ return i;
+ }
+ return DDF_NOTFOUND;
}
/* add a device to a container, either while creating it or while
* expanding a pre-existing container
*/
static int add_to_super_ddf(struct supertype *st,
- mdu_disk_info_t *dk, int fd, char *devname)
+ mdu_disk_info_t *dk, int fd, char *devname,
+ unsigned long long data_offset)
{
struct ddf_super *ddf = st->sb;
struct dl *dd;
struct phys_disk_entry *pde;
unsigned int n, i;
struct stat stb;
+ __u32 *tptr;
if (ddf->currentconf) {
add_to_super_ddf_bvd(st, dk, fd, devname);
* a phys_disk entry and a more detailed disk_data entry.
*/
fstat(fd, &stb);
+ n = find_unused_pde(ddf);
+ if (n == DDF_NOTFOUND) {
+ pr_err("%s: No free slot in array, cannot add disk\n",
+ __func__);
+ return 1;
+ }
+ pde = &ddf->phys->entries[n];
+ get_dev_size(fd, NULL, &size);
+ if (size <= 32*1024*1024) {
+ pr_err("%s: device size must be at least 32MB\n",
+ __func__);
+ return 1;
+ }
+ size >>= 9;
+
if (posix_memalign((void**)&dd, 512,
sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
- fprintf(stderr, Name
- ": %s could allocate buffer for new disk, aborting\n",
- __func__);
+ pr_err("%s could not allocate buffer for new disk, aborting\n",
+ __func__);
return 1;
}
dd->major = major(stb.st_rdev);
tm = localtime(&now);
sprintf(dd->disk.guid, "%8s%04d%02d%02d",
T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
- *(__u32*)(dd->disk.guid + 16) = random32();
- *(__u32*)(dd->disk.guid + 20) = random32();
+ tptr = (__u32 *)(dd->disk.guid + 16);
+ *tptr++ = random32();
+ *tptr = random32();
do {
/* Cannot be bothered finding a CRC of some irrelevant details*/
for (i = 0; i < ddf->max_part ; i++)
dd->vlist[i] = NULL;
- n = __be16_to_cpu(ddf->phys->used_pdes);
- pde = &ddf->phys->entries[n];
dd->pdnum = n;
if (st->update_tail) {
sizeof(struct phys_disk_entry));
struct phys_disk *pd;
- pd = malloc(len);
+ pd = xmalloc(len);
pd->magic = DDF_PHYS_RECORDS_MAGIC;
pd->used_pdes = __cpu_to_be16(n);
pde = &pd->entries[0];
dd->mdupdate = pd;
- } else {
- n++;
- ddf->phys->used_pdes = __cpu_to_be16(n);
- }
+ } else
+ ddf->phys->used_pdes = __cpu_to_be16(
+ 1 + __be16_to_cpu(ddf->phys->used_pdes));
memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
pde->refnum = dd->disk.refnum;
pde->type = __cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
pde->state = __cpu_to_be16(DDF_Online);
- get_dev_size(fd, NULL, &size);
- /* We are required to reserve 32Meg, and record the size in sectors */
- pde->config_size = __cpu_to_be64( (size - 32*1024*1024) / 512);
+ dd->size = size;
+ /*
+ * If there is already a device in dlist, try to reserve the same
+ * amount of workspace. Otherwise, use 32MB.
+ * We checked disk size above already.
+ */
+#define __calc_lba(new, old, lba, mb) do { \
+ unsigned long long dif; \
+ if ((old) != NULL) \
+ dif = (old)->size - __be64_to_cpu((old)->lba); \
+ else \
+ dif = (new)->size; \
+ if ((new)->size > dif) \
+ (new)->lba = __cpu_to_be64((new)->size - dif); \
+ else \
+ (new)->lba = __cpu_to_be64((new)->size - ((mb)*1024*2)); \
+ } while (0)
+ __calc_lba(dd, ddf->dlist, workspace_lba, 32);
+ __calc_lba(dd, ddf->dlist, primary_lba, 16);
+ __calc_lba(dd, ddf->dlist, secondary_lba, 32);
+ pde->config_size = dd->workspace_lba;
+
sprintf(pde->path, "%17.17s","Information: nil") ;
memset(pde->pad, 0xff, 6);
- dd->size = size >> 9;
if (st->update_tail) {
dd->next = ddf->add_list;
ddf->add_list = dd;
} else {
dd->next = ddf->dlist;
ddf->dlist = dd;
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
}
return 0;
sizeof(struct phys_disk_entry));
struct phys_disk *pd;
- pd = malloc(len);
+ pd = xmalloc(len);
pd->magic = DDF_PHYS_RECORDS_MAGIC;
pd->used_pdes = __cpu_to_be16(dl->pdnum);
pd->entries[0].state = __cpu_to_be16(DDF_Missing);
* called when creating a container or adding another device to a
* container.
*/
+#define NULL_CONF_SZ 4096
-static unsigned char null_conf[4096+512];
-
-static int __write_init_super_ddf(struct supertype *st, int do_close)
+static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type,
+ char *null_aligned)
{
+ unsigned long long sector;
+ struct ddf_header *header;
+ int fd, i, n_config, conf_size;
+ int ret = 0;
+
+ fd = d->fd;
+
+ switch (type) {
+ case DDF_HEADER_PRIMARY:
+ header = &ddf->primary;
+ sector = __be64_to_cpu(header->primary_lba);
+ break;
+ case DDF_HEADER_SECONDARY:
+ header = &ddf->secondary;
+ sector = __be64_to_cpu(header->secondary_lba);
+ break;
+ default:
+ return 0;
+ }
+
+ header->type = type;
+ header->openflag = 1;
+ header->crc = calc_crc(header, 512);
+
+ lseek64(fd, sector<<9, 0);
+ if (write(fd, header, 512) < 0)
+ goto out;
+
+ ddf->controller.crc = calc_crc(&ddf->controller, 512);
+ if (write(fd, &ddf->controller, 512) < 0)
+ goto out;
+
+ ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
+ if (write(fd, ddf->phys, ddf->pdsize) < 0)
+ goto out;
+ ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
+ if (write(fd, ddf->virt, ddf->vdsize) < 0)
+ goto out;
+
+ /* Now write lots of config records. */
+ n_config = ddf->max_part;
+ conf_size = ddf->conf_rec_len * 512;
+ for (i = 0 ; i <= n_config ; i++) {
+ struct vcl *c;
+ struct vd_config *vdc = NULL;
+ if (i == n_config) {
+ c = (struct vcl *)d->spare;
+ if (c)
+ vdc = &c->conf;
+ } else {
+ unsigned int dummy;
+ c = d->vlist[i];
+ if (c)
+ get_pd_index_from_refnum(
+ c, d->disk.refnum,
+ ddf->mppe,
+ (const struct vd_config **)&vdc,
+ &dummy);
+ }
+ if (vdc) {
+ dprintf("writing conf record %i on disk %08x for %s/%u\n",
+ i, d->disk.refnum, guid_str(vdc->guid),
+ vdc->sec_elmnt_seq);
+ vdc->seqnum = header->seq;
+ vdc->crc = calc_crc(vdc, conf_size);
+ if (write(fd, vdc, conf_size) < 0)
+ break;
+ } else {
+ unsigned int togo = conf_size;
+ while (togo > NULL_CONF_SZ) {
+ if (write(fd, null_aligned, NULL_CONF_SZ) < 0)
+ break;
+ togo -= NULL_CONF_SZ;
+ }
+ if (write(fd, null_aligned, togo) < 0)
+ break;
+ }
+ }
+ if (i <= n_config)
+ goto out;
+
+ d->disk.crc = calc_crc(&d->disk, 512);
+ if (write(fd, &d->disk, 512) < 0)
+ goto out;
+ ret = 1;
+out:
+ header->openflag = 0;
+ header->crc = calc_crc(header, 512);
+
+ lseek64(fd, sector<<9, 0);
+ if (write(fd, header, 512) < 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int __write_init_super_ddf(struct supertype *st)
+{
struct ddf_super *ddf = st->sb;
- int i;
struct dl *d;
- int n_config;
- int conf_size;
int attempts = 0;
int successes = 0;
- unsigned long long size, sector;
+ unsigned long long size;
+ char *null_aligned;
+ __u32 seq;
+
+ pr_state(ddf, __func__);
+ if (posix_memalign((void**)&null_aligned, 4096, NULL_CONF_SZ) != 0) {
+ return -ENOMEM;
+ }
+ memset(null_aligned, 0xff, NULL_CONF_SZ);
+
+ seq = ddf->active->seq + 1;
/* try to write updated metadata,
* if we catch a failure move on to the next disk
*/
get_dev_size(fd, NULL, &size);
size /= 512;
- ddf->anchor.workspace_lba = __cpu_to_be64(size - 32*1024*2);
- ddf->anchor.primary_lba = __cpu_to_be64(size - 16*1024*2);
- ddf->anchor.seq = __cpu_to_be32(1);
+ if (d->workspace_lba != 0)
+ ddf->anchor.workspace_lba = d->workspace_lba;
+ else
+ ddf->anchor.workspace_lba =
+ __cpu_to_be64(size - 32*1024*2);
+ if (d->primary_lba != 0)
+ ddf->anchor.primary_lba = d->primary_lba;
+ else
+ ddf->anchor.primary_lba =
+ __cpu_to_be64(size - 16*1024*2);
+ if (d->secondary_lba != 0)
+ ddf->anchor.secondary_lba = d->secondary_lba;
+ else
+ ddf->anchor.secondary_lba =
+ __cpu_to_be64(size - 32*1024*2);
+ ddf->anchor.seq = seq;
memcpy(&ddf->primary, &ddf->anchor, 512);
memcpy(&ddf->secondary, &ddf->anchor, 512);
ddf->anchor.seq = 0xFFFFFFFF; /* no sequencing in anchor */
ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
- ddf->primary.openflag = 0;
- ddf->primary.type = DDF_HEADER_PRIMARY;
-
- ddf->secondary.openflag = 0;
- ddf->secondary.type = DDF_HEADER_SECONDARY;
-
- ddf->primary.crc = calc_crc(&ddf->primary, 512);
- ddf->secondary.crc = calc_crc(&ddf->secondary, 512);
-
- sector = size - 16*1024*2;
- lseek64(fd, sector<<9, 0);
- if (write(fd, &ddf->primary, 512) < 0)
+ if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY,
+ null_aligned))
continue;
- ddf->controller.crc = calc_crc(&ddf->controller, 512);
- if (write(fd, &ddf->controller, 512) < 0)
+ if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY,
+ null_aligned))
continue;
- ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
-
- if (write(fd, ddf->phys, ddf->pdsize) < 0)
- continue;
-
- ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
- if (write(fd, ddf->virt, ddf->vdsize) < 0)
- continue;
-
- /* Now write lots of config records. */
- n_config = ddf->max_part;
- conf_size = ddf->conf_rec_len * 512;
- for (i = 0 ; i <= n_config ; i++) {
- struct vcl *c = d->vlist[i];
- if (i == n_config)
- c = (struct vcl*)d->spare;
-
- if (c) {
- c->conf.crc = calc_crc(&c->conf, conf_size);
- if (write(fd, &c->conf, conf_size) < 0)
- break;
- } else {
- char *null_aligned = (char*)((((unsigned long)null_conf)+511)&~511UL);
- if (null_conf[0] != 0xff)
- memset(null_conf, 0xff, sizeof(null_conf));
- unsigned int togo = conf_size;
- while (togo > sizeof(null_conf)-512) {
- if (write(fd, null_aligned, sizeof(null_conf)-512) < 0)
- break;
- togo -= sizeof(null_conf)-512;
- }
- if (write(fd, null_aligned, togo) < 0)
- break;
- }
- }
- if (i <= n_config)
- continue;
- d->disk.crc = calc_crc(&d->disk, 512);
- if (write(fd, &d->disk, 512) < 0)
- continue;
-
- /* Maybe do the same for secondary */
-
lseek64(fd, (size-1)*512, SEEK_SET);
if (write(fd, &ddf->anchor, 512) < 0)
continue;
successes++;
}
-
- if (do_close)
- for (d = ddf->dlist; d; d=d->next) {
- close(d->fd);
- d->fd = -1;
- }
+ free(null_aligned);
return attempts != successes;
}
/* First the virtual disk. We have a slightly fake header */
len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
- vd = malloc(len);
+ vd = xmalloc(len);
*vd = *ddf->virt;
vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
vd->populated_vdes = __cpu_to_be16(currentconf->vcnum);
/* Then the vd_config */
len = ddf->conf_rec_len * 512;
- vc = malloc(len);
+ vc = xmalloc(len);
memcpy(vc, ¤tconf->conf, len);
append_metadata_update(st, vc, len);
/* FIXME I need to close the fds! */
return 0;
- } else {
+ } else {
struct dl *d;
for (d = ddf->dlist; d; d=d->next)
- while (Kill(d->devname, NULL, 0, 1, 1) == 0);
- return __write_init_super_ddf(st, 1);
+ while (Kill(d->devname, NULL, 0, -1, 1) == 0);
+ return __write_init_super_ddf(st);
}
}
#endif
-static __u64 avail_size_ddf(struct supertype *st, __u64 devsize)
+static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
+ unsigned long long data_offset)
{
/* We must reserve the last 32Meg */
if (devsize <= 32*1024*2)
int cnt = 0;
for (dl = ddf->dlist; dl ; dl=dl->next) {
- dl->raiddisk = -1;
+ dl->raiddisk = -1;
dl->esize = 0;
}
/* Now find largest extent on each device */
free(e);
}
if (cnt < raiddisks) {
- fprintf(stderr, Name ": not enough devices with space to create array.\n");
+ pr_err("not enough devices with space to create array.\n");
return 0; /* No enough free spaces large enough */
}
if (size == 0) {
continue;
/* This is bigger than 'size', see if there are enough */
cnt = 0;
- for (dl2 = dl; dl2 ; dl2=dl2->next)
+ for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
if (dl2->esize >= dl->esize)
cnt++;
if (cnt >= raiddisks)
}
*freesize = size;
if (size < 32) {
- fprintf(stderr, Name ": not enough spare devices to create array.\n");
+ pr_err("not enough spare devices to create array.\n");
return 0;
}
}
for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
if (dl->esize < size)
continue;
-
+
dl->raiddisk = cnt;
cnt++;
}
return 1;
}
-
-
static int
validate_geometry_ddf_container(struct supertype *st,
int level, int layout, int raiddisks,
int chunk, unsigned long long size,
+ unsigned long long data_offset,
char *dev, unsigned long long *freesize,
int verbose);
static int validate_geometry_ddf_bvd(struct supertype *st,
int level, int layout, int raiddisks,
- int chunk, unsigned long long size,
+ int *chunk, unsigned long long size,
+ unsigned long long data_offset,
char *dev, unsigned long long *freesize,
int verbose);
static int validate_geometry_ddf(struct supertype *st,
int level, int layout, int raiddisks,
- int chunk, unsigned long long size,
+ int *chunk, unsigned long long size,
+ unsigned long long data_offset,
char *dev, unsigned long long *freesize,
int verbose)
{
* If given BVDs, we make an SVD, changing all the GUIDs in the process.
*/
+ if (chunk && *chunk == UnSet)
+ *chunk = DEFAULT_CHUNK;
+
+ if (level == LEVEL_NONE) level = LEVEL_CONTAINER;
if (level == LEVEL_CONTAINER) {
/* Must be a fresh device to add to a container */
return validate_geometry_ddf_container(st, level, layout,
- raiddisks, chunk,
- size, dev, freesize,
+ raiddisks, chunk?*chunk:0,
+ size, data_offset, dev,
+ freesize,
verbose);
}
if (!dev) {
- /* Initial sanity check. Exclude illegal levels. */
- int i;
- for (i=0; ddf_level_num[i].num1 != MAXINT; i++)
- if (ddf_level_num[i].num2 == level)
- break;
- if (ddf_level_num[i].num1 == MAXINT) {
+ mdu_array_info_t array = {
+ .level = level, .layout = layout,
+ .raid_disks = raiddisks
+ };
+ struct vd_config conf;
+ if (layout_md2ddf(&array, &conf) == -1) {
if (verbose)
- fprintf(stderr, Name ": DDF does not support level %d arrays\n",
- level);
+ pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
+ level, layout, raiddisks);
return 0;
}
/* Should check layout? etc */
* chosen so that add_to_super/getinfo_super
* can return them.
*/
- return reserve_space(st, raiddisks, size, chunk, freesize);
+ return reserve_space(st, raiddisks, size, chunk?*chunk:0, freesize);
}
return 1;
}
* Should make a distinction one day.
*/
return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
- chunk, size, dev, freesize,
+ chunk, size, data_offset, dev,
+ freesize,
verbose);
}
/* This is the first device for the array.
*/
fd = open(dev, O_RDONLY|O_EXCL, 0);
if (fd >= 0) {
- sra = sysfs_read(fd, 0, GET_VERSION);
+ sra = sysfs_read(fd, NULL, GET_VERSION);
close(fd);
if (sra && sra->array.major_version == -1 &&
strcmp(sra->text_version, "ddf") == 0) {
}
if (verbose)
- fprintf(stderr,
- Name ": ddf: Cannot create this array "
- "on device %s - a container is required.\n",
- dev);
+ pr_err("ddf: Cannot create this array "
+ "on device %s - a container is required.\n",
+ dev);
return 0;
}
if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
if (verbose)
- fprintf(stderr, Name ": ddf: Cannot open %s: %s\n",
- dev, strerror(errno));
+ pr_err("ddf: Cannot open %s: %s\n",
+ dev, strerror(errno));
return 0;
}
/* Well, it is in use by someone, maybe a 'ddf' container. */
if (cfd < 0) {
close(fd);
if (verbose)
- fprintf(stderr, Name ": ddf: Cannot use %s: %s\n",
- dev, strerror(EBUSY));
+ pr_err("ddf: Cannot use %s: %s\n",
+ dev, strerror(EBUSY));
return 0;
}
- sra = sysfs_read(cfd, 0, GET_VERSION);
+ sra = sysfs_read(cfd, NULL, GET_VERSION);
close(fd);
if (sra && sra->array.major_version == -1 &&
strcmp(sra->text_version, "ddf") == 0) {
* and try to create a bvd
*/
struct ddf_super *ddf;
- if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL, 1) == 0) {
+ if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
st->sb = ddf;
- st->container_dev = fd2devnum(cfd);
+ strcpy(st->container_devnm, fd2devnm(cfd));
close(cfd);
return validate_geometry_ddf_bvd(st, level, layout,
raiddisks, chunk, size,
+ data_offset,
dev, freesize,
verbose);
}
validate_geometry_ddf_container(struct supertype *st,
int level, int layout, int raiddisks,
int chunk, unsigned long long size,
+ unsigned long long data_offset,
char *dev, unsigned long long *freesize,
int verbose)
{
fd = open(dev, O_RDONLY|O_EXCL, 0);
if (fd < 0) {
if (verbose)
- fprintf(stderr, Name ": ddf: Cannot open %s: %s\n",
- dev, strerror(errno));
+ pr_err("ddf: Cannot open %s: %s\n",
+ dev, strerror(errno));
return 0;
}
if (!get_dev_size(fd, dev, &ldsize)) {
}
close(fd);
- *freesize = avail_size_ddf(st, ldsize >> 9);
+ *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
if (*freesize == 0)
return 0;
static int validate_geometry_ddf_bvd(struct supertype *st,
int level, int layout, int raiddisks,
- int chunk, unsigned long long size,
+ int *chunk, unsigned long long size,
+ unsigned long long data_offset,
char *dev, unsigned long long *freesize,
int verbose)
{
/* ddf/bvd supports lots of things, but not containers */
if (level == LEVEL_CONTAINER) {
if (verbose)
- fprintf(stderr, Name ": DDF cannot create a container within an container\n");
+ pr_err("DDF cannot create a container within an container\n");
return 0;
}
/* We must have the container info already read in. */
}
if (dcnt < raiddisks) {
if (verbose)
- fprintf(stderr,
- Name ": ddf: Not enough devices with "
- "space for this array (%d < %d)\n",
- dcnt, raiddisks);
+ pr_err("ddf: Not enough devices with "
+ "space for this array (%d < %d)\n",
+ dcnt, raiddisks);
return 0;
}
return 1;
}
if (!dl) {
if (verbose)
- fprintf(stderr, Name ": ddf: %s is not in the "
- "same DDF set\n",
- dev);
+ pr_err("ddf: %s is not in the "
+ "same DDF set\n",
+ dev);
return 0;
}
e = get_extents(ddf, dl);
maxsize = 0;
i = 0;
if (e) do {
- unsigned long long esize;
- esize = e[i].start - pos;
- if (esize >= maxsize)
- maxsize = esize;
- pos = e[i].start + e[i].size;
- i++;
- } while (e[i-1].size);
+ unsigned long long esize;
+ esize = e[i].start - pos;
+ if (esize >= maxsize)
+ maxsize = esize;
+ pos = e[i].start + e[i].size;
+ i++;
+ } while (e[i-1].size);
*freesize = maxsize;
// FIXME here I am
}
static int load_super_ddf_all(struct supertype *st, int fd,
- void **sbp, char *devname, int keep_fd)
+ void **sbp, char *devname)
{
struct mdinfo *sra;
struct ddf_super *super;
int rv;
sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
- dfd = dev_open(nm, keep_fd? O_RDWR : O_RDONLY);
+ dfd = dev_open(nm, O_RDWR);
if (dfd < 0)
return 2;
rv = load_ddf_headers(dfd, super, NULL);
if (rv == 0)
- rv = load_ddf_local(dfd, super, NULL, keep_fd);
- if (!keep_fd) close(dfd);
+ rv = load_ddf_local(dfd, super, NULL, 1);
if (rv)
return 1;
}
- if (st->subarray[0]) {
- unsigned long val;
- struct vcl *v;
- char *ep;
-
- val = strtoul(st->subarray, &ep, 10);
- if (*ep != '\0') {
- free(super);
- return 1;
- }
-
- for (v = super->conflist; v; v = v->next)
- if (v->vcnum == val)
- super->currentconf = v;
- if (!super->currentconf) {
- free(super);
- return 1;
- }
- }
*sbp = super;
if (st->ss == NULL) {
st->ss = &super_ddf;
st->minor_version = 0;
st->max_devs = 512;
- st->container_dev = fd2devnum(fd);
}
- st->loaded_container = 1;
+ strcpy(st->container_devnm, fd2devnm(fd));
return 0;
}
+
+static int load_container_ddf(struct supertype *st, int fd,
+ char *devname)
+{
+ return load_super_ddf_all(st, fd, &st->sb, devname);
+}
+
#endif /* MDASSEMBLE */
-static struct mdinfo *container_content_ddf(struct supertype *st)
+static int check_secondary(const struct vcl *vc)
+{
+ const struct vd_config *conf = &vc->conf;
+ int i;
+
+ /* The only DDF secondary RAID level md can support is
+ * RAID 10, if the stripe sizes and Basic volume sizes
+ * are all equal.
+ * Other configurations could in theory be supported by exposing
+ * the BVDs to user space and using device mapper for the secondary
+ * mapping. So far we don't support that.
+ */
+
+ __u64 sec_elements[4] = {0, 0, 0, 0};
+#define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1ULL<<((n)&63)))
+#define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1ULL<<((n)&63))) != 0)
+
+ if (vc->other_bvds == NULL) {
+ pr_err("No BVDs for secondary RAID found\n");
+ return -1;
+ }
+ if (conf->prl != DDF_RAID1) {
+ pr_err("Secondary RAID level only supported for mirrored BVD\n");
+ return -1;
+ }
+ if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
+ pr_err("Secondary RAID level %d is unsupported\n",
+ conf->srl);
+ return -1;
+ }
+ __set_sec_seen(conf->sec_elmnt_seq);
+ for (i = 0; i < conf->sec_elmnt_count-1; i++) {
+ const struct vd_config *bvd = vc->other_bvds[i];
+ if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
+ continue;
+ if (bvd->srl != conf->srl) {
+ pr_err("Inconsistent secondary RAID level across BVDs\n");
+ return -1;
+ }
+ if (bvd->prl != conf->prl) {
+ pr_err("Different RAID levels for BVDs are unsupported\n");
+ return -1;
+ }
+ if (bvd->prim_elmnt_count != conf->prim_elmnt_count) {
+ pr_err("All BVDs must have the same number of primary elements\n");
+ return -1;
+ }
+ if (bvd->chunk_shift != conf->chunk_shift) {
+ pr_err("Different strip sizes for BVDs are unsupported\n");
+ return -1;
+ }
+ if (bvd->array_blocks != conf->array_blocks) {
+ pr_err("Different BVD sizes are unsupported\n");
+ return -1;
+ }
+ __set_sec_seen(bvd->sec_elmnt_seq);
+ }
+ for (i = 0; i < conf->sec_elmnt_count; i++) {
+ if (!__was_sec_seen(i)) {
+ pr_err("BVD %d is missing\n", i);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
+ __u32 refnum, unsigned int nmax,
+ const struct vd_config **bvd,
+ unsigned int *idx)
+{
+ unsigned int i, j, n, sec, cnt;
+
+ cnt = __be16_to_cpu(vc->conf.prim_elmnt_count);
+ sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
+
+ for (i = 0, j = 0 ; i < nmax ; i++) {
+ /* j counts valid entries for this BVD */
+ if (vc->conf.phys_refnum[i] != 0xffffffff)
+ j++;
+ if (vc->conf.phys_refnum[i] == refnum) {
+ *bvd = &vc->conf;
+ *idx = i;
+ return sec * cnt + j - 1;
+ }
+ }
+ if (vc->other_bvds == NULL)
+ goto bad;
+
+ for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
+ struct vd_config *vd = vc->other_bvds[n-1];
+ sec = vd->sec_elmnt_seq;
+ if (sec == DDF_UNUSED_BVD)
+ continue;
+ for (i = 0, j = 0 ; i < nmax ; i++) {
+ if (vd->phys_refnum[i] != 0xffffffff)
+ j++;
+ if (vd->phys_refnum[i] == refnum) {
+ *bvd = vd;
+ *idx = i;
+ return sec * cnt + j - 1;
+ }
+ }
+ }
+bad:
+ *bvd = NULL;
+ return DDF_NOTFOUND;
+}
+
+static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
{
/* Given a container loaded by load_super_ddf_all,
* extract information about all the arrays into
unsigned int i;
unsigned int j;
struct mdinfo *this;
- this = malloc(sizeof(*this));
- memset(this, 0, sizeof(*this));
+ char *ep;
+ __u32 *cptr;
+ unsigned int pd;
+
+ if (subarray &&
+ (strtoul(subarray, &ep, 10) != vc->vcnum ||
+ *ep != '\0'))
+ continue;
+
+ if (vc->conf.sec_elmnt_count > 1) {
+ if (check_secondary(vc) != 0)
+ continue;
+ }
+
+ this = xcalloc(1, sizeof(*this));
this->next = rest;
rest = this;
- this->array.level = map_num1(ddf_level_num, vc->conf.prl);
- this->array.raid_disks =
- __be16_to_cpu(vc->conf.prim_elmnt_count);
- this->array.layout = rlq_to_layout(vc->conf.rlq, vc->conf.prl,
- this->array.raid_disks);
+ if (layout_ddf2md(&vc->conf, &this->array))
+ continue;
this->array.md_minor = -1;
this->array.major_version = -1;
this->array.minor_version = -2;
- this->array.ctime = DECADE +
- __be32_to_cpu(*(__u32*)(vc->conf.guid+16));
+ cptr = (__u32 *)(vc->conf.guid + 16);
+ this->array.ctime = DECADE + __be32_to_cpu(*cptr);
this->array.utime = DECADE +
__be32_to_cpu(vc->conf.timestamp);
this->array.chunk_size = 512 << vc->conf.chunk_shift;
ddf->currentconf = NULL;
sprintf(this->text_version, "/%s/%d",
- devnum2devname(st->container_dev),
- this->container_member);
+ st->container_devnm, this->container_member);
- for (i = 0 ; i < ddf->mppe ; i++) {
+ for (pd = 0; pd < __be16_to_cpu(ddf->phys->used_pdes); pd++) {
struct mdinfo *dev;
struct dl *d;
+ const struct vd_config *bvd;
+ unsigned int iphys;
int stt;
- if (vc->conf.phys_refnum[i] == 0xFFFFFFFF)
+ if (ddf->phys->entries[pd].refnum == 0xFFFFFFFF)
continue;
- for (d = ddf->dlist; d ; d=d->next)
- if (d->disk.refnum == vc->conf.phys_refnum[i])
- break;
- if (d == NULL)
- /* Haven't found that one yet, maybe there are others */
- continue;
- stt = __be16_to_cpu(ddf->phys->entries[d->pdnum].state);
+ stt = __be16_to_cpu(ddf->phys->entries[pd].state);
if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
!= DDF_Online)
continue;
+ i = get_pd_index_from_refnum(
+ vc, ddf->phys->entries[pd].refnum,
+ ddf->mppe, &bvd, &iphys);
+ if (i == DDF_NOTFOUND)
+ continue;
+
this->array.working_disks++;
- dev = malloc(sizeof(*dev));
- memset(dev, 0, sizeof(*dev));
+ for (d = ddf->dlist; d ; d=d->next)
+ if (d->disk.refnum ==
+ ddf->phys->entries[pd].refnum)
+ break;
+ if (d == NULL)
+ /* Haven't found that one yet, maybe there are others */
+ continue;
+
+ dev = xcalloc(1, sizeof(*dev));
dev->next = this->devs;
this->devs = dev;
dev->recovery_start = MaxSector;
dev->events = __be32_to_cpu(ddf->primary.seq);
- dev->data_offset = __be64_to_cpu(vc->lba_offset[i]);
- dev->component_size = __be64_to_cpu(vc->conf.blocks);
+ dev->data_offset =
+ __be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
+ dev->component_size = __be64_to_cpu(bvd->blocks);
if (d->devname)
strcpy(dev->name, d->devname);
}
if (!ddf)
return 1;
- /* ->dlist and ->conflist will be set for updates, currently not
- * supported
- */
- if (ddf->dlist || ddf->conflist)
- return 1;
-
if (!get_dev_size(fd, NULL, &dsize))
return 1;
+ if (ddf->dlist || ddf->conflist) {
+ struct stat sta;
+ struct dl *dl;
+ int ofd, ret;
+
+ if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
+ pr_err("%s: file descriptor for invalid device\n",
+ __func__);
+ return 1;
+ }
+ for (dl = ddf->dlist; dl; dl = dl->next)
+ if (dl->major == (int)major(sta.st_rdev) &&
+ dl->minor == (int)minor(sta.st_rdev))
+ break;
+ if (!dl) {
+ pr_err("%s: couldn't find disk %d/%d\n", __func__,
+ (int)major(sta.st_rdev),
+ (int)minor(sta.st_rdev));
+ return 1;
+ }
+ /*
+ For DDF, writing to just one disk makes no sense.
+ We would run the risk of writing inconsistent meta data
+ to the devices. So just call __write_init_super_ddf and
+ write to all devices, including this one.
+ Use the fd passed to this function, just in case dl->fd
+ is invalid.
+ */
+ ofd = dl->fd;
+ dl->fd = fd;
+ ret = __write_init_super_ddf(st);
+ dl->fd = ofd;
+ return ret;
+ }
+
if (posix_memalign(&buf, 512, 512) != 0)
return 1;
memset(buf, 0, 512);
*/
struct ddf_super *first = st->sb;
struct ddf_super *second = tst->sb;
+ struct dl *dl1, *dl2;
+ struct vcl *vl1, *vl2;
+ unsigned int max_vds, max_pds, pd, vd;
if (!first) {
st->sb = tst->sb;
if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
return 2;
+ if (first->anchor.seq != second->anchor.seq) {
+ dprintf("%s: sequence number mismatch %u/%u\n", __func__,
+ __be32_to_cpu(first->anchor.seq),
+ __be32_to_cpu(second->anchor.seq));
+ return 3;
+ }
+ if (first->max_part != second->max_part ||
+ first->phys->used_pdes != second->phys->used_pdes ||
+ first->virt->populated_vdes != second->virt->populated_vdes) {
+ dprintf("%s: PD/VD number mismatch\n", __func__);
+ return 3;
+ }
+
+ max_pds = __be16_to_cpu(first->phys->used_pdes);
+ for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
+ for (pd = 0; pd < max_pds; pd++)
+ if (first->phys->entries[pd].refnum == dl2->disk.refnum)
+ break;
+ if (pd == max_pds) {
+ dprintf("%s: no match for disk %08x\n", __func__,
+ __be32_to_cpu(dl2->disk.refnum));
+ return 3;
+ }
+ }
+
+ max_vds = __be16_to_cpu(first->active->max_vd_entries);
+ for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
+ if (vl2->conf.magic != DDF_VD_CONF_MAGIC)
+ continue;
+ for (vd = 0; vd < max_vds; vd++)
+ if (!memcmp(first->virt->entries[vd].guid,
+ vl2->conf.guid, DDF_GUID_LEN))
+ break;
+ if (vd == max_vds) {
+ dprintf("%s: no match for VD config\n", __func__);
+ return 3;
+ }
+ }
/* FIXME should I look at anything else? */
+
+ /*
+ At this point we are fairly sure that the meta data matches.
+ But the new disk may contain additional local data.
+ Add it to the super block.
+ */
+ for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
+ for (vl1 = first->conflist; vl1; vl1 = vl1->next)
+ if (!memcmp(vl1->conf.guid, vl2->conf.guid,
+ DDF_GUID_LEN))
+ break;
+ if (vl1) {
+ if (vl1->other_bvds != NULL &&
+ vl1->conf.sec_elmnt_seq !=
+ vl2->conf.sec_elmnt_seq) {
+ dprintf("%s: adding BVD %u\n", __func__,
+ vl2->conf.sec_elmnt_seq);
+ add_other_bvd(vl1, &vl2->conf,
+ first->conf_rec_len*512);
+ }
+ continue;
+ }
+
+ if (posix_memalign((void **)&vl1, 512,
+ (first->conf_rec_len*512 +
+ offsetof(struct vcl, conf))) != 0) {
+ pr_err("%s could not allocate vcl buf\n",
+ __func__);
+ return 3;
+ }
+
+ vl1->next = first->conflist;
+ vl1->block_sizes = NULL;
+ memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
+ if (alloc_other_bvds(first, vl1) != 0) {
+ pr_err("%s could not allocate other bvds\n",
+ __func__);
+ free(vl1);
+ return 3;
+ }
+ for (vd = 0; vd < max_vds; vd++)
+ if (!memcmp(first->virt->entries[vd].guid,
+ vl1->conf.guid, DDF_GUID_LEN))
+ break;
+ vl1->vcnum = vd;
+ dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
+ first->conflist = vl1;
+ }
+
+ for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
+ for (dl1 = first->dlist; dl1; dl1 = dl1->next)
+ if (dl1->disk.refnum == dl2->disk.refnum)
+ break;
+ if (dl1)
+ continue;
+
+ if (posix_memalign((void **)&dl1, 512,
+ sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
+ != 0) {
+ pr_err("%s could not allocate disk info buffer\n",
+ __func__);
+ return 3;
+ }
+ memcpy(dl1, dl2, sizeof(*dl1));
+ dl1->mdupdate = NULL;
+ dl1->next = first->dlist;
+ dl1->fd = -1;
+ for (pd = 0; pd < max_pds; pd++)
+ if (first->phys->entries[pd].refnum == dl1->disk.refnum)
+ break;
+ dl1->pdnum = pd;
+ if (dl2->spare) {
+ if (posix_memalign((void **)&dl1->spare, 512,
+ first->conf_rec_len*512) != 0) {
+ pr_err("%s could not allocate spare info buf\n",
+ __func__);
+ return 3;
+ }
+ memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
+ }
+		/* Translate dl2's vlist (pointers into 'second') into the
+		 * corresponding entries of first->conflist, matched by
+		 * VD GUID. */
+		for (vd = 0 ; vd < first->max_part ; vd++) {
+			if (!dl2->vlist[vd]) {
+				dl1->vlist[vd] = NULL;
+				continue;
+			}
+			for (vl1 = first->conflist; vl1; vl1 = vl1->next)
+				if (!memcmp(vl1->conf.guid,
+					    dl2->vlist[vd]->conf.guid,
+					    DDF_GUID_LEN))
+					break;
+			/* Assign after the search: the slot gets the matching
+			 * entry, or NULL when no match exists.  (Assigning
+			 * inside the loop left either a stale pointer into
+			 * 'second' - copied by the earlier memcpy - when the
+			 * first entry matched, or the last non-matching entry
+			 * when nothing matched.) */
+			dl1->vlist[vd] = vl1;
+		}
+ first->dlist = dl1;
+ dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
+ dl1->disk.refnum);
+ }
+
return 0;
}
*/
static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
{
- dprintf("ddf: open_new %s\n", inst);
- a->info.container_member = atoi(inst);
+ struct ddf_super *ddf = c->sb;
+ int n = atoi(inst);
+ if (all_ff(ddf->virt->entries[n].guid)) {
+ pr_err("%s: subarray %d doesn't exist\n", __func__, n);
+ return -ENODEV;
+ }
+ dprintf("ddf: open_new %d\n", n);
+ a->info.container_member = n;
return 0;
}
else
ddf->virt->entries[inst].state |= DDF_state_inconsistent;
if (old != ddf->virt->entries[inst].state)
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
old = ddf->virt->entries[inst].init_state;
ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
else
ddf->virt->entries[inst].init_state |= DDF_init_quick;
if (old != ddf->virt->entries[inst].init_state)
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
- dprintf("ddf mark %d %s %llu\n", inst, consistent?"clean":"dirty",
+ dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
+ guid_str(ddf->virt->entries[inst].guid), a->curr_state,
+ consistent?"clean":"dirty",
a->info.resync_start);
return consistent;
}
-#define container_of(ptr, type, member) ({ \
- const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
+/* Compute the DDF state of a single BVD: count how many of its
+ * prim_elmnt_count member slots map to a physical disk whose state is
+ * exactly Online (neither Failed nor Rebuilding), then derive the
+ * array state from that count and the primary RAID level.
+ */
+static int get_bvd_state(const struct ddf_super *ddf,
+			 const struct vd_config *vc)
+{
+	unsigned int i, n_bvd, working = 0;
+	unsigned int n_prim = __be16_to_cpu(vc->prim_elmnt_count);
+	int pd, st, state;
+	for (i = 0; i < n_prim; i++) {
+		/* Skip member slots not populated in this BVD */
+		if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
+			continue;
+		pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
+		if (pd < 0)
+			continue;
+		st = __be16_to_cpu(ddf->phys->entries[pd].state);
+		if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
+		    == DDF_Online)
+			working++;
+	}
+
+	state = DDF_state_degraded;
+	if (working == n_prim)
+		state = DDF_state_optimal;
+	else
+		switch (vc->prl) {
+		case DDF_RAID0:
+		case DDF_CONCAT:
+		case DDF_JBOD:
+			/* No redundancy: any missing member is fatal */
+			state = DDF_state_failed;
+			break;
+		case DDF_RAID1:
+			if (working == 0)
+				state = DDF_state_failed;
+			else if (working >= 2)
+				state = DDF_state_part_optimal;
+			break;
+		case DDF_RAID4:
+		case DDF_RAID5:
+			/* Survives the loss of one member */
+			if (working < n_prim - 1)
+				state = DDF_state_failed;
+			break;
+		case DDF_RAID6:
+			/* Survives the loss of two members */
+			if (working < n_prim - 2)
+				state = DDF_state_failed;
+			else if (working == n_prim - 1)
+				state = DDF_state_part_optimal;
+			break;
+		}
+	return state;
+}
+
+/* Fold the states of two BVDs into the state of the secondary-level
+ * (hybrid) virtual disk.  For a mirrored secondary level one fully
+ * working copy keeps the VD operational; for all other secondary
+ * levels every BVD is required, so the worst individual state wins.
+ */
+static int secondary_state(int state, int other, int seclevel)
+{
+	if (state == DDF_state_optimal && other == DDF_state_optimal)
+		return DDF_state_optimal;
+	if (seclevel == DDF_2MIRRORED) {
+		/* One optimal copy is enough for partial optimality */
+		if (state == DDF_state_optimal || other == DDF_state_optimal)
+			return DDF_state_part_optimal;
+		if (state == DDF_state_failed && other == DDF_state_failed)
+			return DDF_state_failed;
+		return DDF_state_degraded;
+	} else {
+		/* Striped/concatenated secondary: worst state dominates */
+		if (state == DDF_state_failed || other == DDF_state_failed)
+			return DDF_state_failed;
+		if (state == DDF_state_degraded || other == DDF_state_degraded)
+			return DDF_state_degraded;
+		return DDF_state_part_optimal;
+	}
+}
+
+/* Compute the state of a (possibly hybrid) virtual disk: start from the
+ * primary BVD's state and fold in each other BVD's state according to
+ * the secondary RAID level.  For a simple VD (sec_elmnt_count == 1) the
+ * loop does not run and this is just get_bvd_state().
+ */
+static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
+{
+	int state = get_bvd_state(ddf, &vcl->conf);
+	unsigned int i;
+	for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
+		state = secondary_state(
+			state,
+			get_bvd_state(ddf, vcl->other_bvds[i-1]),
+			vcl->conf.srl);
+	}
+	return state;
+}
+
/*
* The state of each disk is stored in the global phys_disk structure
* in phys_disk.entries[n].state.
static void ddf_set_disk(struct active_array *a, int n, int state)
{
struct ddf_super *ddf = a->container->sb;
- unsigned int inst = a->info.container_member;
- struct vd_config *vc = find_vdcr(ddf, inst);
- int pd = find_phys(ddf, vc->phys_refnum[n]);
- int i, st, working;
+ unsigned int inst = a->info.container_member, n_bvd;
+ struct vcl *vcl;
+ struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
+ &n_bvd, &vcl);
+ int pd;
struct mdinfo *mdi;
struct dl *dl;
if (!dl)
return;
+ pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
if (pd < 0 || pd != dl->pdnum) {
/* disk doesn't currently exist or has changed.
* If it is now in_sync, insert it. */
+ dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
+ __func__, dl->pdnum, dl->major, dl->minor,
+ dl->disk.refnum);
+ dprintf("%s: array %u disk %u ref %08x pd %d\n",
+ __func__, inst, n_bvd, vc->phys_refnum[n_bvd], pd);
if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
- struct vcl *vcl;
- pd = dl->pdnum;
- vc->phys_refnum[n] = dl->disk.refnum;
- vcl = container_of(vc, struct vcl, conf);
- vcl->lba_offset[n] = mdi->data_offset;
+ pd = dl->pdnum; /* FIXME: is this really correct ? */
+ vc->phys_refnum[n_bvd] = dl->disk.refnum;
+ LBA_OFFSET(ddf, vc)[n_bvd] =
+ __cpu_to_be64(mdi->data_offset);
ddf->phys->entries[pd].type &=
~__cpu_to_be16(DDF_Global_Spare);
ddf->phys->entries[pd].type |=
__cpu_to_be16(DDF_Active_in_VD);
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
}
} else {
int old = ddf->phys->entries[pd].state;
ddf->phys->entries[pd].state &= __cpu_to_be16(~DDF_Rebuilding);
}
if (old != ddf->phys->entries[pd].state)
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
}
dprintf("ddf: set_disk %d to %x\n", n, state);
* It needs to be one of "optimal", "degraded", "failed".
* I don't understand 'deleted' or 'missing'.
*/
- working = 0;
- for (i=0; i < a->info.array.raid_disks; i++) {
- pd = find_phys(ddf, vc->phys_refnum[i]);
- if (pd < 0)
- continue;
- st = __be16_to_cpu(ddf->phys->entries[pd].state);
- if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
- == DDF_Online)
- working++;
- }
- state = DDF_state_degraded;
- if (working == a->info.array.raid_disks)
- state = DDF_state_optimal;
- else switch(vc->prl) {
- case DDF_RAID0:
- case DDF_CONCAT:
- case DDF_JBOD:
- state = DDF_state_failed;
- break;
- case DDF_RAID1:
- if (working == 0)
- state = DDF_state_failed;
- else if (working == 2 && state == DDF_state_degraded)
- state = DDF_state_part_optimal;
- break;
- case DDF_RAID4:
- case DDF_RAID5:
- if (working < a->info.array.raid_disks-1)
- state = DDF_state_failed;
- break;
- case DDF_RAID6:
- if (working < a->info.array.raid_disks-2)
- state = DDF_state_failed;
- else if (working == a->info.array.raid_disks-1)
- state = DDF_state_part_optimal;
- break;
- }
+ state = get_svd_state(ddf, vcl);
if (ddf->virt->entries[inst].state !=
((ddf->virt->entries[inst].state & ~DDF_state_mask)
ddf->virt->entries[inst].state =
(ddf->virt->entries[inst].state & ~DDF_state_mask)
| state;
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
}
}
if (!ddf->updates_pending)
return;
ddf->updates_pending = 0;
- __write_init_super_ddf(st, 0);
+ __write_init_super_ddf(st);
dprintf("ddf: sync_metadata\n");
}
break;
}
}
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
return;
}
if (!all_ff(ddf->phys->entries[ent].guid))
return;
ddf->phys->entries[ent] = pd->entries[0];
ddf->phys->used_pdes = __cpu_to_be16(1 +
- __be16_to_cpu(ddf->phys->used_pdes));
- ddf->updates_pending = 1;
+ __be16_to_cpu(ddf->phys->used_pdes));
+ ddf_set_updates_pending(ddf);
if (ddf->add_list) {
struct active_array *a;
struct dl *al = ddf->add_list;
return;
vd = (struct virtual_disk*)update->buf;
- ent = __be16_to_cpu(vd->populated_vdes);
- if (ent >= __be16_to_cpu(ddf->virt->max_vdes))
- return;
- if (!all_ff(ddf->virt->entries[ent].guid))
+ ent = find_unused_vde(ddf);
+ if (ent == DDF_NOTFOUND)
return;
ddf->virt->entries[ent] = vd->entries[0];
ddf->virt->populated_vdes = __cpu_to_be16(1 +
- __be16_to_cpu(ddf->virt->populated_vdes));
- ddf->updates_pending = 1;
+ __be16_to_cpu(ddf->virt->populated_vdes));
+ ddf_set_updates_pending(ddf);
break;
case DDF_VD_CONF_MAGIC:
/* An update, just copy the phys_refnum and lba_offset
* fields
*/
- memcpy(vcl->conf.phys_refnum, vc->phys_refnum,
+ struct vd_config *conf = &vcl->conf;
+ if (vcl->other_bvds != NULL &&
+ conf->sec_elmnt_seq != vc->sec_elmnt_seq) {
+ unsigned int i;
+ for (i = 1; i < conf->sec_elmnt_count; i++)
+ if (vcl->other_bvds[i-1]->sec_elmnt_seq
+ == vc->sec_elmnt_seq)
+ break;
+ if (i == conf->sec_elmnt_count) {
+ pr_err("%s/DDF_VD_CONF_MAGIC: BVD %u not found\n",
+ __func__, vc->sec_elmnt_seq);
+ return;
+ }
+ conf = vcl->other_bvds[i-1];
+ }
+ memcpy(conf->phys_refnum, vc->phys_refnum,
mppe * (sizeof(__u32) + sizeof(__u64)));
} else {
/* A new VD_CONF */
update->space = NULL;
vcl->next = ddf->conflist;
memcpy(&vcl->conf, vc, update->len);
- vcl->lba_offset = (__u64*)
- &vcl->conf.phys_refnum[mppe];
- for (ent = 0;
- ent < __be16_to_cpu(ddf->virt->populated_vdes);
- ent++)
- if (memcmp(vc->guid, ddf->virt->entries[ent].guid,
- DDF_GUID_LEN) == 0) {
- vcl->vcnum = ent;
- break;
- }
+ ent = find_vde_by_guid(ddf, vc->guid);
+ if (ent == DDF_NOTFOUND)
+ return;
+ vcl->vcnum = ent;
ddf->conflist = vcl;
}
/* Set DDF_Transition on all Failed devices - to help
|= __be16_to_cpu(DDF_Transition);
/* Now make sure vlist is correct for each dl. */
for (dl = ddf->dlist; dl; dl = dl->next) {
- unsigned int dn;
unsigned int vn = 0;
int in_degraded = 0;
- for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
- for (dn=0; dn < ddf->mppe ; dn++)
- if (vcl->conf.phys_refnum[dn] ==
- dl->disk.refnum) {
- int vstate;
- dprintf("dev %d has %p at %d\n",
- dl->pdnum, vcl, vn);
- /* Clear the Transition flag */
- if (ddf->phys->entries[dl->pdnum].state
- & __be16_to_cpu(DDF_Failed))
- ddf->phys->entries[dl->pdnum].state &=
- ~__be16_to_cpu(DDF_Transition);
-
- dl->vlist[vn++] = vcl;
- vstate = ddf->virt->entries[vcl->vcnum].state
- & DDF_state_mask;
- if (vstate == DDF_state_degraded ||
- vstate == DDF_state_part_optimal)
- in_degraded = 1;
- break;
- }
+ for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
+ unsigned int dn, ibvd;
+ const struct vd_config *conf;
+ int vstate;
+ dn = get_pd_index_from_refnum(vcl,
+ dl->disk.refnum,
+ ddf->mppe,
+ &conf, &ibvd);
+ if (dn == DDF_NOTFOUND)
+ continue;
+ dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
+ dl->pdnum, dl->disk.refnum,
+ guid_str(conf->guid),
+ conf->sec_elmnt_seq, vn);
+ /* Clear the Transition flag */
+ if (ddf->phys->entries[dl->pdnum].state
+ & __be16_to_cpu(DDF_Failed))
+ ddf->phys->entries[dl->pdnum].state &=
+ ~__be16_to_cpu(DDF_Transition);
+ dl->vlist[vn++] = vcl;
+ vstate = ddf->virt->entries[vcl->vcnum].state
+ & DDF_state_mask;
+ if (vstate == DDF_state_degraded ||
+ vstate == DDF_state_part_optimal)
+ in_degraded = 1;
+ }
while (vn < ddf->max_part)
dl->vlist[vn++] = NULL;
if (dl->vlist[0]) {
~__cpu_to_be16(DDF_Global_Spare);
if (!(ddf->phys->entries[dl->pdnum].type &
__cpu_to_be16(DDF_Active_in_VD))) {
- ddf->phys->entries[dl->pdnum].type |=
- __cpu_to_be16(DDF_Active_in_VD);
- if (in_degraded)
- ddf->phys->entries[dl->pdnum].state |=
- __cpu_to_be16(DDF_Rebuilding);
- }
+ ddf->phys->entries[dl->pdnum].type |=
+ __cpu_to_be16(DDF_Active_in_VD);
+ if (in_degraded)
+ ddf->phys->entries[dl->pdnum].state |=
+ __cpu_to_be16(DDF_Rebuilding);
+ }
}
if (dl->spare) {
ddf->phys->entries[dl->pdnum].type &=
pd2++;
}
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
break;
case DDF_SPARE_ASSIGN_MAGIC:
default: break;
__u32 *magic = (__u32*)update->buf;
if (*magic == DDF_VD_CONF_MAGIC)
if (posix_memalign(&update->space, 512,
- offsetof(struct vcl, conf)
- + ddf->conf_rec_len * 512) != 0)
+ offsetof(struct vcl, conf)
+ + ddf->conf_rec_len * 512) != 0)
update->space = NULL;
}
struct metadata_update *mu;
struct dl *dl;
int i;
+ struct vcl *vcl;
struct vd_config *vc;
- __u64 *lba;
+ unsigned int n_bvd;
for (d = a->info.devs ; d ; d = d->next) {
if ((d->curr_state & DS_FAULTY) &&
- d->state_fd >= 0)
+ d->state_fd >= 0)
/* wait for Removal to happen */
return NULL;
if (d->state_fd >= 0)
} else if (ddf->phys->entries[dl->pdnum].type &
__cpu_to_be16(DDF_Global_Spare)) {
is_global = 1;
+ } else if (!(ddf->phys->entries[dl->pdnum].state &
+ __cpu_to_be16(DDF_Failed))) {
+ /* we can possibly use some of this */
+ is_global = 1;
}
if ( ! (is_dedicated ||
(is_global && global_ok))) {
dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
- is_dedicated, is_global);
+ is_dedicated, is_global);
continue;
}
}
/* Cool, we have a device with some space at pos */
- di = malloc(sizeof(*di));
- if (!di)
- continue;
- memset(di, 0, sizeof(*di));
+ di = xcalloc(1, sizeof(*di));
di->disk.number = i;
di->disk.raid_disk = i;
di->disk.major = dl->major;
* Create a metadata_update record to update the
* phys_refnum and lba_offset values
*/
- mu = malloc(sizeof(*mu));
- if (mu && posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
+ mu = xmalloc(sizeof(*mu));
+ if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
free(mu);
mu = NULL;
}
- if (!mu) {
- while (rv) {
- struct mdinfo *n = rv->next;
-
- free(rv);
- rv = n;
- }
- return NULL;
- }
-
- mu->buf = malloc(ddf->conf_rec_len * 512);
+ mu->buf = xmalloc(ddf->conf_rec_len * 512);
mu->len = ddf->conf_rec_len * 512;
mu->space = NULL;
+ mu->space_list = NULL;
mu->next = *updates;
- vc = find_vdcr(ddf, a->info.container_member);
+ vc = find_vdcr(ddf, a->info.container_member, di->disk.raid_disk,
+ &n_bvd, &vcl);
memcpy(mu->buf, vc, ddf->conf_rec_len * 512);
vc = (struct vd_config*)mu->buf;
- lba = (__u64*)&vc->phys_refnum[ddf->mppe];
for (di = rv ; di ; di = di->next) {
vc->phys_refnum[di->disk.raid_disk] =
ddf->phys->entries[dl->pdnum].refnum;
- lba[di->disk.raid_disk] = di->data_offset;
+ LBA_OFFSET(ddf, vc)[di->disk.raid_disk]
+ = __cpu_to_be64(di->data_offset);
}
*updates = mu;
return rv;
}
}
+/* Supply default geometry for a new DDF array: an unset level defaults
+ * to a DDF container, and an unset layout is derived from the (possibly
+ * just defaulted) level.  'chunk' is left untouched here -- presumably a
+ * default chunk size is applied elsewhere; TODO confirm.
+ */
+static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
+{
+	if (level && *level == UnSet)
+		*level = LEVEL_CONTAINER;
+
+	if (level && layout && *layout == UnSet)
+		*layout = ddf_level_to_layout(*level);
+}
+
struct superswitch super_ddf = {
#ifndef MDASSEMBLE
.examine_super = examine_super_ddf,
.write_init_super = write_init_super_ddf,
.add_to_super = add_to_super_ddf,
.remove_from_super = remove_from_super_ddf,
+ .load_container = load_container_ddf,
+ .copy_metadata = copy_metadata_ddf,
#endif
.match_home = match_home_ddf,
.uuid_from_super= uuid_from_super_ddf,
.free_super = free_super_ddf,
.match_metadata_desc = match_metadata_desc_ddf,
.container_content = container_content_ddf,
- .default_layout = ddf_level_to_layout,
+ .default_geometry = default_geometry_ddf,
.external = 1,