const unsigned char *buf,
unsigned len);
+#define DDF_NOTFOUND (~0U)
+#define DDF_CONTAINER (DDF_NOTFOUND-1)
+
/* The DDF metadata handling.
* DDF metadata lives at the end of the device.
* The last 512 byte block provides an 'anchor' which is used to locate
__u64 *lba_offset; /* location in 'conf' of
* the lba table */
unsigned int vcnum; /* index into ->virt */
+ struct vd_config **other_bvds;
__u64 *block_sizes; /* NULL if all the same */
};
};
char *devname;
int fd;
unsigned long long size; /* sectors */
+ unsigned long long primary_lba; /* sectors */
+ unsigned long long secondary_lba; /* sectors */
+ unsigned long long workspace_lba; /* sectors */
int pdnum; /* index in ->phys */
struct spare_assign *spare;
void *mdupdate; /* hold metadata update */
#define offsetof(t,f) ((size_t)&(((t*)0)->f))
#endif
+#if DEBUG
+static int all_ff(char *guid);
+/* Debug helper: print the state and init_state of every in-use entry
+ * in the virtual disk table, prefixed with the caller's name (msg).
+ * Compiled only when DEBUG is set; otherwise the empty stub below is used. */
+static void pr_state(struct ddf_super *ddf, const char *msg)
+{
+ unsigned int i;
+ dprintf("%s/%s: ", __func__, msg);
+ for (i = 0; i < __be16_to_cpu(ddf->active->max_vd_entries); i++) {
+  /* an all-0xff GUID marks an unused VD slot */
+  if (all_ff(ddf->virt->entries[i].guid))
+   continue;
+  dprintf("%u(s=%02x i=%02x) ", i,
+   ddf->virt->entries[i].state,
+   ddf->virt->entries[i].init_state);
+ }
+ dprintf("\n");
+}
+#else
+static void pr_state(const struct ddf_super *ddf, const char *msg) {}
+#endif
+
+/* Mark the in-memory metadata as needing a write-out, and (in DEBUG
+ * builds) dump the VD states so state transitions can be traced. */
+#define ddf_set_updates_pending(x) \
+ do { (x)->updates_pending = 1; pr_state(x, __func__); } while (0)
/* NOTE(review): this body appears truncated or stubbed in this excerpt —
 * the real DDF routine computes a CRC over 'buf'/'len'; returning 0
 * unconditionally would disable all CRC validation. Confirm against the
 * full source before relying on this. */
static unsigned int calc_crc(void *buf, int len)
{
	return 0;
}
+/* Record a vd_config for a secondary-level BVD ("other" BVD, i.e. one
+ * whose sec_elmnt_seq differs from the primary config held in vcl->conf)
+ * in vcl->other_bvds[]. 'len' is the size of the config record in bytes.
+ * If a config with the same sec_elmnt_seq is already stored, it is only
+ * replaced by one with a newer seqnum; otherwise the record is copied
+ * into the first free slot (allocated 512-byte aligned). */
+static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
+     unsigned int len)
+{
+ int i;
+ /* look for an existing entry with this sec_elmnt_seq */
+ for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
+  if (vcl->other_bvds[i] != NULL &&
+      vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
+   break;
+
+ if (i < vcl->conf.sec_elmnt_count-1) {
+  /* NOTE(review): seqnum fields are compared in raw on-disk
+   * (big-endian) form here, not via __be32_to_cpu — confirm the
+   * ordering is still correct on little-endian hosts. */
+  if (vd->seqnum <= vcl->other_bvds[i]->seqnum)
+   return;
+ } else {
+  /* no existing entry: find a free slot and allocate a buffer */
+  for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
+   if (vcl->other_bvds[i] == NULL)
+    break;
+  if (i == vcl->conf.sec_elmnt_count-1) {
+   pr_err("no space for sec level config %u, count is %u\n",
+          vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
+   return;
+  }
+  if (posix_memalign((void **)&vcl->other_bvds[i], 512, len)
+      != 0) {
+   pr_err("%s could not allocate vd buf\n", __func__);
+   return;
+  }
+ }
+ memcpy(vcl->other_bvds[i], vd, len);
+}
+
static int load_ddf_local(int fd, struct ddf_super *super,
char *devname, int keep)
{
super->active->data_section_offset,
super->active->data_section_length,
0);
- dl->devname = devname ? strdup(devname) : NULL;
+ dl->devname = devname ? xstrdup(devname) : NULL;
fstat(fd, &stb);
dl->major = major(stb.st_rdev);
dl->size = 0;
if (get_dev_size(fd, devname, &dsize))
dl->size = dsize >> 9;
+ /* If the disks have different sizes, the LBAs will differ
+ * between phys disks.
+ * At this point here, the values in super->active must be valid
+ * for this phys disk. */
+ dl->primary_lba = super->active->primary_lba;
+ dl->secondary_lba = super->active->secondary_lba;
+ dl->workspace_lba = super->active->workspace_lba;
dl->spare = NULL;
for (i = 0 ; i < super->max_part ; i++)
dl->vlist[i] = NULL;
__func__);
return 1;
}
-
+
memcpy(dl->spare, vd, super->conf_rec_len*512);
continue;
}
if (vcl) {
dl->vlist[vnum++] = vcl;
+ if (vcl->other_bvds != NULL &&
+ vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
+ add_other_bvd(vcl, vd, super->conf_rec_len*512);
+ continue;
+ }
if (__be32_to_cpu(vd->seqnum) <=
__be32_to_cpu(vcl->conf.seqnum))
continue;
}
vcl->next = super->conflist;
vcl->block_sizes = NULL; /* FIXME not for CONCAT */
+ if (vd->sec_elmnt_count > 1)
+ vcl->other_bvds =
+ xcalloc(vd->sec_elmnt_count - 1,
+ sizeof(struct vd_config *));
+ else
+ vcl->other_bvds = NULL;
super->conflist = vcl;
dl->vlist[vnum++] = vcl;
}
if (get_dev_size(fd, devname, &dsize) == 0)
return 1;
- if (test_partition(fd))
+ if (!st->ignore_hw_compat && test_partition(fd))
/* DDF is not allowed on partitions */
return 1;
ddf->conflist = v->next;
if (v->block_sizes)
free(v->block_sizes);
+ if (v->other_bvds) {
+ int i;
+ for (i = 0; i < v->conf.sec_elmnt_count-1; i++)
+ if (v->other_bvds[i] != NULL)
+ free(v->other_bvds[i]);
+ free(v->other_bvds);
+ }
free(v);
}
while (ddf->dlist) {
)
return NULL;
- st = malloc(sizeof(*st));
- memset(st, 0, sizeof(*st));
- st->container_dev = NoMdDev;
+ st = xcalloc(1, sizeof(*st));
st->ss = &super_ddf;
st->max_devs = 512;
st->minor_version = 0;
return st;
}
-
#ifndef MDASSEMBLE
static mapping_t ddf_state[] = {
}
printf(")\n");
if (vc->chunk_shift != 255)
- printf(" Chunk Size[%d] : %d sectors\n", n,
- 1 << vc->chunk_shift);
+ printf(" Chunk Size[%d] : %d sectors\n", n,
+ 1 << vc->chunk_shift);
printf(" Raid Level[%d] : %s\n", n,
map_num(ddf_level, vc->prl)?:"-unknown-");
if (vc->sec_elmnt_count != 1) {
//printf("\n");
printf(" %3d %08x ", i,
__be32_to_cpu(pd->refnum));
- printf("%8lluK ",
+ printf("%8lluK ",
(unsigned long long)__be64_to_cpu(pd->config_size)>>1);
for (dl = sb->dlist; dl ; dl = dl->next) {
if (dl->disk.refnum == pd->refnum) {
static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
+static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
+/* Figure out the VD number for this supertype.
+ * Returns DDF_CONTAINER for the container itself,
+ * and DDF_NOTFOUND on error (sysfs unreadable, not a subarray,
+ * unparsable subarray number, or number out of range). */
+static unsigned int get_vd_num_of_subarray(struct supertype *st)
+{
+ struct ddf_super *ddf = st->sb;
+ struct mdinfo *sra;
+ char *sub, *end;
+ unsigned int vcnum;
+
+ if (*st->container_devnm == '\0')
+  return DDF_CONTAINER;
+
+ sra = sysfs_read(-1, st->devnm, GET_VERSION);
+ if (!sra || sra->array.major_version != -1 ||
+     sra->array.minor_version != -2 ||
+     !is_subarray(sra->text_version)) {
+  /* sysfs_free() tolerates NULL; fixes a leak of sra on this path */
+  sysfs_free(sra);
+  return DDF_NOTFOUND;
+ }
+
+ sub = strchr(sra->text_version + 1, '/');
+ if (sub != NULL)
+  vcnum = strtoul(sub + 1, &end, 10);
+ /* Reject a missing or empty subarray number. Checking *sub could
+  * never fail (sub points at '/'), and strtoul("") yields 0 with
+  * *end == '\0', so an empty number used to be accepted as VD 0. */
+ if (sub == NULL || *(sub + 1) == '\0' || *end != '\0' ||
+     vcnum >= __be16_to_cpu(ddf->active->max_vd_entries)) {
+  sysfs_free(sra);
+  return DDF_NOTFOUND;
+ }
+
+ sysfs_free(sra);
+ return vcnum;
+}
+
static void brief_examine_super_ddf(struct supertype *st, int verbose)
{
/* We just write a generic DDF ARRAY entry
printf("MD_LEVEL=container\n");
printf("MD_UUID=%s\n", nbuf+5);
}
-
+
+/* Copy all DDF metadata from device fd 'from' to device fd 'to'.
+ * Both devices are assumed to be the same size. Returns 0 on success,
+ * 1 on any read/write/validation failure. */
+static int copy_metadata_ddf(struct supertype *st, int from, int to)
+{
+ void *buf;
+ unsigned long long dsize, offset;
+ int bytes;
+ struct ddf_header *ddf;
+ int written = 0;
+
+ /* The meta consists of an anchor, a primary, and a secondary.
+  * This all lives at the end of the device.
+  * So it is easiest to find the earliest of primary and
+  * secondary, and copy everything from there.
+  *
+  * The anchor is 512 bytes from the end. It contains primary_lba
+  * and secondary_lba; we choose the earlier of those.
+  */
+
+ if (posix_memalign(&buf, 4096, 4096) != 0)
+  return 1;
+
+ if (!get_dev_size(from, NULL, &dsize))
+  goto err;
+
+ /* read and validate the anchor header in the last sector */
+ if (lseek64(from, dsize-512, 0) < 0)
+  goto err;
+ if (read(from, buf, 512) != 512)
+  goto err;
+ ddf = buf;
+ if (ddf->magic != DDF_HEADER_MAGIC ||
+    calc_crc(ddf, 512) != ddf->crc ||
+    (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
+     memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
+  goto err;
+
+ /* start copying at the earliest of primary, secondary, and anchor */
+ offset = dsize - 512;
+ if ((__be64_to_cpu(ddf->primary_lba) << 9) < offset)
+  offset = __be64_to_cpu(ddf->primary_lba) << 9;
+ if ((__be64_to_cpu(ddf->secondary_lba) << 9) < offset)
+  offset = __be64_to_cpu(ddf->secondary_lba) << 9;
+
+ bytes = dsize - offset;
+
+ if (lseek64(from, offset, 0) < 0 ||
+     lseek64(to, offset, 0) < 0)
+  goto err;
+ /* copy in 4K chunks; a short read or write is treated as failure */
+ while (written < bytes) {
+  int n = bytes - written;
+  if (n > 4096)
+   n = 4096;
+  if (read(from, buf, n) != n)
+   goto err;
+  if (write(to, buf, n) != n)
+   goto err;
+  written += n;
+ }
+ free(buf);
+ return 0;
+err:
+ free(buf);
+ return 1;
+}
static void detail_super_ddf(struct supertype *st, char *homehost)
{
static void brief_detail_super_ddf(struct supertype *st)
{
-	/* FIXME I really need to know which array we are detailing.
-	 * Can that be stored in ddf_super??
-	 */
-//	struct ddf_super *ddf = st->sb;
	struct mdinfo info;
	char nbuf[64];
+ struct ddf_super *ddf = st->sb;
+ /* Resolve which VD we are detailing: the container itself, a
+  * specific subarray, or unknown (in which case print nothing). */
+ unsigned int vcnum = get_vd_num_of_subarray(st);
+ if (vcnum == DDF_CONTAINER)
+  uuid_from_super_ddf(st, info.uuid);
+ else if (vcnum == DDF_NOTFOUND)
+  return;
+ else
+  uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, info.uuid);
+ /* only info.uuid is initialized here — presumably fname_from_uuid
+  * reads nothing else from info; TODO confirm */
	fname_from_uuid(st, &info, nbuf,':');
	printf(" UUID=%s", nbuf + 5);
}
return -1;
}
+/* Derive a 16-byte md UUID from a DDF GUID: hash the GUID with SHA1
+ * and keep the first 16 bytes of the 20-byte digest. */
+static void uuid_from_ddf_guid(const char *guid, int uuid[4])
+{
+ struct sha1_ctx sha;
+ char digest[20];
+
+ sha1_init_ctx(&sha);
+ sha1_process_bytes(guid, DDF_GUID_LEN, &sha);
+ sha1_finish_ctx(&sha, digest);
+ memcpy(uuid, digest, 16);
+}
+
static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
{
/* The uuid returned here is used for:
* not the device-set.
* uuid to recognise same set when adding a missing device back
* to an array. This is a uuid for the device-set.
- *
+ *
* For each of these we can make do with a truncated
* or hashed uuid rather than the original, as long as
* everyone agrees.
struct ddf_super *ddf = st->sb;
struct vcl *vcl = ddf->currentconf;
char *guid;
- char buf[20];
- struct sha1_ctx ctx;
if (vcl)
guid = vcl->conf.guid;
else
guid = ddf->anchor.guid;
-
- sha1_init_ctx(&ctx);
- sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
- sha1_finish_ctx(&ctx, buf);
- memcpy(uuid, buf, 4*4);
+ uuid_from_ddf_guid(guid, uuid);
}
static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
info->array.chunk_size = 0;
info->container_enough = 1;
-
info->disk.major = 0;
info->disk.minor = 0;
if (ddf->dlist) {
info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
info->data_offset = __be64_to_cpu(ddf->phys->
- entries[info->disk.raid_disk].
- config_size);
+ entries[info->disk.raid_disk].
+ config_size);
info->component_size = ddf->dlist->size - info->data_offset;
} else {
info->disk.number = -1;
}
info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
-
info->recovery_start = MaxSector;
info->reshape_active = 0;
info->recovery_blocked = 0;
info->array.major_version = -1;
info->array.minor_version = -2;
sprintf(info->text_version, "/%s/%d",
- devnum2devname(st->container_dev),
+ st->container_devnm,
info->container_member);
info->safe_mode_delay = 200;
map[j] = 0;
if (j < info->array.raid_disks) {
int i = find_phys(ddf, vc->conf.phys_refnum[j]);
- if (i >= 0 &&
+ if (i >= 0 &&
(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Online) &&
!(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed))
map[i] = 1;
}
}
-
static int update_super_ddf(struct supertype *st, struct mdinfo *info,
char *update,
char *devname, int verbose,
mdu_array_info_t *info,
unsigned long long size,
char *name, char *homehost,
- int *uuid);
+ int *uuid, unsigned long long data_offset);
static int init_super_ddf(struct supertype *st,
mdu_array_info_t *info,
unsigned long long size, char *name, char *homehost,
- int *uuid)
+ int *uuid, unsigned long long data_offset)
{
/* This is primarily called by Create when creating a new array.
* We will then get add_to_super called for each component, and then
struct phys_disk *pd;
struct virtual_disk *vd;
+ if (data_offset != INVALID_SECTORS) {
+ pr_err("data-offset not supported by DDF\n");
+ return 0;
+ }
+
if (st->sb)
- return init_super_ddf_bvd(st, info, size, name, homehost, uuid);
+ return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
+ data_offset);
if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
pr_err("%s could not allocate superblock\n", __func__);
memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
st->sb = ddf;
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
return 1;
}
default: return -1;
}
}
+
static int layout_to_rlq(int level, int layout, int raiddisks)
{
switch(level) {
* (dnum) of the given ddf.
* Return a malloced array of 'struct extent'
-FIXME ignore DDF_Legacy devices?
+ * FIXME ignore DDF_Legacy devices?
*/
struct extent *rv;
int n = 0;
unsigned int i, j;
- rv = malloc(sizeof(struct extent) * (ddf->max_part + 2));
- if (!rv)
- return NULL;
+ rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
for (i = 0; i < ddf->max_part; i++) {
struct vcl *v = dl->vlist[i];
mdu_array_info_t *info,
unsigned long long size,
char *name, char *homehost,
- int *uuid)
+ int *uuid, unsigned long long data_offset)
{
/* We are creating a BVD inside a pre-existing container.
* so st->sb is already set.
break;
if (venum == __be16_to_cpu(ddf->virt->max_vdes)) {
pr_err("Cannot find spare slot for "
- "virtual disk - DDF is corrupt\n");
+ "virtual disk - DDF is corrupt\n");
return 0;
}
ve = &ddf->virt->entries[venum];
vcl->lba_offset = (__u64*) &vcl->conf.phys_refnum[ddf->mppe];
vcl->vcnum = venum;
vcl->block_sizes = NULL; /* FIXME not for CONCAT */
+ vcl->other_bvds = NULL;
vc = &vcl->conf;
vcl->next = ddf->conflist;
ddf->conflist = vcl;
ddf->currentconf = vcl;
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
return 1;
}
ddf->phys->entries[dl->pdnum].type &= ~__cpu_to_be16(DDF_Global_Spare);
ddf->phys->entries[dl->pdnum].type |= __cpu_to_be16(DDF_Active_in_VD);
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
}
/* add a device to a container, either while creating it or while
* expanding a pre-existing container
*/
static int add_to_super_ddf(struct supertype *st,
- mdu_disk_info_t *dk, int fd, char *devname)
+ mdu_disk_info_t *dk, int fd, char *devname,
+ unsigned long long data_offset)
{
struct ddf_super *ddf = st->sb;
struct dl *dd;
sizeof(struct phys_disk_entry));
struct phys_disk *pd;
- pd = malloc(len);
+ pd = xmalloc(len);
pd->magic = DDF_PHYS_RECORDS_MAGIC;
pd->used_pdes = __cpu_to_be16(n);
pde = &pd->entries[0];
} else {
dd->next = ddf->dlist;
ddf->dlist = dd;
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
}
return 0;
sizeof(struct phys_disk_entry));
struct phys_disk *pd;
- pd = malloc(len);
+ pd = xmalloc(len);
pd->magic = DDF_PHYS_RECORDS_MAGIC;
pd->used_pdes = __cpu_to_be16(dl->pdnum);
pd->entries[0].state = __cpu_to_be16(DDF_Missing);
*/
#define NULL_CONF_SZ 4096
-static int __write_init_super_ddf(struct supertype *st)
+static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
+ __u32 refnum, unsigned int nmax,
+ const struct vd_config **bvd,
+ unsigned int *idx);
+
+/* Write one complete DDF structure (primary or secondary, per 'type')
+ * to disk 'd': header, controller data, phys and virt tables, one
+ * config record per partition slot (0xff-filled padding from
+ * 'null_aligned' where a slot is unused), then the disk-data record.
+ * Returns 1 on success, 0 on any failure or unknown 'type'. */
+static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type,
+    char *null_aligned)
+{
+ unsigned long long sector;
+ struct ddf_header *header;
+ int fd, i, n_config, conf_size;
+
+ fd = d->fd;
+
+ switch (type) {
+ case DDF_HEADER_PRIMARY:
+  header = &ddf->primary;
+  sector = __be64_to_cpu(header->primary_lba);
+  break;
+ case DDF_HEADER_SECONDARY:
+  header = &ddf->secondary;
+  sector = __be64_to_cpu(header->secondary_lba);
+  break;
+ default:
+  return 0;
+ }
+
+ /* stamp the header and refresh its CRC before writing */
+ header->type = type;
+ header->openflag = 0;
+ header->crc = calc_crc(header, 512);
+
+ lseek64(fd, sector<<9, 0);
+ if (write(fd, header, 512) < 0)
+  return 0;
+
+ ddf->controller.crc = calc_crc(&ddf->controller, 512);
+ if (write(fd, &ddf->controller, 512) < 0)
+  return 0;
+
+ ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
+ if (write(fd, ddf->phys, ddf->pdsize) < 0)
+  return 0;
+ ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
+ if (write(fd, ddf->virt, ddf->vdsize) < 0)
+  return 0;
+ /* Now write lots of config records. */
+ n_config = ddf->max_part;
+ conf_size = ddf->conf_rec_len * 512;
+ for (i = 0 ; i <= n_config ; i++) {
+  struct vcl *c;
+  struct vd_config *vdc = NULL;
+  if (i == n_config) {
+   /* the extra slot past max_part holds the spare record */
+   c = (struct vcl *)d->spare;
+   if (c)
+    vdc = &c->conf;
+  } else {
+   unsigned int dummy;
+   c = d->vlist[i];
+   if (c)
+    /* pick the BVD config that actually contains this disk */
+    get_pd_index_from_refnum(
+     c, d->disk.refnum,
+     ddf->mppe,
+     (const struct vd_config **)&vdc,
+     &dummy);
+  }
+  if (c) {
+   vdc->seqnum = header->seq;
+   vdc->crc = calc_crc(vdc, conf_size);
+   if (write(fd, vdc, conf_size) < 0)
+    break;
+  } else {
+   /* empty slot: pad with 0xff in NULL_CONF_SZ pieces */
+   unsigned int togo = conf_size;
+   while (togo > NULL_CONF_SZ) {
+    if (write(fd, null_aligned, NULL_CONF_SZ) < 0)
+     break;
+    togo -= NULL_CONF_SZ;
+   }
+   if (write(fd, null_aligned, togo) < 0)
+    break;
+  }
+ }
+ /* the loop only falls through past n_config if every write succeeded */
+ if (i <= n_config)
+  return 0;
+
+ d->disk.crc = calc_crc(&d->disk, 512);
+ if (write(fd, &d->disk, 512) < 0)
+  return 0;
+
+ return 1;
+}
+
+static int __write_init_super_ddf(struct supertype *st)
+{
struct ddf_super *ddf = st->sb;
- int i;
struct dl *d;
- int n_config;
- int conf_size;
int attempts = 0;
int successes = 0;
- unsigned long long size, sector;
+ unsigned long long size;
char *null_aligned;
+ __u32 seq;
+ pr_state(ddf, __func__);
if (posix_memalign((void**)&null_aligned, 4096, NULL_CONF_SZ) != 0) {
return -ENOMEM;
}
memset(null_aligned, 0xff, NULL_CONF_SZ);
+ seq = ddf->active->seq + 1;
+
/* try to write updated metadata,
* if we catch a failure move on to the next disk
*/
*/
get_dev_size(fd, NULL, &size);
size /= 512;
- ddf->anchor.workspace_lba = __cpu_to_be64(size - 32*1024*2);
- ddf->anchor.primary_lba = __cpu_to_be64(size - 16*1024*2);
- ddf->anchor.seq = __cpu_to_be32(1);
+ if (d->workspace_lba != 0)
+ ddf->anchor.workspace_lba = d->workspace_lba;
+ else
+ ddf->anchor.workspace_lba =
+ __cpu_to_be64(size - 32*1024*2);
+ if (d->primary_lba != 0)
+ ddf->anchor.primary_lba = d->primary_lba;
+ else
+ ddf->anchor.primary_lba =
+ __cpu_to_be64(size - 16*1024*2);
+ if (d->secondary_lba != 0)
+ ddf->anchor.secondary_lba = d->secondary_lba;
+ else
+ ddf->anchor.secondary_lba =
+ __cpu_to_be64(size - 32*1024*2);
+ ddf->anchor.seq = seq;
memcpy(&ddf->primary, &ddf->anchor, 512);
memcpy(&ddf->secondary, &ddf->anchor, 512);
ddf->anchor.seq = 0xFFFFFFFF; /* no sequencing in anchor */
ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
- ddf->primary.openflag = 0;
- ddf->primary.type = DDF_HEADER_PRIMARY;
-
- ddf->secondary.openflag = 0;
- ddf->secondary.type = DDF_HEADER_SECONDARY;
-
- ddf->primary.crc = calc_crc(&ddf->primary, 512);
- ddf->secondary.crc = calc_crc(&ddf->secondary, 512);
-
- sector = size - 16*1024*2;
- lseek64(fd, sector<<9, 0);
- if (write(fd, &ddf->primary, 512) < 0)
- continue;
-
- ddf->controller.crc = calc_crc(&ddf->controller, 512);
- if (write(fd, &ddf->controller, 512) < 0)
- continue;
-
- ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
-
- if (write(fd, ddf->phys, ddf->pdsize) < 0)
- continue;
-
- ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
- if (write(fd, ddf->virt, ddf->vdsize) < 0)
+ if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY,
+ null_aligned))
continue;
- /* Now write lots of config records. */
- n_config = ddf->max_part;
- conf_size = ddf->conf_rec_len * 512;
- for (i = 0 ; i <= n_config ; i++) {
- struct vcl *c = d->vlist[i];
- if (i == n_config)
- c = (struct vcl*)d->spare;
-
- if (c) {
- c->conf.crc = calc_crc(&c->conf, conf_size);
- if (write(fd, &c->conf, conf_size) < 0)
- break;
- } else {
- unsigned int togo = conf_size;
- while (togo > NULL_CONF_SZ) {
- if (write(fd, null_aligned, NULL_CONF_SZ) < 0)
- break;
- togo -= NULL_CONF_SZ;
- }
- if (write(fd, null_aligned, togo) < 0)
- break;
- }
- }
- if (i <= n_config)
- continue;
- d->disk.crc = calc_crc(&d->disk, 512);
- if (write(fd, &d->disk, 512) < 0)
+ if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY,
+ null_aligned))
continue;
- /* Maybe do the same for secondary */
-
lseek64(fd, (size-1)*512, SEEK_SET);
if (write(fd, &ddf->anchor, 512) < 0)
continue;
/* First the virtual disk. We have a slightly fake header */
len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
- vd = malloc(len);
+ vd = xmalloc(len);
*vd = *ddf->virt;
vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
vd->populated_vdes = __cpu_to_be16(currentconf->vcnum);
/* Then the vd_config */
len = ddf->conf_rec_len * 512;
- vc = malloc(len);
+ vc = xmalloc(len);
memcpy(vc, ¤tconf->conf, len);
append_metadata_update(st, vc, len);
/* FIXME I need to close the fds! */
return 0;
- } else {
+ } else {
struct dl *d;
for (d = ddf->dlist; d; d=d->next)
- while (Kill(d->devname, NULL, 0, 1, 1) == 0);
+ while (Kill(d->devname, NULL, 0, -1, 1) == 0);
return __write_init_super_ddf(st);
}
}
#endif
-static __u64 avail_size_ddf(struct supertype *st, __u64 devsize)
+static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
+ unsigned long long data_offset)
{
/* We must reserve the last 32Meg */
if (devsize <= 32*1024*2)
int cnt = 0;
for (dl = ddf->dlist; dl ; dl=dl->next) {
- dl->raiddisk = -1;
+ dl->raiddisk = -1;
dl->esize = 0;
}
/* Now find largest extent on each device */
for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
if (dl->esize < size)
continue;
-
+
dl->raiddisk = cnt;
cnt++;
}
return 1;
}
-
-
static int
validate_geometry_ddf_container(struct supertype *st,
int level, int layout, int raiddisks,
int chunk, unsigned long long size,
+ unsigned long long data_offset,
char *dev, unsigned long long *freesize,
int verbose);
static int validate_geometry_ddf_bvd(struct supertype *st,
int level, int layout, int raiddisks,
int *chunk, unsigned long long size,
+ unsigned long long data_offset,
char *dev, unsigned long long *freesize,
int verbose);
static int validate_geometry_ddf(struct supertype *st,
int level, int layout, int raiddisks,
int *chunk, unsigned long long size,
+ unsigned long long data_offset,
char *dev, unsigned long long *freesize,
int verbose)
{
if (chunk && *chunk == UnSet)
*chunk = DEFAULT_CHUNK;
-
+ if (level == -1000000) level = LEVEL_CONTAINER;
if (level == LEVEL_CONTAINER) {
/* Must be a fresh device to add to a container */
return validate_geometry_ddf_container(st, level, layout,
raiddisks, chunk?*chunk:0,
- size, dev, freesize,
+ size, data_offset, dev,
+ freesize,
verbose);
}
if (ddf_level_num[i].num1 == MAXINT) {
if (verbose)
pr_err("DDF does not support level %d arrays\n",
- level);
+ level);
return 0;
}
/* Should check layout? etc */
* Should make a distinction one day.
*/
return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
- chunk, size, dev, freesize,
+ chunk, size, data_offset, dev,
+ freesize,
verbose);
}
/* This is the first device for the array.
*/
fd = open(dev, O_RDONLY|O_EXCL, 0);
if (fd >= 0) {
- sra = sysfs_read(fd, 0, GET_VERSION);
+ sra = sysfs_read(fd, NULL, GET_VERSION);
close(fd);
if (sra && sra->array.major_version == -1 &&
strcmp(sra->text_version, "ddf") == 0) {
if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
if (verbose)
pr_err("ddf: Cannot open %s: %s\n",
- dev, strerror(errno));
+ dev, strerror(errno));
return 0;
}
/* Well, it is in use by someone, maybe a 'ddf' container. */
close(fd);
if (verbose)
pr_err("ddf: Cannot use %s: %s\n",
- dev, strerror(EBUSY));
+ dev, strerror(EBUSY));
return 0;
}
- sra = sysfs_read(cfd, 0, GET_VERSION);
+ sra = sysfs_read(cfd, NULL, GET_VERSION);
close(fd);
if (sra && sra->array.major_version == -1 &&
strcmp(sra->text_version, "ddf") == 0) {
struct ddf_super *ddf;
if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
st->sb = ddf;
- st->container_dev = fd2devnum(cfd);
+ strcpy(st->container_devnm, fd2devnm(cfd));
close(cfd);
return validate_geometry_ddf_bvd(st, level, layout,
raiddisks, chunk, size,
+ data_offset,
dev, freesize,
verbose);
}
validate_geometry_ddf_container(struct supertype *st,
int level, int layout, int raiddisks,
int chunk, unsigned long long size,
+ unsigned long long data_offset,
char *dev, unsigned long long *freesize,
int verbose)
{
if (fd < 0) {
if (verbose)
pr_err("ddf: Cannot open %s: %s\n",
- dev, strerror(errno));
+ dev, strerror(errno));
return 0;
}
if (!get_dev_size(fd, dev, &ldsize)) {
}
close(fd);
- *freesize = avail_size_ddf(st, ldsize >> 9);
+ *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
if (*freesize == 0)
return 0;
static int validate_geometry_ddf_bvd(struct supertype *st,
int level, int layout, int raiddisks,
int *chunk, unsigned long long size,
+ unsigned long long data_offset,
char *dev, unsigned long long *freesize,
int verbose)
{
if (!dl) {
if (verbose)
pr_err("ddf: %s is not in the "
- "same DDF set\n",
- dev);
+ "same DDF set\n",
+ dev);
return 0;
}
e = get_extents(ddf, dl);
maxsize = 0;
i = 0;
if (e) do {
- unsigned long long esize;
- esize = e[i].start - pos;
- if (esize >= maxsize)
- maxsize = esize;
- pos = e[i].start + e[i].size;
- i++;
- } while (e[i-1].size);
+ unsigned long long esize;
+ esize = e[i].start - pos;
+ if (esize >= maxsize)
+ maxsize = esize;
+ pos = e[i].start + e[i].size;
+ i++;
+ } while (e[i-1].size);
*freesize = maxsize;
// FIXME here I am
st->minor_version = 0;
st->max_devs = 512;
}
- st->container_dev = fd2devnum(fd);
+ strcpy(st->container_devnm, fd2devnm(fd));
return 0;
}
#endif /* MDASSEMBLE */
+/* Validate that a VD with a secondary (multi-BVD) layout is one md can
+ * express: all BVDs present, mirrored (RAID1) primaries, striped or
+ * spanned secondary, with matching element counts, strip sizes and
+ * sizes across BVDs (i.e. effectively RAID10).
+ * Returns 0 if supported, -1 (with a message) otherwise. */
+static int check_secondary(const struct vcl *vc)
+{
+ const struct vd_config *conf = &vc->conf;
+ int i;
+
+ /* The only DDF secondary RAID level md can support is
+  * RAID 10, if the stripe sizes and Basic volume sizes
+  * are all equal.
+  * Other configurations could in theory be supported by exposing
+  * the BVDs to user space and using device mapper for the secondary
+  * mapping. So far we don't support that.
+  */
+
+ /* bitmap of sec_elmnt_seq values seen, 64 bits per word.
+  * The shifted 1 must be __u64: a plain int shifted by up to 63
+  * bits is undefined behavior and loses bits for seq >= 32. */
+ __u64 sec_elements[4] = {0, 0, 0, 0};
+#define __set_sec_seen(n) (sec_elements[(n)>>6] |= ((__u64)1 << ((n)&63)))
+#define __was_sec_seen(n) ((sec_elements[(n)>>6] & ((__u64)1 << ((n)&63))) != 0)
+
+ if (vc->other_bvds == NULL) {
+  pr_err("No BVDs for secondary RAID found\n");
+  return -1;
+ }
+ if (conf->prl != DDF_RAID1) {
+  pr_err("Secondary RAID level only supported for mirrored BVD\n");
+  return -1;
+ }
+ if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
+  pr_err("Secondary RAID level %d is unsupported\n",
+         conf->srl);
+  return -1;
+ }
+ __set_sec_seen(conf->sec_elmnt_seq);
+ /* check every other BVD for consistency with the primary config */
+ for (i = 0; i < conf->sec_elmnt_count-1; i++) {
+  const struct vd_config *bvd = vc->other_bvds[i];
+  if (bvd == NULL) {
+   pr_err("BVD %d is missing\n", i+1);
+   return -1;
+  }
+  if (bvd->srl != conf->srl) {
+   pr_err("Inconsistent secondary RAID level across BVDs\n");
+   return -1;
+  }
+  if (bvd->prl != conf->prl) {
+   pr_err("Different RAID levels for BVDs are unsupported\n");
+   return -1;
+  }
+  if (bvd->prim_elmnt_count != conf->prim_elmnt_count) {
+   pr_err("All BVDs must have the same number of primary elements\n");
+   return -1;
+  }
+  if (bvd->chunk_shift != conf->chunk_shift) {
+   pr_err("Different strip sizes for BVDs are unsupported\n");
+   return -1;
+  }
+  if (bvd->array_blocks != conf->array_blocks) {
+   pr_err("Different BVD sizes are unsupported\n");
+   return -1;
+  }
+  __set_sec_seen(bvd->sec_elmnt_seq);
+ }
+ /* finally, every sequence number 0..count-1 must have been seen */
+ for (i = 0; i < conf->sec_elmnt_count; i++) {
+  if (!__was_sec_seen(i)) {
+   pr_err("BVD %d is missing\n", i);
+   return -1;
+  }
+ }
+ return 0;
+}
+
+#define NO_SUCH_REFNUM (0xFFFFFFFF)
+/* Locate the physical disk with the given refnum within this VD's
+ * configuration, searching the primary config first and then any
+ * secondary BVDs (up to nmax slots each).
+ * On success, *bvd points at the vd_config containing the disk, *idx
+ * is its slot index there, and the return value is the disk's overall
+ * raid-disk position: sec_elmnt_seq * prim_elmnt_count plus the count
+ * of valid slots up to and including this one, minus 1.
+ * Returns NO_SUCH_REFNUM with *bvd = NULL if the refnum is not found. */
+static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
+         __u32 refnum, unsigned int nmax,
+         const struct vd_config **bvd,
+         unsigned int *idx)
+{
+ unsigned int i, j, n, sec, cnt;
+
+ cnt = __be16_to_cpu(vc->conf.prim_elmnt_count);
+ sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
+
+ for (i = 0, j = 0 ; i < nmax ; i++) {
+  /* j counts valid entries for this BVD */
+  if (vc->conf.phys_refnum[i] != 0xffffffff)
+   j++;
+  if (vc->conf.phys_refnum[i] == refnum) {
+   *bvd = &vc->conf;
+   *idx = i;
+   return sec * cnt + j - 1;
+  }
+ }
+ if (vc->other_bvds == NULL)
+  goto bad;
+
+ /* not in the primary config — repeat the search in each other BVD */
+ for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
+  struct vd_config *vd = vc->other_bvds[n-1];
+  if (vd == NULL)
+   continue;
+  sec = vd->sec_elmnt_seq;
+  for (i = 0, j = 0 ; i < nmax ; i++) {
+   /* same scheme: j counts valid slots in this BVD */
+   if (vd->phys_refnum[i] != 0xffffffff)
+    j++;
+   if (vd->phys_refnum[i] == refnum) {
+    *bvd = vd;
+    *idx = i;
+    return sec * cnt + j - 1;
+   }
+  }
+ }
+bad:
+ *bvd = NULL;
+ return NO_SUCH_REFNUM;
+}
+
static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
{
/* Given a container loaded by load_super_ddf_all,
struct mdinfo *this;
char *ep;
__u32 *cptr;
+ unsigned int pd;
if (subarray &&
(strtoul(subarray, &ep, 10) != vc->vcnum ||
*ep != '\0'))
continue;
- this = malloc(sizeof(*this));
- memset(this, 0, sizeof(*this));
+ if (vc->conf.sec_elmnt_count > 1) {
+ if (check_secondary(vc) != 0)
+ continue;
+ }
+
+ this = xcalloc(1, sizeof(*this));
this->next = rest;
rest = this;
- this->array.level = map_num1(ddf_level_num, vc->conf.prl);
- this->array.raid_disks =
- __be16_to_cpu(vc->conf.prim_elmnt_count);
- this->array.layout = rlq_to_layout(vc->conf.rlq, vc->conf.prl,
- this->array.raid_disks);
+ if (vc->conf.sec_elmnt_count == 1) {
+ this->array.level = map_num1(ddf_level_num,
+ vc->conf.prl);
+ this->array.raid_disks =
+ __be16_to_cpu(vc->conf.prim_elmnt_count);
+ this->array.layout =
+ rlq_to_layout(vc->conf.rlq, vc->conf.prl,
+ this->array.raid_disks);
+ } else {
+ /* The only supported layout is RAID 10.
+ * Compatibility has been checked in check_secondary()
+ * above.
+ */
+ this->array.level = 10;
+ this->array.raid_disks =
+ __be16_to_cpu(vc->conf.prim_elmnt_count)
+ * vc->conf.sec_elmnt_count;
+ this->array.layout = 0x100 |
+ __be16_to_cpu(vc->conf.prim_elmnt_count);
+ }
this->array.md_minor = -1;
this->array.major_version = -1;
this->array.minor_version = -2;
ddf->currentconf = NULL;
sprintf(this->text_version, "/%s/%d",
- devnum2devname(st->container_dev),
- this->container_member);
+ st->container_devnm, this->container_member);
- for (i = 0 ; i < ddf->mppe ; i++) {
+ for (pd = 0; pd < __be16_to_cpu(ddf->phys->used_pdes); pd++) {
struct mdinfo *dev;
struct dl *d;
+ const struct vd_config *bvd;
+ unsigned int iphys;
+ __u64 *lba_offset;
int stt;
- int pd;
- if (vc->conf.phys_refnum[i] == 0xFFFFFFFF)
- continue;
-
- for (pd = __be16_to_cpu(ddf->phys->used_pdes);
- pd--;)
- if (ddf->phys->entries[pd].refnum
- == vc->conf.phys_refnum[i])
- break;
- if (pd < 0)
+ if (ddf->phys->entries[pd].refnum == 0xFFFFFFFF)
continue;
stt = __be16_to_cpu(ddf->phys->entries[pd].state);
!= DDF_Online)
continue;
+ i = get_pd_index_from_refnum(
+ vc, ddf->phys->entries[pd].refnum,
+ ddf->mppe, &bvd, &iphys);
+ if (i == NO_SUCH_REFNUM)
+ continue;
+
this->array.working_disks++;
for (d = ddf->dlist; d ; d=d->next)
- if (d->disk.refnum == vc->conf.phys_refnum[i])
+ if (d->disk.refnum ==
+ ddf->phys->entries[pd].refnum)
break;
if (d == NULL)
/* Haven't found that one yet, maybe there are others */
continue;
- dev = malloc(sizeof(*dev));
- memset(dev, 0, sizeof(*dev));
+ dev = xcalloc(1, sizeof(*dev));
dev->next = this->devs;
this->devs = dev;
dev->recovery_start = MaxSector;
dev->events = __be32_to_cpu(ddf->primary.seq);
- dev->data_offset = __be64_to_cpu(vc->lba_offset[i]);
- dev->component_size = __be64_to_cpu(vc->conf.blocks);
+ lba_offset = (__u64 *)&bvd->phys_refnum[ddf->mppe];
+ dev->data_offset = __be64_to_cpu(lba_offset[iphys]);
+ dev->component_size = __be64_to_cpu(bvd->blocks);
if (d->devname)
strcpy(dev->name, d->devname);
}
*/
struct ddf_super *first = st->sb;
struct ddf_super *second = tst->sb;
+ struct dl *dl1, *dl2;
+ struct vcl *vl1, *vl2;
+ unsigned int max_vds, max_pds, pd, vd;
if (!first) {
st->sb = tst->sb;
if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
return 2;
+ if (first->anchor.seq != second->anchor.seq) {
+ dprintf("%s: sequence number mismatch %u/%u\n", __func__,
+ __be32_to_cpu(first->anchor.seq),
+ __be32_to_cpu(second->anchor.seq));
+ return 3;
+ }
+ if (first->max_part != second->max_part ||
+ first->phys->used_pdes != second->phys->used_pdes ||
+ first->virt->populated_vdes != second->virt->populated_vdes) {
+ dprintf("%s: PD/VD number mismatch\n", __func__);
+ return 3;
+ }
+
+ max_pds = __be16_to_cpu(first->phys->used_pdes);
+ for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
+ for (pd = 0; pd < max_pds; pd++)
+ if (first->phys->entries[pd].refnum == dl2->disk.refnum)
+ break;
+ if (pd == max_pds) {
+ dprintf("%s: no match for disk %08x\n", __func__,
+ __be32_to_cpu(dl2->disk.refnum));
+ return 3;
+ }
+ }
+
+ max_vds = __be16_to_cpu(first->active->max_vd_entries);
+ for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
+ if (vl2->conf.magic != DDF_VD_CONF_MAGIC)
+ continue;
+ for (vd = 0; vd < max_vds; vd++)
+ if (!memcmp(first->virt->entries[vd].guid,
+ vl2->conf.guid, DDF_GUID_LEN))
+ break;
+ if (vd == max_vds) {
+ dprintf("%s: no match for VD config\n", __func__);
+ return 3;
+ }
+ }
/* FIXME should I look at anything else? */
+
+ /*
+ At this point we are fairly sure that the meta data matches.
+ But the new disk may contain additional local data.
+ Add it to the super block.
+ */
+ for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
+ for (vl1 = first->conflist; vl1; vl1 = vl1->next)
+ if (!memcmp(vl1->conf.guid, vl2->conf.guid,
+ DDF_GUID_LEN))
+ break;
+ if (vl1) {
+ if (vl1->other_bvds != NULL &&
+ vl1->conf.sec_elmnt_seq !=
+ vl2->conf.sec_elmnt_seq) {
+ dprintf("%s: adding BVD %u\n", __func__,
+ vl2->conf.sec_elmnt_seq);
+ add_other_bvd(vl1, &vl2->conf,
+ first->conf_rec_len*512);
+ }
+ continue;
+ }
+
+ if (posix_memalign((void **)&vl1, 512,
+ (first->conf_rec_len*512 +
+ offsetof(struct vcl, conf))) != 0) {
+ pr_err("%s could not allocate vcl buf\n",
+ __func__);
+ return 3;
+ }
+
+ vl1->next = first->conflist;
+ vl1->block_sizes = NULL;
+ if (vl2->conf.sec_elmnt_count > 1) {
+ vl1->other_bvds = xcalloc(vl2->conf.sec_elmnt_count - 1,
+ sizeof(struct vd_config *));
+ } else
+ vl1->other_bvds = NULL;
+ memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
+ vl1->lba_offset = (__u64 *)
+ &vl1->conf.phys_refnum[first->mppe];
+ for (vd = 0; vd < max_vds; vd++)
+ if (!memcmp(first->virt->entries[vd].guid,
+ vl1->conf.guid, DDF_GUID_LEN))
+ break;
+ vl1->vcnum = vd;
+ dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
+ first->conflist = vl1;
+ }
+
+ for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
+ for (dl1 = first->dlist; dl1; dl1 = dl1->next)
+ if (dl1->disk.refnum == dl2->disk.refnum)
+ break;
+ if (dl1)
+ continue;
+
+ if (posix_memalign((void **)&dl1, 512,
+ sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
+ != 0) {
+ pr_err("%s could not allocate disk info buffer\n",
+ __func__);
+ return 3;
+ }
+ memcpy(dl1, dl2, sizeof(*dl1));
+ dl1->mdupdate = NULL;
+ dl1->next = first->dlist;
+ dl1->fd = -1;
+ for (pd = 0; pd < max_pds; pd++)
+ if (first->phys->entries[pd].refnum == dl1->disk.refnum)
+ break;
+ dl1->pdnum = pd;
+ if (dl2->spare) {
+ if (posix_memalign((void **)&dl1->spare, 512,
+ first->conf_rec_len*512) != 0) {
+ pr_err("%s could not allocate spare info buf\n",
+ __func__);
+ return 3;
+ }
+ memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
+ }
+ for (vd = 0 ; vd < first->max_part ; vd++) {
+ if (!dl2->vlist[vd]) {
+ dl1->vlist[vd] = NULL;
+ continue;
+ }
+			for (vl1 = first->conflist; vl1; vl1 = vl1->next)
+				if (!memcmp(vl1->conf.guid,
+					    dl2->vlist[vd]->conf.guid,
+					    DDF_GUID_LEN))
+					break;
+			dl1->vlist[vd] = vl1;
+ }
+ first->dlist = dl1;
+		dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
+			__be32_to_cpu(dl1->disk.refnum));
+ }
+
return 0;
}
else
ddf->virt->entries[inst].state |= DDF_state_inconsistent;
if (old != ddf->virt->entries[inst].state)
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
old = ddf->virt->entries[inst].init_state;
ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
else
ddf->virt->entries[inst].init_state |= DDF_init_quick;
if (old != ddf->virt->entries[inst].init_state)
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
dprintf("ddf mark %d %s %llu\n", inst, consistent?"clean":"dirty",
a->info.resync_start);
}
#define container_of(ptr, type, member) ({ \
- const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
/*
* The state of each disk is stored in the global phys_disk structure
* in phys_disk.entries[n].state.
~__cpu_to_be16(DDF_Global_Spare);
ddf->phys->entries[pd].type |=
__cpu_to_be16(DDF_Active_in_VD);
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
}
} else {
int old = ddf->phys->entries[pd].state;
ddf->phys->entries[pd].state &= __cpu_to_be16(~DDF_Rebuilding);
}
if (old != ddf->phys->entries[pd].state)
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
}
dprintf("ddf: set_disk %d to %x\n", n, state);
if (working == a->info.array.raid_disks)
state = DDF_state_optimal;
else switch(vc->prl) {
- case DDF_RAID0:
- case DDF_CONCAT:
- case DDF_JBOD:
- state = DDF_state_failed;
- break;
- case DDF_RAID1:
- if (working == 0)
- state = DDF_state_failed;
- else if (working == 2 && state == DDF_state_degraded)
- state = DDF_state_part_optimal;
- break;
- case DDF_RAID4:
- case DDF_RAID5:
- if (working < a->info.array.raid_disks-1)
+ case DDF_RAID0:
+ case DDF_CONCAT:
+ case DDF_JBOD:
state = DDF_state_failed;
- break;
- case DDF_RAID6:
- if (working < a->info.array.raid_disks-2)
- state = DDF_state_failed;
- else if (working == a->info.array.raid_disks-1)
- state = DDF_state_part_optimal;
- break;
- }
+ break;
+ case DDF_RAID1:
+ if (working == 0)
+ state = DDF_state_failed;
+ else if (working == 2 && state == DDF_state_degraded)
+ state = DDF_state_part_optimal;
+ break;
+ case DDF_RAID4:
+ case DDF_RAID5:
+ if (working < a->info.array.raid_disks-1)
+ state = DDF_state_failed;
+ break;
+ case DDF_RAID6:
+ if (working < a->info.array.raid_disks-2)
+ state = DDF_state_failed;
+ else if (working == a->info.array.raid_disks-1)
+ state = DDF_state_part_optimal;
+ break;
+ }
if (ddf->virt->entries[inst].state !=
((ddf->virt->entries[inst].state & ~DDF_state_mask)
ddf->virt->entries[inst].state =
(ddf->virt->entries[inst].state & ~DDF_state_mask)
| state;
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
}
}
break;
}
}
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
return;
}
if (!all_ff(ddf->phys->entries[ent].guid))
return;
ddf->phys->entries[ent] = pd->entries[0];
ddf->phys->used_pdes = __cpu_to_be16(1 +
- __be16_to_cpu(ddf->phys->used_pdes));
- ddf->updates_pending = 1;
+ __be16_to_cpu(ddf->phys->used_pdes));
+ ddf_set_updates_pending(ddf);
if (ddf->add_list) {
struct active_array *a;
struct dl *al = ddf->add_list;
return;
ddf->virt->entries[ent] = vd->entries[0];
ddf->virt->populated_vdes = __cpu_to_be16(1 +
- __be16_to_cpu(ddf->virt->populated_vdes));
- ddf->updates_pending = 1;
+ __be16_to_cpu(ddf->virt->populated_vdes));
+ ddf_set_updates_pending(ddf);
break;
case DDF_VD_CONF_MAGIC:
~__cpu_to_be16(DDF_Global_Spare);
if (!(ddf->phys->entries[dl->pdnum].type &
__cpu_to_be16(DDF_Active_in_VD))) {
- ddf->phys->entries[dl->pdnum].type |=
- __cpu_to_be16(DDF_Active_in_VD);
- if (in_degraded)
- ddf->phys->entries[dl->pdnum].state |=
- __cpu_to_be16(DDF_Rebuilding);
- }
+ ddf->phys->entries[dl->pdnum].type |=
+ __cpu_to_be16(DDF_Active_in_VD);
+ if (in_degraded)
+ ddf->phys->entries[dl->pdnum].state |=
+ __cpu_to_be16(DDF_Rebuilding);
+ }
}
if (dl->spare) {
ddf->phys->entries[dl->pdnum].type &=
pd2++;
}
- ddf->updates_pending = 1;
+ ddf_set_updates_pending(ddf);
break;
case DDF_SPARE_ASSIGN_MAGIC:
default: break;
__u32 *magic = (__u32*)update->buf;
if (*magic == DDF_VD_CONF_MAGIC)
if (posix_memalign(&update->space, 512,
- offsetof(struct vcl, conf)
- + ddf->conf_rec_len * 512) != 0)
+ offsetof(struct vcl, conf)
+ + ddf->conf_rec_len * 512) != 0)
update->space = NULL;
}
for (d = a->info.devs ; d ; d = d->next) {
if ((d->curr_state & DS_FAULTY) &&
- d->state_fd >= 0)
+ d->state_fd >= 0)
/* wait for Removal to happen */
return NULL;
if (d->state_fd >= 0)
} else if (ddf->phys->entries[dl->pdnum].type &
__cpu_to_be16(DDF_Global_Spare)) {
is_global = 1;
+ } else if (!(ddf->phys->entries[dl->pdnum].state &
+ __cpu_to_be16(DDF_Failed))) {
+ /* we can possibly use some of this */
+ is_global = 1;
}
if ( ! (is_dedicated ||
(is_global && global_ok))) {
dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
- is_dedicated, is_global);
+ is_dedicated, is_global);
continue;
}
}
/* Cool, we have a device with some space at pos */
- di = malloc(sizeof(*di));
- if (!di)
- continue;
- memset(di, 0, sizeof(*di));
+ di = xcalloc(1, sizeof(*di));
di->disk.number = i;
di->disk.raid_disk = i;
di->disk.major = dl->major;
* Create a metadata_update record to update the
* phys_refnum and lba_offset values
*/
- mu = malloc(sizeof(*mu));
- if (mu && posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
+ mu = xmalloc(sizeof(*mu));
+ if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
free(mu);
mu = NULL;
}
- if (!mu) {
- while (rv) {
- struct mdinfo *n = rv->next;
-
- free(rv);
- rv = n;
- }
- return NULL;
- }
-
- mu->buf = malloc(ddf->conf_rec_len * 512);
+ mu->buf = xmalloc(ddf->conf_rec_len * 512);
mu->len = ddf->conf_rec_len * 512;
mu->space = NULL;
mu->space_list = NULL;
.add_to_super = add_to_super_ddf,
.remove_from_super = remove_from_super_ddf,
.load_container = load_container_ddf,
+ .copy_metadata = copy_metadata_ddf,
#endif
.match_home = match_home_ddf,
.uuid_from_super= uuid_from_super_ddf,