git.ipfire.org Git - thirdparty/mdadm.git/commitdiff
DDF: support more RAID10 levels.
author NeilBrown <neilb@suse.de>
Mon, 28 Apr 2014 05:31:50 +0000 (15:31 +1000)
committer NeilBrown <neilb@suse.de>
Wed, 21 May 2014 01:54:48 +0000 (11:54 +1000)
The DDF "RAID1E" level is similar to md "raid10".

So use md raid10 to support RAID1E, and create RAID1E for raid10
layouts that are not already supported via spanned DDF RAID1.

Signed-off-by: NeilBrown <neilb@suse.de>
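
[Editor's note, not part of the commit: md encodes a raid10 layout as a small integer, with the number of "near" copies in the low byte, the number of "far" copies in the next byte, and bit 16 set for the "offset" variant. The sketch below, with a made-up helper name, simply decodes the three layout values that layout_md2ddf() checks for in the patch below.]

#include <stdio.h>

/* Illustrative only -- decodes md's raid10 layout word as tested by
 * layout_md2ddf() in the patch below: near copies in bits 0-7,
 * far copies in bits 8-15, bit 16 set for the "offset" variant.
 */
static void decode_raid10_layout(int layout)
{
	int near_copies = layout & 0xff;
	int far_copies  = (layout >> 8) & 0xff;
	int use_offset  = (layout & 0x10000) != 0;

	printf("layout 0x%x: near=%d far=%d offset=%s\n",
	       layout, near_copies, far_copies, use_offset ? "yes" : "no");
}

int main(void)
{
	decode_raid10_layout(0x102);  /* handled as spanned DDF RAID1 or DDF_RAID1E_ADJACENT */
	decode_raid10_layout(0x103);  /* handled as 3-way spanned DDF RAID1 */
	decode_raid10_layout(0x201);  /* handled as DDF_RAID1E_OFFSET */
	return 0;
}
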
super-ddf.c

index fda416012f08430b4bff2fabc880f67733e04a47..2f72556a90c77f49e479b8ac8edcc535f13f20a0 100644
@@ -673,15 +673,23 @@ static int layout_md2ddf(const mdu_array_info_t *array,
                        rlq = DDF_RAID1_SIMPLE;
                        prim_elmnt_count =  cpu_to_be16(2);
                        sec_elmnt_count = array->raid_disks / 2;
+                       srl = DDF_2SPANNED;
+                       prl = DDF_RAID1;
                } else if (array->raid_disks % 3 == 0
                           && array->layout == 0x103) {
                        rlq = DDF_RAID1_MULTI;
                        prim_elmnt_count =  cpu_to_be16(3);
                        sec_elmnt_count = array->raid_disks / 3;
+                       srl = DDF_2SPANNED;
+                       prl = DDF_RAID1;
+               } else if (array->layout == 0x201) {
+                       prl = DDF_RAID1E;
+                       rlq = DDF_RAID1E_OFFSET;
+               } else if (array->layout == 0x102) {
+                       prl = DDF_RAID1E;
+                       rlq = DDF_RAID1E_ADJACENT;
                } else
                        return err_bad_md_layout(array);
-               srl = DDF_2SPANNED;
-               prl = DDF_RAID1;
                break;
        default:
                return err_bad_md_layout(array);
@@ -742,6 +750,15 @@ static int layout_ddf2md(const struct vd_config *conf,
                        return err_bad_ddf_layout(conf);
                level = 1;
                break;
+       case DDF_RAID1E:
+               if (conf->rlq == DDF_RAID1E_ADJACENT)
+                       layout = 0x102;
+               else if (conf->rlq == DDF_RAID1E_OFFSET)
+                       layout = 0x201;
+               else
+                       return err_bad_ddf_layout(conf);
+               level = 10;
+               break;
        case DDF_RAID4:
                if (conf->rlq != DDF_RAID4_N)
                        return err_bad_ddf_layout(conf);
@@ -4242,6 +4259,11 @@ static int get_bvd_state(const struct ddf_super *ddf,
        unsigned int i, n_bvd, working = 0;
        unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
        int pd, st, state;
+       char *avail = xcalloc(1, n_prim);
+       mdu_array_info_t array;
+
+       layout_ddf2md(vc, &array);
+
        for (i = 0; i < n_prim; i++) {
                if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
                        continue;
@@ -4250,8 +4272,10 @@ static int get_bvd_state(const struct ddf_super *ddf,
                        continue;
                st = be16_to_cpu(ddf->phys->entries[pd].state);
                if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
-                   == DDF_Online)
+                   == DDF_Online) {
                        working++;
+                       avail[i] = 1;
+               }
        }
 
        state = DDF_state_degraded;
@@ -4270,6 +4294,10 @@ static int get_bvd_state(const struct ddf_super *ddf,
                        else if (working >= 2)
                                state = DDF_state_part_optimal;
                        break;
+               case DDF_RAID1E:
+                       if (!enough(10, n_prim, array.layout, 1, avail))
+                               state = DDF_state_failed;
+                       break;
                case DDF_RAID4:
                case DDF_RAID5:
                        if (working < n_prim - 1)
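
[Editor's note, not part of the commit: in the get_bvd_state() change above, the per-position avail[] map and the md layout recovered by layout_ddf2md() let the existing enough() helper decide whether a degraded RAID1E BVD still works as a raid10 set. As a rough intuition for that check, here is a simplified sketch (not mdadm's enough()); it assumes a plain "near" layout whose disk count is a multiple of the copy count.]

/* Simplified illustration: with a raid10 "near" layout, groups of
 * near_copies adjacent devices mirror the same data, so the array
 * survives only while every group keeps at least one working member.
 * (mdadm's real enough() also covers far/offset layouts and disk
 * counts that are not a multiple of the copy count.)
 */
static int raid10_near_survivable(int raid_disks, int near_copies,
				  const char *avail)
{
	int grp;

	for (grp = 0; grp < raid_disks; grp += near_copies) {
		int c, have_one = 0;

		for (c = 0; c < near_copies; c++)
			have_one |= avail[grp + c];
		if (!have_one)
			return 0;	/* a whole mirror group has failed */
	}
	return 1;
}

[When such a check fails for the BVD's availability map, the state becomes DDF_state_failed rather than merely degraded, which is what the new DDF_RAID1E case expresses through enough().]
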