The raid0->raid10 transition requires at least 2 spare devices.
After the level change to raid10, recovery is triggered on the
failed (missing) disks. At the end of the recovery process we
have a fully operational (not degraded) raid10 array.
Initially it was possible to migrate raid0->raid10 without
triggering recovery (which resulted in a degraded raid10).
This is no longer possible.
This patch adapts the tests to mdadm's new behavior.
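
As a rough sketch only (not part of this patch or its tests), the
new flow on an IMSM setup might look like the commands below; the
device paths /dev/md/imsm0 and /dev/md/vol0 and the use of container
spares are placeholders assumed for illustration:

  mdadm --add /dev/md/imsm0 $dev2 $dev3   # spares available for recovery
  mdadm --grow /dev/md/vol0 --level=10    # raid0 -> raid10 takeover
  mdadm --wait /dev/md/vol0               # recovery now runs to completion
  mdadm --detail /dev/md/vol0             # state should not be degraded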
Signed-off-by: Krzysztof Wojcik <krzysztof.wojcik@intel.com>
Signed-off-by: NeilBrown <neilb@suse.de>
num_disks=2
device_list="$dev0 $dev1"
+spare_list="$dev2 $dev3"
# Before: RAID 0 volume, 2 disks, 256k chunk size
vol0_level=0
vol0_new_num_comps=$vol0_num_comps
vol0_new_chunk=128
-. tests/imsm-grow-template 0 1 1
+. tests/imsm-grow-template 0 1
t_size=$6
t_offset=$7
t_chunk=$8
- t_takeover10=$9
err=0
echo "**Error**: Chunk size mismatch - expected $t_chunk, actual $_chunk" >&2
err=$((err + 1))
fi
- if [ ! -z $t_takeover10 ] ; then
- t_num_disks=$(( t_num_disks * 2 ))
- fi
for i in `seq 0 $((t_num_disks - 1))`; do
- if [ ! -z $t_takeover10 ] && [ ! -z $(( $i % 2 )) ] ; then
- continue
- fi
_offset=`cat ${sysfs}/md/rd${i}/offset`
if [ $t_offset -ne $((_offset / 2)) ]; then
echo "**Error**: Offset mismatch - expected $t_offset, actual $_offset" >&2
# 0 - On-line Capacity Expansion test, otherwise LEVEL migration or CHUNK size migration test
migration_test=$2
-# 1 - raid0 -> raid10 takeover verification
-takeover10_test=$3
-
function grow_member() {
local member=$1
local disks=$2
fi
check wait
sleep 5
- imsm_check member $member $disks $level $size $array_size $offset $chunk $takeover10_test
- testdev $member $comps $size $chunk $takeover10_test
+ imsm_check member $member $disks $level $size $array_size $offset $chunk
+ testdev $member $comps $size $chunk
}
# Create container