1 # Simple fail / re-add test
# NOTE(review): this chunk is a partial view — the embedded line numbers jump
# (1, 2, 4, 7, 8, 10, 11, 15, ...), so several case/if constructs below are
# missing their opening or closing halves. Comments only; no code changed.
#
# Pulls in the shared DDF test environment — presumably defines $container,
# $member0, $dev8/$dev9, $fail0/$good0, get_raiddisks and $ret; confirm in
# tests/env-ddf-template.
2 . tests/env-ddf-template
# Scratch file for captured metadata; grep'd below for member state strings.
# NOTE(review): no visible trap/rm for $tmp in this view — cleanup may happen
# on lines not shown here.
4 tmp=$(mktemp /tmp/mdtest-XXXXXX)
# Start clean: wipe any stale superblocks, then create a 2-disk DDF
# container and a RAID1 member array inside it (-CR = create and run).
7 mdadm --zero-superblock $dev8 $dev9
8 mdadm -CR $container -e ddf -l container -n 2 $dev8 $dev9
10 mdadm -CR $member0 -l raid1 -n 2 $container
# Debug variant kept around: run the in-tree mdadm and capture mdmon output.
11 #$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1
# Load the member's current disk list into the positional parameters
# ($1, $2, ...). $fail0/$good0 are presumably chosen from these on the
# missing lines in between — TODO confirm.
15 set -- $(get_raiddisks $member0)
# Fail one leg of the RAID1 mirror.
17 mdadm $member0 --fail $fail0
# Re-read the disk list after the failure; a failed slot may be reported
# as the literal token MISSING, which is skipped over here.
20 set -- $(get_raiddisks $member0)
21 case $1 in MISSING) shift;; esac
24 # Check that the meta data now show one disk as failed
# $x and the surrounding loop/redirect filling $tmp come from lines not
# visible in this chunk — presumably iterating over member devices.
28 if ! grep -q 'state\[0\] : Degraded, Consistent' $tmp; then
29 echo ERROR: member 0 should be degraded in meta data on $x
# Arms of a case over per-physical-disk state lines (the case header and
# esac are outside this view): the failed disk must read Offline/Failed,
# the surviving disk Online; anything else is a test failure.
34 $fail0:*active/Offline,\ Failed);;
35 $good0:*active/Online);;
36 *) echo ERROR: wrong phys disk state for $x
# Remove the failed disk from the container, then add it back so the
# array rebuilds onto it.
42 mdadm $container --remove $fail0
44 # We re-add the disk now
45 mdadm $container --add $fail0
# Wait for resync/recovery to finish; --wait returns non-zero if nothing
# was in progress, so that outcome is deliberately ignored with || true.
48 mdadm --wait $member0 || true
# After recovery both original devices must be back in the member array,
# in either order; otherwise record failure in $ret.
50 set -- $(get_raiddisks $member0)
52 $dev8:$dev9|$dev9:$dev8);;
53 *) echo ERROR: bad raid disks "$@"; ret=1;;
# Metadata must report the member fully rebuilt (Optimal) again.
59 if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then
60 echo ERROR: member 0 should be optimal in meta data on $x
# Final verdict: non-zero $ret means some check above failed (the body of
# this if — presumably diagnostics and exit — is beyond this view).
66 if [ $ret -ne 0 ]; then