# Read the two node IPs from $CLUSTER_CONF and verify passwordless
# root ssh access to both.
# Globals:  CLUSTER_CONF (read), NODE1/NODE2 (written)
# Exits:    1 if a node IP is missing or ssh needs a password.
check_ssh()
{
  NODE1="$(grep '^NODE1' $CLUSTER_CONF | cut -d'=' -f2)"
  NODE2="$(grep '^NODE2' $CLUSTER_CONF | cut -d'=' -f2)"
  [ -z "$NODE1" -o -z "$NODE2" ] && {
    echo "Please provide node-ip in $CLUSTER_CONF."
    exit 1
  }
  for ip in $NODE1 $NODE2
  do
    # NumberOfPasswordPrompts=0: fail immediately instead of
    # prompting, so a missing authorized_key is detected.
    ssh -o NumberOfPasswordPrompts=0 $ip -l root "pwd" > /dev/null
    [ $? -ne 0 ] && {
      echo "Please setup ssh-access with no-authorized mode."
      exit 1
    }
  done
}
# Build the test disk list, either from an explicit 'devlist' entry in
# $CLUSTER_CONF or by globbing iSCSI disks matching ISCSI_TARGET_ID.
# Excludes any disk that carries an sbd header, exports dev0..devN
# variables, and requires at least 6 usable disks.
# Globals:  CLUSTER_CONF (read), ISCSI_ID/devlist/devN (written)
# Exits:    1 on config error, missing disks, or fewer than 6 disks.
fetch_devlist()
{
  ISCSI_ID="$(grep '^ISCSI_TARGET_ID' $CLUSTER_CONF | cut -d'=' -f2)"
  devlist="$(grep '^devlist' $CLUSTER_CONF | cut -d'=' -f2)"
  # Exactly one of ISCSI_TARGET_ID / devlist may be configured.
  if [ ! -z "$ISCSI_ID" -a ! -z "$devlist" ]
  then
    echo "Config ISCSI_TARGET_ID or devlist in $CLUSTER_CONF."
    exit 1
  elif [ ! -z "$ISCSI_ID" -a -z "$devlist" ]
  then
    for ip in $NODE1 $NODE2
    do
      ssh $ip "ls /dev/disk/by-path/*$ISCSI_ID*" > /dev/null
      [ $? -ne 0 ] && {
        echo "$ip: No disks found in '$ISCSI_ID' connection."
        exit 1
      }
    done
    devlist=($(ls /dev/disk/by-path/*$ISCSI_ID*))
  fi
  # sbd disk cannot use in testing
  for i in ${devlist[@]}
  do
    # 'sbd dump' succeeding means the disk holds an sbd header;
    # drop it from the array.
    sbd -d $i dump &> /dev/null
    [ $? -eq '0' ] && devlist=(${devlist[@]#$i})
  done
  # FIX: was 'seq 0 ${#devlist[@]}', which iterates one index past the
  # last element and eval-ed an empty trailing devN variable.
  for i in $(seq 0 $((${#devlist[@]} - 1)))
  do
    eval "dev$i=${devlist[$i]}"
  done
  [ "${#devlist[@]}" -lt 6 ] && {
    echo "Cluster-md testing requires 6 disks at least."
    exit 1
  }
}
# Ensure the pacemaker dlm resource exists and dlm_controld runs on
# both nodes; refuse to continue while the cluster reports failures.
# Globals:  NODE1, NODE2 (read)
# Exits:    1 if dlm_controld is missing or resources are unhealthy.
check_dlm()
{
  if ! crm configure show | grep -q dlm
  then
    crm configure primitive dlm ocf:pacemaker:controld \
      op monitor interval=60 timeout=60 \
      meta target-role=Started &> /dev/null
    crm configure group base-group dlm
    # NOTE(review): the continuation of this command was lost in the
    # source; 'meta interleave=true' is the conventional clone setting
    # for a dlm base group — confirm against the original file.
    crm configure clone base-clone base-group \
      meta interleave=true
  fi
  for ip in $NODE1 $NODE2
  do
    ssh $ip "pgrep dlm_controld > /dev/null" || {
      echo "$ip: dlm_controld daemon doesn't exist."
      exit 1
    }
  done
  # Any "fail"/"not" word in the live resource view means stale errors.
  crm_mon -r -n1 | grep -iq "fail\|not" && {
    echo "Please clear cluster-resource errors."
    exit 1
  }
}
# Validate the whole test environment: root user, ssh access, required
# commands and kernel modules on both nodes, matching mdadm versions,
# no pre-existing RAID arrays, dlm health, log directory, disk list.
# Globals:  mdadm (path to freshly built binary, read), logdir (read),
#           NODE1/NODE2 (via check_ssh), commands/mods (written)
# Exits:    1 on any unmet prerequisite.
check_env()
{
  user=$(id -un)
  [ "X$user" = "Xroot" ] || {
    echo "testing can only be done as 'root'."
    exit 1
  }
  check_ssh
  commands=(mdadm iscsiadm bc modinfo dlm_controld
            udevadm crm crm_mon lsblk pgrep sbd)
  # $mdadm is the in-tree binary; its -V output must match the
  # installed one on every node.
  mdadm_src_ver="$($mdadm -V 2>&1)"
  for ip in $NODE1 $NODE2
  do
    for cmd in ${commands[@]}
    do
      ssh $ip "which $cmd &> /dev/null" || {
        echo "$ip: $cmd, command not found!"
        exit 1
      }
    done
    mdadm_sbin_ver="$(ssh $ip "mdadm -V 2>&1")"
    if [ "$mdadm_src_ver" != "$mdadm_sbin_ver" ]
    then
      echo "$ip: please run 'make install' before testing."
      exit 1
    fi
    mods=(raid1 raid10 md_mod dlm md-cluster)
    for mod in ${mods[@]}
    do
      ssh $ip "modinfo $mod > /dev/null" || {
        echo "$ip: $mod, module doesn't exist."
        exit 1
      }
    done
    # A running RAID on a node would interfere with the tests.
    ssh $ip "lsblk -a | grep -iq raid"
    [ $? -eq 0 ] && {
      echo "$ip: Please run testing without running RAIDs environment."
      exit 1
    }
    ssh $ip "modprobe md_mod"
  done
  # NOTE(review): the exact ordering of these trailing calls was lost
  # in the source — confirm against the original file.
  check_dlm
  [ -d $logdir ] || mkdir -p $logdir
  fetch_devlist
}
# $1/node, $2/optional
# Stop md arrays: on "all" nodes or a single named node; $2 optionally
# names one array to stop instead of stopping every array.
# Globals:  NODE1, NODE2 (read), NODES (written)
stop_md()
{
  if [ "$1" == "all" ]
  then
    NODES=($NODE1 $NODE2)
  elif [ "$1" == "$NODE1" -o "$1" == "$NODE2" ]
  then
    NODES=($1)
  else
    die "$1: unknown parameter."
  fi
  # NOTE(review): the ssh payloads below were lost in the source;
  # 'mdadm -Ss' (stop all) vs 'mdadm -S $2' (stop one) is the
  # conventional split — confirm against the original file.
  if [ -z "$2" ]
  then
    for ip in ${NODES[@]}
    do
      ssh $ip "mdadm -Ss"
    done
  else
    for ip in ${NODES[@]}
    do
      ssh $ip "mdadm -S $2"
    done
  fi
}
# $1/optional, it shows why to save log
# Collect diagnostics into $logdir: test stderr/log, and per node the
# dmesg buffer, /proc/mdstat, and mdadm -D/-X/-E output for any
# assembled arrays.
# Globals:  _basename, targetdir, logdir, NODE1, NODE2 (read)
save_log()
{
  status=$1
  logfile="$status""$_basename".log

  cat $targetdir/stderr >> $targetdir/log
  cp $targetdir/log $logdir/$_basename.log

  for ip in $NODE1 $NODE2
  do
    echo "##$ip: saving dmesg." >> $logdir/$logfile
    # dmesg -c also clears the buffer so the next test starts clean.
    ssh $ip "dmesg -c" >> $logdir/$logfile
    echo "##$ip: saving proc mdstat." >> $logdir/$logfile
    ssh $ip "cat /proc/mdstat" >> $logdir/$logfile
    array=($(ssh $ip "mdadm -Ds | cut -d' ' -f2"))
    if [ ! -z "$array" -a ${#array[@]} -ge 1 ]
    then
      echo "##$ip: mdadm -D ${array[@]}" >> $logdir/$logfile
      ssh $ip "mdadm -D ${array[@]}" >> $logdir/$logfile
      # FIX: inner double quotes around /dev/ used to terminate the
      # outer ssh string; single quotes keep one well-formed command.
      md_disks=($(ssh $ip "mdadm -DY ${array[@]} | grep '/dev/' | cut -d'=' -f2"))
      # FIX: bitmap presence was checked against the *local*
      # /proc/mdstat; query the node being logged instead.
      if ssh $ip "grep -q 'bitmap' /proc/mdstat"
      then
        echo "##$ip: mdadm -X ${md_disks[@]}" >> $logdir/$logfile
        ssh $ip "mdadm -X ${md_disks[@]}" >> $logdir/$logfile
        echo "##$ip: mdadm -E ${md_disks[@]}" >> $logdir/$logfile
        ssh $ip "mdadm -E ${md_disks[@]}" >> $logdir/$logfile
      fi
    else
      echo "##$ip: no array assembled!" >> $logdir/$logfile
    fi
  done
  [ "$1" == "fail" ] &&
    echo "See $logdir/$_basename.log and $logdir/$logfile for details"
}
# Stop all arrays and clear dmesg on both nodes, then wipe md
# superblocks from every test disk.
# Globals:  NODE1, NODE2, devlist (read)
do_clean()
{
  for ip in $NODE1 $NODE2
  do
    ssh $ip "mdadm -Ssq; dmesg -c > /dev/null"
  done
  mdadm --zero ${devlist[@]} &> /dev/null
}
# check: $1/cluster_node $2/feature $3/optional
# Assert a cluster/array condition on "all" nodes or one named node.
# $2 selects the check; $3 is its argument where applicable
# (spare count, chunk size, device count).
# Globals:  NODE1, NODE2 (read), NODES (written)
# Exits:    via die on any failed check.
check()
{
  if [ "$1" == "all" ]
  then
    NODES=($NODE1 $NODE2)
  elif [ "$1" == "$NODE1" -o "$1" == "$NODE2" ]
  then
    NODES=($1)
  else
    die "$1: unknown parameter."
  fi
  case $2 in
  spares )
    for ip in ${NODES[@]}
    do
      # Count '(S)' spare markers in mdstat, one token per line.
      spares=$(ssh $ip "tr '] ' '\012\012' < /proc/mdstat | grep -c '(S)'")
      [ "$spares" -ne "$3" ] &&
        die "$ip: expected $3 spares, but found $spares"
    done
  ;;
  raid10 )
    for ip in ${NODES[@]}
    do
      # FIX: inner double quotes around $2 used to split the ssh
      # string; single quotes keep one well-formed remote command.
      ssh $ip "grep -sq '$2' /proc/mdstat" ||
        die "$ip: check '$2' failed."
    done
  ;;
  PENDING | recovery | resync | reshape )
    # NOTE(review): the counter init was lost in the source; 5 retries
    # of 0.2s is assumed — confirm against the original file.
    cnt=5
    for ip in ${NODES[@]}
    do
      while ! ssh $ip "grep -sq '$2' /proc/mdstat"
      do
        if [ "$cnt" -gt '0' ]
        then
          sleep 0.2
          cnt=$((cnt - 1))
        else
          die "$ip: no '$2' happening!"
        fi
      done
    done
  ;;
  wait )
    # NOTE(review): counter init lost in the source; 60 x 5s matches
    # the 300-second message below — confirm against the original.
    local cnt=60
    for ip in ${NODES[@]}
    do
      # Raise the sync speed cap while waiting, restore it after.
      p=$(ssh $ip "cat /proc/sys/dev/raid/speed_limit_max")
      ssh $ip "echo 200000 > /proc/sys/dev/raid/speed_limit_max"
      while ssh $ip "grep -Esq '(resync|recovery|reshape|check|repair)' /proc/mdstat"
      do
        if [ "$cnt" -gt '0' ]
        then
          sleep 5
          cnt=$((cnt - 1))
        else
          die "$ip: Check '$2' timeout over 300 seconds."
        fi
      done
      ssh $ip "echo $p > /proc/sys/dev/raid/speed_limit_max"
    done
  ;;
  bitmap )
    for ip in ${NODES[@]}
    do
      ssh $ip "grep -sq '$2' /proc/mdstat" ||
        die "$ip: no '$2' found in /proc/mdstat."
    done
  ;;
  nobitmap )
    for ip in ${NODES[@]}
    do
      ssh $ip "grep -sq 'bitmap' /proc/mdstat" &&
        die "$ip: 'bitmap' found in /proc/mdstat."
    done
  ;;
  chunk )
    for ip in ${NODES[@]}
    do
      # FIX: the chunk size was read from the *local* /proc/mdstat
      # inside a per-node loop; query $ip instead. \$2 is awk's field,
      # escaped so the local shell does not expand it.
      chunk_size=$(ssh $ip "awk -F',' '/chunk/{print \$2}' /proc/mdstat" |
        awk -F'[a-z]' '{print $1}')
      [ "$chunk_size" -ne "$3" ] &&
        die "$ip: chunksize should be $3, but it's $chunk_size"
    done
  ;;
  state )
    for ip in ${NODES[@]}
    do
      # Match the device-count suffix "[N]" on the 'blocks' line.
      ssh $ip "grep -Esq 'blocks.*\[$3\]\$' /proc/mdstat" ||
        die "$ip: no '$3' found in /proc/mdstat."
    done
  ;;
  nosync )
    for ip in ${NODES[@]}
    do
      ssh $ip "grep -Eq '(resync|recovery)' /proc/mdstat" &&
        die "$ip: resync or recovery is happening!"
    done
  ;;
  readonly )
    for ip in ${NODES[@]}
    do
      # FIX: inner double quotes around read-only used to split the
      # ssh string; single quotes keep one well-formed remote command.
      ssh $ip "grep -sq 'read-only' /proc/mdstat" ||
        die "$ip: check '$2' failed!"
    done
  ;;
  dmesg )
    for ip in ${NODES[@]}
    do
      ssh $ip "dmesg | grep -iq 'error\|call trace\|segfault'" &&
        die "$ip: check '$2' prints errors!"
    done
  ;;
  * )
    die "unknown parameter $2"
  ;;
  esac
}