#sudo ./test --tests=00createnames
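+# --skip-bigcase leaves out the long-running cases listed in the skiptests file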
-sudo ./test --skip-broken --no-error --disable-integrity --disable-multipath --disable-linear --keep-going
+sudo ./test --skip-broken --no-error --disable-integrity --disable-multipath --disable-linear --keep-going --skip-bigcase
ret=$?
sudo ./test cleanup
exitonerror=1
ctrl_c_error=0
skipbroken=0
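+# cases listed in $skipfile (format casename:seconds) take longer than 200 seconds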
+skipbigcase=0
+skipfile="skiptests"
+skipcheckfile=$testdir/$skipfile
+checkscript=""
loop=1
prefix='[0-9][0-9]'
--loop=N Run tests N times (0 to run forever)
--skip-broken Skip tests that are known to be broken
--skip-always-broken Skip tests that are known to always fail
+ --skip-bigcase Skip tests that need more than 200 seconds to run
--dev=loop|lvm|ram|disk Use loop devices (default), LVM, RAM or disk
--disks= Provide a bunch of physical devices for test
--volgroup=name LVM volume group for LVM test
--skip-always-broken )
skipbroken=always
;;
+ --skip-bigcase )
+ skipbigcase=all
+ ;;
--disable-multipath )
unset MULTIPATH
;;
else
for script in $testdir/$prefix $testdir/$prefix*[^~]
do
- case $script in *.broken) ;;
- *)
- do_test $script
+ checkscript="${script##*/}"
+ case $script in
+ *.broken)
+ ;;
+ *)
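+ # with --skip-bigcase, skip cases listed in $skipcheckfile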
+ if [ "$skipbigcase" == "all" ] && grep -q "^$checkscript:" "$skipcheckfile"; then
+ continue
+ fi
+ do_test $script
esac
done
fi
--- /dev/null
+Sometimes fails
+
++++ pgrep -f 'mdadm --grow --continue'
+++ [[ '' != '' ]]
+++ break
+++ echo 100
+++ echo 500
+++ sleep 2
+++ check raid5
+++ case $1 in
+++ grep -sq 'active raid5 ' /proc/mdstat
+++ die 'active raid5 not found'
+++ echo -e '\n\tERROR: active raid5 not found \n'
+
+ ERROR: active raid5 not found
--- /dev/null
+always fails
+
+Fails with errors:
+ ++ /usr/sbin/mdadm -A /dev/md0 --update=revert-reshape /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --backup-file=/tmp/md-backup
+++ rv=1
+++ case $* in
+++ cat /var/tmp/stderr
+mdadm: failed to RUN_ARRAY /dev/md0: Invalid argument
+++ /dev/null
-Fails due to segmentation fault at assemble.
-
-Too much effort to diagnose this now, marking as broken to make CI clear.
- ++ /usr/sbin/mdadm -A /dev/md/ddf0 /dev/loop8 /dev/loop9 /dev/loop10 /dev/loop11 /dev/loop12
- ./test: line 76: 101955 Segmentation fault (core dumped) $mdadm "$@" 2> $targetdir/stderr
+++ /dev/null
-fails infrequently
-
-Fails roughly 1 in 3 with error:
-
- ERROR: /dev/md/vol1 should be optimal in meta data
--- /dev/null
+always fails
+
+++ /usr/sbin/mdadm -I /dev/loop4
+++ rv=0
+++ case $* in
+++ cat /var/tmp/stderr
+mdadm: /dev/loop4 attached to /dev/md/0_0, which has been started.
+++ return 0
+++ check raid5
+++ case $1 in
+++ grep -sq 'active raid5 ' /proc/mdstat
+++ die 'active raid5 not found'
+++ echo -e '\n\tERROR: active raid5 not found \n'
+
+ ERROR: active raid5 not found
+
+++ save_log fail
}
get_rootdev() {
- local part=$(grep ' / ' /proc/mounts | awk '{print $1}')
- local bd=/dev/$(lsblk -no PKNAME $part)
+ local bd=$(grep ' / ' /proc/mounts | awk '{print $1}')
[ -b $bd ] || exit 1
echo $bd
}
--- /dev/null
+casename:seconds
+01raid6integ:1732
+01replace:396
+07layouts:836
+11spare-migration:1140
+12imsm-r0_2d-grow-r0_5d:218
+13imsm-r0_r0_2d-grow-r0_r0_4d:218
+13imsm-r0_r0_2d-grow-r0_r0_5d:246
+19raid6check:268
dprintf("Start %s service\n", service_name);
/* Simply return that service cannot be started */
if (check_env("MDADM_NO_SYSTEMCTL"))
- return MDADM_STATUS_SUCCESS;
+ return MDADM_STATUS_ERROR;
/* Fork in attempt to start services */
switch (fork()) {