--- /dev/null
+#! /usr/bin/env python2
+
+from __future__ import print_function
+
+import sys
+import os
+import os.path
+import subprocess
+import threading
+import shutil
+import argparse
+import yaml
+import glob
+import re
+
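+# Read lines from a file object (a subprocess pipe), writing them to
+# the given output file and echoing them to the terminal when verbose.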
+def pipe_reader(fileobj, output=None, verbose=False):
+ for line in fileobj:
+ if output:
+ output.write(line)
+ if verbose:
+ print(line.strip())
+
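+# Runs a single test contained in the given test directory.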
+class TestRunner:
+
+ def __init__(self, directory, verbose=False):
+ self.directory = directory
+ self.verbose = verbose
+ self.output = os.path.join(self.directory, "output")
+
+ # The name is just the directory name.
+ self.name = os.path.basename(self.directory)
+
+ # List of thread readers.
+ self.readers = []
+
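+    # Run the test: build the command line (or use the test's own
+    # run.sh), capture stdout/stderr into the output directory, then
+    # verify the results.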
+ def run(self):
+
+        sys.stdout.write("===> %s: " % self.name)
+ sys.stdout.flush()
+
+ args = []
+ if os.path.exists(os.path.join(self.directory, "run.sh")):
+ args.append(os.path.join(self.directory, "run.sh"))
+ else:
+ args += self.default_args()
+
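+        # Environment for the test process. Passing env to Popen
+        # replaces the inherited environment entirely.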
+ env = {
+ "TZ": "UTC",
+ "TEST_DIR": self.directory,
+ }
+
+ # Cleanup the output directory.
+ if os.path.exists(self.output):
+ shutil.rmtree(self.output)
+ os.makedirs(self.output)
+
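+        # Log files that the reader threads tee process output into.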
+ stdout = open(os.path.join(self.output, "stdout"), "w")
+ stderr = open(os.path.join(self.output, "stderr"), "w")
+
+        # Record the command line used for this test.
+        with open(os.path.join(self.output, "cmdline"), "w") as cmdline_file:
+            cmdline_file.write(" ".join(args))
+
+ p = subprocess.Popen(
+ args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ self.start_reader(p.stdout, stdout)
+ self.start_reader(p.stderr, stderr)
+
+        for r in self.readers:
+            r.join()
+
+        # Close the log files so all captured output is flushed to disk
+        # before any check script reads it.
+        stdout.close()
+        stderr.close()
+
+        r = p.wait()
+
+ if r != 0:
+            print("FAIL: process returned a non-zero exit code: %d" % r)
+ return False
+
+ return self.check()
+
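+    # Verify the output by running the test's check.sh script, if one
+    # exists.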
+ def check(self):
+ if not os.path.exists(os.path.join(self.directory, "check.sh")):
+ print("OK (no check script)")
+ return True
+        r = subprocess.call(["/bin/sh", "./check.sh"], cwd=self.directory)
+ if r != 0:
+ print("FAILED: verification failed")
+ return False
+ print("OK")
+ return True
+
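+    # Build the default Suricata command line for tests that do not
+    # provide their own run.sh.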
+ def default_args(self):
+ args = ["./src/suricata",
+ "--set", "classification-file=./classification.config",
+ "--set", "reference-config-file=./reference.config",
+ "--init-errors-fatal",
+ "-l", self.output,
+ ]
+
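+        # Tests with "ips" in their name are run in simulated IPS mode.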
+ if "ips" in self.name:
+ args.append("--simulate-ips")
+
+ if os.path.exists(os.path.join(self.directory, "suricata.yaml")):
+ args += ["-c", os.path.join(self.directory, "suricata.yaml")]
+ else:
+ args += ["-c", "./suricata.yaml"]
+
+ # Find pcaps.
+ pcaps = glob.glob(os.path.join(self.directory, "*.pcap"))
+ if not pcaps:
+ raise Exception("No pcap file found")
+ elif len(pcaps) > 1:
+ raise Exception("More than 1 pcap file found")
+ args += ["-r", pcaps[0]]
+
+ # Find rules.
+ rules = glob.glob(os.path.join(self.directory, "*.rules"))
+ if not rules:
+ args += ["-S", "/dev/null"]
+ elif len(rules) == 1:
+ args += ["-S", rules[0]]
+ else:
+ raise Exception("More than 1 rule file found")
+
+ return args
+
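+    # Start a thread that drains a subprocess pipe into the given log file.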
+ def start_reader(self, input, output):
+ t = threading.Thread(
+ target=pipe_reader, args=(input, output, self.verbose))
+ t.start()
+ self.readers.append(t)
+
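+# Check "suricata --build-info" for Lua support.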
+def check_for_lua():
+ output = subprocess.check_output(["./src/suricata", "--build-info"])
+ if output.find("HAVE_LUA") > -1:
+ return True
+ return False
+
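+# Return a (skip, reason) tuple. A test is skipped if its directory
+# contains a "skip" file, or if its name contains "lua" and the
+# Suricata build has no Lua support.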
+def check_skip(directory):
+ if os.path.exists(os.path.join(directory, "skip")):
+ return (True, None)
+
+ if directory.find("lua") > -1:
+ if not check_for_lua():
+ return (True, "lua not available")
+
+ return (False, None)
+
+def main():
+
+ parser = argparse.ArgumentParser(description="Verification test runner.")
+ parser.add_argument("-v", dest="verbose", action="store_true")
+ parser.add_argument("--force", dest="force", action="store_true",
+ help="Force running of skipped tests")
+ parser.add_argument("--fail", action="store_true",
+ help="Exit on test failure")
+ parser.add_argument("patterns", nargs="*", default=[])
+ args = parser.parse_args()
+
+    topdir = os.path.dirname(sys.argv[0]) or "."
+
+ skipped = 0
+ passed = 0
+ failed = 0
+
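+    # Each immediate subdirectory of the top directory is treated as a test.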
+ for dirpath, dirnames, filenames in os.walk(topdir):
+
+        # The top directory itself is not a test. Don't descend into
+        # directories that are known not to contain tests.
+        if dirpath == topdir:
+            for skip_dir in (".git", "etc"):
+                if skip_dir in dirnames:
+                    dirnames.remove(skip_dir)
+            continue
+
+ # We only want to go one level deep.
+ dirnames[0:] = []
+
+ name = os.path.basename(dirpath)
+
+ do_test = False
+ if not args.patterns:
+ if args.force:
+ do_test = True
+ else:
+ skip, reason = check_skip(dirpath)
+ if skip:
+ skipped += 1
+ if reason:
+ print("===> %s: SKIPPED: %s" % (name, reason))
+ else:
+ print("===> %s: SKIPPED" % (name))
+ else:
+ do_test = True
+ else:
+ # If a test matches a pattern, we do not skip it.
+ for pattern in args.patterns:
+ if name.find(pattern) > -1:
+ do_test = True
+ break
+
+ if do_test:
+ test_runner = TestRunner(dirpath, args.verbose)
+ try:
+ success = test_runner.run()
+ except Exception as err:
+ print("FAIL: exception: %s" % (str(err)))
+ success = False
+ if success:
+ passed += 1
+ else:
+ if args.fail:
+ return 1
+ failed += 1
+
+ print("")
+ print("PASSED: %d" % (passed))
+ print("FAILED: %d" % (failed))
+ print("SKIPPED: %d" % (skipped))
+
+ if failed > 0:
+ return 1
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
#! /bin/sh
+#
+# Just a wrapper for run.py now.
set -e
prefix=$(dirname $0)
-
-# Setting force to yes with "-f" or "--force" will force tests that
-# would otherwise be skipped.
-force=no
-
-# If not verbose, output will be redirected to files.
-stdout=
-stderr=
-
-# Continue if a test fails.
-continue=no
-
-# Set if any tests fails, so when continue is set we can still exit
-# with a failure status.
-failed=no
-
-for arg in $@; do
- case "${arg}" in
- -c|--continue)
- continue=yes
- shift
- ;;
- -v|--verbose)
- stdout=/dev/stdout
- stderr=/dev/stderr
- shift
- ;;
- -f|--force)
- force=yes
- shift
- ;;
- -h)
- cat <<EOF
-
-usage: $0 [options] [test-pattern]
-
-options:
- -c continue on failed test
- -v verbose output (stderr and stdout to terminal, not file)
- -f run tests that would other be skipped
-
-To only run specific tests and pattern can be provided. For example, a
-pattern of "dnp3" will run all tests with "dnp3" in the name.
-
-EOF
- exit 0
- ;;
- -*)
- echo "error: unknown argument: ${arg}"
- exit 1
- ;;
- esac
-done
-
-# The remaining args are the patterns to test.
-patterns="$@"
-
-# Find all non-private tests.
-tests=$(cd ${prefix} && find * -maxdepth 0 -type d | grep -v '^private$')
-
-# And the private tests.
-if [ -e "${prefix}/private" ]; then
- private=$(cd ${prefix} && find private/* -maxdepth 0 -type d)
- tests="${tests} ${private}"
-fi
-
-case $(uname) in
- Darwin)
- true
- ;;
- *)
- export ASAN_OPTIONS="detect_leaks=${DETECT_LEAKS:=1},disable_core=1"
- ;;
-esac
-export LSAN_OPTIONS="suppressions=qa/lsan.suppress"
-
-run_test() {
- t="$1"
- tdir="${prefix}/${t}"
-
- logdir="${tdir}/output"
-
- pcap=$(find "${tdir}" -name \*.pcap)
- if [ "${pcap}" = "" ]; then
- echo "error: no pcaps exists"
- exit 1
- fi
- if [ $(echo "${pcap}" | wc -l) -gt 1 ]; then
- echo "error: more than one pcap exists"
- exit 1
- fi
-
- args="-vvv -l ${logdir} -r ${pcap}"
-
- # If "ips" exists in the test name, then simulate ips.
- if echo "${tname}" | grep -q "ips"; then
- args="${args} --simulate-ips"
- fi
-
- if [ -e "${tdir}/suricata.yaml" ]; then
- args="${args} -c ${tdir}/suricata.yaml"
- else
- args="${args} -c ./suricata.yaml"
- fi
-
- # If test specific rules are not provided then use /dev/null to
- # avoid loading any.
- rules=$(for n in ${tdir}/*.rules; do echo $n; break; done)
- if [ -e "${rules}" ]; then
- args="${args} -S ${rules}"
- else
- args="${args} -S /dev/null"
- fi
-
- # If stderr and stdout are not set, redirect the outputs to a file.
- if [ "${stderr}" = "" ]; then
- _stderr="${logdir}/stderr"
- else
- _stderr="${stderr}"
- fi
- if [ "${stdout}" = "" ]; then
- _stdout="${logdir}/stdout"
- else
- _stdout="${stdout}"
- fi
-
- if [ -e "${tdir}/vars.sh" ]; then
- . "${tdir}/vars.sh"
-
- if [ "${SIMULATE_IPS}" = "yes" ]; then
- args="${args} --simulate-ips"
- fi
-
- if [ "${RUNMODE}" != "" ]; then
- args="${args} --runmode=${RUNMODE}"
- fi
- fi
-
- # Cleanup existing output directory.
- rm -rf "${logdir}"
- mkdir -p "${logdir}"
-
- args="${args} --set classification-file=./classification.config"
- args="${args} --set reference-config-file=./reference.config"
- args="${args} --init-errors-fatal"
-
- cmd="TZ=UTC ./src/suricata ${args}"
- echo "${cmd}" > ${logdir}/cmdline
- eval "${cmd}" > ${_stdout} 2> ${_stderr}
- return "$?"
-}
-
-# Check the name of the test against the patterns past on the command
-# line to determine if the test should run. No patterns means run all
-# tests.
-check_patterns() {
- tname="$1"
-
- if [ "${patterns}" = "" ]; then
- return 0
- fi
-
- for pattern in ${patterns}; do
- if echo "${tname}" | grep -q "${pattern}"; then
- return 0
- fi
- done
-
- return 1
-}
-
-# Check if a test should be skipped.
-check_skip() {
- t="$1"
- tdir="${prefix}/${t}"
-
- if [ -e "${tdir}/skip" ]; then
- return 0
- fi
-
- if [ -e "${tdir}/skip.sh" ]; then
- if /bin/sh "${tdir}/skip.sh"; then
- return 0
- fi
- fi
-
- return 1
-}
-
-# Check the output of Suricata. If a test doesn't provide its own
-# verification script, then the generic file compare will be
-# performed.
-check() {
- t="$1"
- (
- cd ${prefix}/${t}
-
- if [ -e "check.sh" ]; then
- if ! /bin/sh ./check.sh; then
- exit 1
- fi
- else
- echo "error: test has no check.sh script"
- exit 1
- fi
- )
- return $?
-}
-
-# Run Suricata and check the output.
-run_and_check() {
- t="${1}"
- tdir="${prefix}/${t}"
-
- # If test has its own run script, just use that.
- if [ -e "${tdir}/run.sh" ]; then
- if ! TEST_DIR="${tdir}" "${tdir}/run.sh"; then
- echo "===> ${t}: FAIL"
- return 1
- fi
- echo "===> ${t}: OK"
- return 0
- fi
-
- if ! (run_test "${t}"); then
- echo "===> ${t}: FAIL with non-zero exit (see ${tdir}/output/stderr)"
- return 1
- fi
- if ! (check "${t}"); then
- echo "===> ${t}: FAIL with verification error"
- return 1
- fi
- echo "===> ${t}: OK"
-}
-
-for t in ${tests}; do
-
- # These are not tests, but helper directories.
- if [ "${t}" = "etc" ]; then
- continue
- fi
-
- if check_patterns ${t}; then
- if test "${force}" = "no" && check_skip "${t}"; then
- echo "===> ${t}: SKIPPED"
- continue
- fi
- echo "===> Running ${t}."
- if ! (run_and_check "${t}"); then
- failed=yes
- if [ "${continue}" != "yes" ]; then
- exit 1
- fi
- fi
- fi
-done
-
-if [ "${failed}" = "yes" ]; then
- exit 1
-fi
-
-exit 0
+exec "${prefix}/run.py" "$@"