# these are for debugging only
our @EXPORT_OK = qw(
+ readtestkeywords
singletest_preprocess
);
}
}
+#######################################################################
+# Load test keywords into %keywords hash
+#
+# Reads the <keywords> entries from the <info> section of the test case
+# that was most recently loaded with loadtest() (via getpart) and
+# rebuilds the global %keywords hash so that each keyword maps to 1.
+# Keywords left over from the previously-processed test are discarded
+# first.  No meaningful return value.
+#
+sub readtestkeywords {
+ my @info_keywords = getpart("info", "keywords");
+
+ # Clear the list of keywords from the last test
+ %keywords = ();
+ for my $k (@info_keywords) {
+ # getpart returns each keyword with its trailing newline; strip it
+ chomp $k;
+ $keywords{$k} = 1;
+ }
+}
+
+
#######################################################################
# Memory allocation test and failure torture testing.
#
}
+#######################################################################
# restore environment variables that were modified in test
sub restore_test_env {
my $deleteoldenv = $_[0]; # 1 to delete the saved contents after restore
# Get ready to run a single test case
sub runner_test_preprocess {
my ($testnum)=@_;
-
my %testtimings;
+ # timestamp test preparation start
+ # TODO: this metric now shows only a portion of the prep time; better would
+ # be to time singletest_preprocess below instead
+ $testtimings{"timeprepini"} = Time::HiRes::time();
+
+ ###################################################################
+ # Load test metadata
+ # ignore any error here--if there were one, it would have been
+ # caught during the selection phase and this test would not be
+ # running now
+ loadtest("${TESTDIR}/test${testnum}");
+ readtestkeywords();
+
###################################################################
# Start the servers needed to run this test case
my $why = singletest_startservers($testnum, \%testtimings);
###############################################################
# Generate preprocessed test file
+ # This must be done after the servers are started so server
+ # variables are available for substitution.
singletest_preprocess($testnum);
-
###############################################################
# Set up the test environment to run this test case
singletest_setenv();
-
###############################################################
# Check that the test environment is fine to run this test case
if (!$listonly) {
my %enabled_keywords; # key words of tests to run
my %disabled; # disabled test cases
my %ignored; # ignored results of test cases
+my %ignoretestcodes; # if test results are to be ignored
my $timestats; # time stamping and stats generation
my $fullstats; # show time stats for every single test
return $result;
}
+#######################################################################
+# Parse and store the protocols in curl's Protocols: line
+#
+# Takes the text following "Protocols:" in curl's version output,
+# lowercases it and splits it on whitespace into the global @protocols
+# list, then appends derived pseudo-protocol names that test cases use
+# in their <server> sections.  No meaningful return value.
+#
+sub parseprotocols {
+ my ($line)=@_;
+
+ @protocols = split(' ', lc($line));
+
+ # Generate a "proto-ipv6" version of each protocol to match the
+ # IPv6 <server> name and a "proto-unix" to match the variant which
+ # uses Unix domain sockets. This works even if support isn't
+ # compiled in because the <features> test will fail.
+ push @protocols, map(("$_-ipv6", "$_-unix"), @protocols);
+
+ # 'http-proxy' is used in test cases to do CONNECT through
+ push @protocols, 'http-proxy';
+
+ # 'none' is used in test cases to mean no server
+ push @protocols, 'none';
+}
+
+
#######################################################################
# Check & display information about curl and the host the test suite runs on.
# Information to do with servers is displayed in displayserverfeatures, after
}
elsif($_ =~ /^Protocols: (.*)/i) {
# these are the protocols compiled in to this libcurl
- @protocols = split(' ', lc($1));
-
- # Generate a "proto-ipv6" version of each protocol to match the
- # IPv6 <server> name and a "proto-unix" to match the variant which
- # uses Unix domain sockets. This works even if support isn't
- # compiled in because the <features> test will fail.
- push @protocols, map(("$_-ipv6", "$_-unix"), @protocols);
-
- # 'http-proxy' is used in test cases to do CONNECT through
- push @protocols, 'http-proxy';
-
- # 'none' is used in test cases to mean no server
- push @protocols, 'none';
+ parseprotocols($1);
}
elsif($_ =~ /^Features: (.*)/i) {
$feat = $1;
sub updatetesttimings {
my ($testnum, %testtimings)=@_;
+ if(defined $testtimings{"timeprepini"}) {
+ $timeprepini{$testnum} = $testtimings{"timeprepini"};
+ }
if(defined $testtimings{"timesrvrini"}) {
$timesrvrini{$testnum} = $testtimings{"timesrvrini"};
}
my $errorreturncode = 1; # 1 means normal error, 2 means ignored error
my @what; # what features are needed
- # first, remove all lingering log files
- if(!cleardir($LOGDIR) && $clearlocks) {
- clearlocks($LOGDIR);
- cleardir($LOGDIR);
- }
-
- # timestamp test preparation start
- $timeprepini{$testnum} = Time::HiRes::time();
-
if($disttests !~ /test$testnum(\W|\z)/ ) {
logmsg "Warning: test$testnum not present in tests/data/Makefile.inc\n";
}
$errorreturncode = 2;
}
- # load the test case file definition
if(loadtest("${TESTDIR}/test${testnum}")) {
if($verbose) {
# this is not a test
if(!$why) {
@info_keywords = getpart("info", "keywords");
- # Clear the list of keywords from the last test
- %keywords = ();
-
if(!$info_keywords[0]) {
$why = "missing the <keywords> section!";
}
logmsg "Warning: test$testnum result is ignored due to $k\n";
$errorreturncode = 2;
}
-
- $keywords{$k} = 1;
}
if(!$why && !$match && %enabled_keywords) {
sub singletest {
my ($testnum, $count, $total)=@_;
- #######################################################################
- # Verify that the test should be run
- my ($why, $errorreturncode) = singletest_shouldrun($testnum);
-
- if(!$listonly) {
+ # first, remove all lingering log files
+ if(!cleardir($LOGDIR) && $clearlocks) {
+ clearlocks($LOGDIR);
+ cleardir($LOGDIR);
+ }
- ###################################################################
- # Restore environment variables that were modified in a previous run.
- # Test definition may instruct to (un)set environment vars.
- # This is done this early so that leftover variables don't affect
- # starting servers or CI registration.
- restore_test_env(1);
+ ###################################################################
+ # Restore environment variables that were modified in a previous run.
+ # Test definition may instruct to (un)set environment vars.
+ # This is done this early so that leftover variables don't affect
+ # starting servers or CI registration.
+ restore_test_env(1);
- ###################################################################
- # Register the test case with the CI environment
- citest_starttest($testnum);
+ ###################################################################
+ # Load test file so CI registration can get the right data before the
+ # runner is called
+ loadtest("${TESTDIR}/test${testnum}");
- if(!$why) {
- my $testtimings;
- ($why, $testtimings) = runner_test_preprocess($testnum);
- updatetesttimings($testnum, %$testtimings);
- } else {
+ ###################################################################
+ # Register the test case with the CI environment
+ citest_starttest($testnum);
- # set zero servers verification time when they aren't started
- $timesrvrini{$testnum} = $timesrvrend{$testnum} = Time::HiRes::time();
- }
- }
+ my ($why, $testtimings) = runner_test_preprocess($testnum);
+ updatetesttimings($testnum, %$testtimings);
#######################################################################
# Print the test name and count tests
- my $error;
- $error = singletest_count($testnum, $why);
- if($error || $listonly) {
+ my $error = singletest_count($testnum, $why);
+ if($error) {
return $error;
}
my $CURLOUT;
my $tool;
my $usedvalgrind;
- my $testtimings;
($error, $testtimings, $cmdres, $CURLOUT, $tool, $usedvalgrind) = runner_test_run($testnum);
updatetesttimings($testnum, %$testtimings);
if($error == -1) {
# no further verification will occur
$timevrfyend{$testnum} = Time::HiRes::time();
# return a test failure, either to be reported or to be ignored
- return $errorreturncode;
+ return ignoreresultcode($testnum);
}
elsif($error == -2) {
# fill in the missing timings on error
# Verify that the test succeeded
$error = singletest_check($testnum, $cmdres, $CURLOUT, $tool, $usedvalgrind);
if($error == -1) {
- # return a test failure, either to be reported or to be ignored
- return $errorreturncode;
+ # return a test failure, either to be reported or to be ignored
+ return ignoreresultcode($testnum);
}
elsif($error == -2) {
# torture test; there is no verification, so the run result holds the
#######################################################################
# Report a successful test
- singletest_success($testnum, $count, $total, $errorreturncode);
+ singletest_success($testnum, $count, $total, ignoreresultcode($testnum));
return 0;
logmsg "\n";
}
+#######################################################################
+# returns code indicating why a test was skipped
+# 0=unknown test, 1=use test result, 2=ignore test result
+#
+sub ignoreresultcode {
+ my ($testnum)=@_;
+ # %ignoretestcodes is filled in from singletest_shouldrun's
+ # $errorreturncode while building the list of runnable tests
+ if(defined $ignoretestcodes{$testnum}) {
+ return $ignoretestcodes{$testnum};
+ }
+ # test number was never registered as runnable
+ return 0;
+}
+
+
#######################################################################
# Check options to this test program
#
$start = time();
+# scan all tests to find ones we should try to run
+my @runtests;
foreach my $testnum (@at) {
-
$lasttest = $testnum if($testnum > $lasttest);
+ my ($why, $errorreturncode) = singletest_shouldrun($testnum);
+ if($why || $listonly) {
+ # Display test name now--test will be completely skipped later
+ my $error = singletest_count($testnum, $why);
+ next;
+ }
+ $ignoretestcodes{$testnum} = $errorreturncode;
+ push(@runtests, $testnum);
+}
+
+if($listonly) {
+ exit(0);
+}
+
+# run through each candidate test and execute it
+foreach my $testnum (@runtests) {
$count++;
# execute one test case
- my $error = singletest($testnum, $count, scalar(@at));
+ my $error = singletest($testnum, $count, scalar(@runtests));
- if(!$listonly) {
- # Submit the test case result with the CI environment
- citest_finishtest($testnum, $error);
- }
+ # Submit the test case result with the CI environment
+ citest_finishtest($testnum, $error);
if($error < 0) {
# not a test we can run