# contrib/testsuite-management/validate_failures.py
# Script to compare testsuite failures against a list of known-to-fail
# tests.
#
# Contributed by Diego Novillo <dnovillo@google.com>
#
# Copyright (C) 2011, 2012 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING.  If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""This script provides a coarser XFAILing mechanism that requires no
detailed DejaGNU markings.  This is useful in a variety of scenarios:

- Development branches with many known failures waiting to be fixed.
- Release branches with known failures that are not considered
  important for the particular release criteria used in that branch.

The script must be executed from the toplevel build directory.  When
executed it will:

1- Determine the target built: TARGET
2- Determine the source directory: SRCDIR
3- Look for a failure manifest file in
   <SRCDIR>/contrib/testsuite-management/<TARGET>.xfail
4- Collect all the <tool>.sum files from the build tree.
5- Produce a report stating:
   a- Failures expected in the manifest but not present in the build.
   b- Failures in the build not expected in the manifest.
6- If all the build failures are expected in the manifest, it exits
   with exit code 0.  Otherwise, it exits with error code 1.
"""
55 # Handled test results.
56 _VALID_TEST_RESULTS
= [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]
58 # Pattern for naming manifest files. The first argument should be
59 # the toplevel GCC source directory. The second argument is the
60 # target triple used during the build.
61 _MANIFEST_PATH_PATTERN
= '%s/contrib/testsuite-management/%s.xfail'
def Error(msg):
  """Print MSG to stderr and abort with exit code 1.

  NOTE(review): the `def` line and the exit call were dropped by the
  scrape; reconstructed from the call sites and the module docstring's
  exit-code contract — confirm against upstream.  The original used the
  Python 2 `print >>sys.stderr` statement; sys.stderr.write is
  equivalent here and also valid Python 3.
  """
  sys.stderr.write('\nerror: %s\n' % msg)
  sys.exit(1)
68 class TestResult(object):
69 """Describes a single DejaGNU test result as emitted in .sum files.
71 We are only interested in representing unsuccessful tests. So, only
72 a subset of all the tests are loaded.
74 The summary line used to build the test result should have this format:
76 attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
77 ^^^^^^^^ ^^^^^ ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
78 optional state name description
82 attrlist: A comma separated list of attributes.
84 flaky Indicates that this test may not always fail. These
85 tests are reported, but their presence does not affect
88 expire=YYYYMMDD After this date, this test will produce an error
89 whether it is in the manifest or not.
91 state: One of UNRESOLVED, XPASS or FAIL.
92 name: File name for the test.
93 description: String describing the test (flags used, dejagnu message, etc)
94 ordinal: Monotonically increasing integer.
95 It is used to keep results for one .exp file sorted
96 by the order the tests were run.
99 def __init__(self
, summary_line
, ordinal
=-1):
102 if '|' in summary_line
:
103 (self
.attrs
, summary_line
) = summary_line
.split('|', 1)
107 self
.description
) = re
.match(r
' *([A-Z]+):\s*(\S+)\s+(.*)',
108 summary_line
).groups()
110 print 'Failed to parse summary line: "%s"' % summary_line
112 self
.attrs
= self
.attrs
.strip()
113 self
.state
= self
.state
.strip()
114 self
.description
= self
.description
.strip()
115 self
.ordinal
= ordinal
117 Error('Cannot parse summary line "%s"' % summary_line
)
119 if self
.state
not in _VALID_TEST_RESULTS
:
120 Error('Invalid test result %s in "%s" (parsed as "%s")' % (
121 self
.state
, summary_line
, self
))
123 def __lt__(self
, other
):
124 return (self
.name
< other
.name
or
125 (self
.name
== other
.name
and self
.ordinal
< other
.ordinal
))
128 return hash(self
.state
) ^
hash(self
.name
) ^
hash(self
.description
)
130 def __eq__(self
, other
):
131 return (self
.state
== other
.state
and
132 self
.name
== other
.name
and
133 self
.description
== other
.description
)
135 def __ne__(self
, other
):
136 return not (self
== other
)
141 attrs
= '%s | ' % self
.attrs
142 return '%s%s: %s %s' % (attrs
, self
.state
, self
.name
, self
.description
)
144 def ExpirationDate(self
):
145 # Return a datetime.date object with the expiration date for this
146 # test result. Return None, if no expiration has been set.
147 if re
.search(r
'expire=', self
.attrs
):
148 expiration
= re
.search(r
'expire=(\d\d\d\d)(\d\d)(\d\d)', self
.attrs
)
150 Error('Invalid expire= format in "%s". Must be of the form '
151 '"expire=YYYYMMDD"' % self
)
152 return datetime
.date(int(expiration
.group(1)),
153 int(expiration
.group(2)),
154 int(expiration
.group(3)))
157 def HasExpired(self
):
158 # Return True if the expiration date of this result has passed.
159 expiration_date
= self
.ExpirationDate()
161 now
= datetime
.date
.today()
162 return now
> expiration_date
def GetMakefileValue(makefile_name, value_name):
  """Return the value assigned to VALUE_NAME in MAKEFILE_NAME.

  VALUE_NAME must include the variable name up to (and including) the
  '=' prefix to match, e.g. 'target_alias=' or 'srcdir ='.  Returns the
  stripped right-hand side of the first matching line, or None when the
  Makefile does not exist or no line matches.
  """
  if os.path.exists(makefile_name):
    # Use a context manager so the file is closed on every path; the
    # original left the handle open when no line matched.
    with open(makefile_name) as makefile:
      for line in makefile:
        if line.startswith(value_name):
          (_, value) = line.split('=', 1)
          return value.strip()
  return None
def ValidBuildDirectory(builddir, target):
  """Return True if BUILDDIR looks like a GCC toplevel build directory.

  A valid build directory exists, contains a Makefile, and contains
  either a 'build-<target>' or a '<target>' subdirectory.

  NOTE(review): the return statements were dropped by the scrape;
  reconstructed as False-on-failure / True-otherwise from the condition
  structure and the caller in GetBuildData — confirm against upstream.
  """
  if (not os.path.exists(builddir) or
      not os.path.exists('%s/Makefile' % builddir) or
      (not os.path.exists('%s/build-%s' % (builddir, target)) and
       not os.path.exists('%s/%s' % (builddir, target)))):
    return False
  return True
def IsInterestingResult(line):
  """Return True if the given line is one of the summary lines we care about."""
  line = line.strip()
  # Comment lines are never results.
  if line.startswith('#'):
    return False
  # Strip an optional attribute prefix ("attrs | STATE: ...").
  if '|' in line:
    (_, line) = line.split('|', 1)
    line = line.strip()
  # Interesting lines start with one of the handled result states.
  return any(line.startswith(state) for state in _VALID_TEST_RESULTS)
201 def ParseSummary(sum_fname
):
202 """Create a set of TestResult instances from the given summary file."""
204 # ordinal is used when sorting the results so that tests within each
205 # .exp file are kept sorted.
207 sum_file
= open(sum_fname
)
208 for line
in sum_file
:
209 if IsInterestingResult(line
):
210 result
= TestResult(line
, ordinal
)
212 if result
.HasExpired():
213 # Tests that have expired are not added to the set of expected
214 # results. If they are still present in the set of actual results,
215 # they will cause an error to be reported.
216 print 'WARNING: Expected failure "%s" has expired.' % line
.strip()
218 result_set
.add(result
)
def GetManifest(manifest_name):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty set.
  """
  # Guard clause: no manifest means no expected failures.
  if not os.path.exists(manifest_name):
    return set()
  return ParseSummary(manifest_name)
def CollectSumFiles(builddir):
  """Return the list of all .sum files found anywhere under BUILDDIR.

  NOTE(review): several interior lines were dropped by the scrape;
  the walk may originally have pruned VCS directories — confirm
  against upstream.
  """
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    sum_files.extend(os.path.join(root, fname)
                     for fname in files if fname.endswith('.sum'))
  return sum_files
248 def GetResults(sum_files
):
249 """Collect all the test results from the given .sum files."""
250 build_results
= set()
251 for sum_fname
in sum_files
:
252 print '\t%s' % sum_fname
253 build_results |
= ParseSummary(sum_fname
)
def CompareResults(manifest, actual):
  """Compare sets of results and return two lists:
  - List of results present in ACTUAL but missing from MANIFEST.
  - List of results present in MANIFEST but missing from ACTUAL.
  """
  # Everything that actually failed but is not in the manifest is an
  # error to report.
  actual_vs_manifest = set(r for r in actual if r not in manifest)

  # Manifest entries that did not show up in the actual results are
  # warnings (expected failures that stopped failing).  Flaky entries
  # are exempt: they may legitimately not fail on any given run.
  manifest_vs_actual = set()
  for expected in manifest:
    if 'flaky' in expected.attrs:
      continue
    if expected not in actual:
      manifest_vs_actual.add(expected)

  return actual_vs_manifest, manifest_vs_actual
284 def GetBuildData(options
):
285 target
= GetMakefileValue('%s/Makefile' % options
.build_dir
, 'target_alias=')
286 srcdir
= GetMakefileValue('%s/Makefile' % options
.build_dir
, 'srcdir =')
287 if not ValidBuildDirectory(options
.build_dir
, target
):
288 Error('%s is not a valid GCC top level build directory.' %
290 print 'Source directory: %s' % srcdir
291 print 'Build target: %s' % target
292 return srcdir
, target
, True
295 def PrintSummary(msg
, summary
):
297 for result
in sorted(summary
):
301 def GetSumFiles(results
, build_dir
):
303 print 'Getting actual results from build directory %s' % build_dir
304 sum_files
= CollectSumFiles(build_dir
)
306 print 'Getting actual results from user-provided results'
307 sum_files
= results
.split()
311 def PerformComparison(expected
, actual
, ignore_missing_failures
):
312 actual_vs_expected
, expected_vs_actual
= CompareResults(expected
, actual
)
315 if len(actual_vs_expected
) > 0:
316 PrintSummary('Unexpected results in this build (new failures)',
320 if not ignore_missing_failures
and len(expected_vs_actual
) > 0:
321 PrintSummary('Expected results not present in this build (fixed tests)'
322 '\n\nNOTE: This is not a failure. It just means that these '
323 'tests were expected\nto fail, but they worked in this '
324 'configuration.\n', expected_vs_actual
)
327 print '\nSUCCESS: No unexpected failures.'
332 def CheckExpectedResults(options
):
333 if not options
.manifest
:
334 (srcdir
, target
, valid_build
) = GetBuildData(options
)
337 manifest_name
= _MANIFEST_PATH_PATTERN
% (srcdir
, target
)
339 manifest_name
= options
.manifest
340 if not os
.path
.exists(manifest_name
):
341 Error('Manifest file %s does not exist.' % manifest_name
)
343 print 'Manifest: %s' % manifest_name
344 manifest
= GetManifest(manifest_name
)
345 sum_files
= GetSumFiles(options
.results
, options
.build_dir
)
346 actual
= GetResults(sum_files
)
348 if options
.verbosity
>= 1:
349 PrintSummary('Tests expected to fail', manifest
)
350 PrintSummary('\nActual test results', actual
)
352 return PerformComparison(manifest
, actual
, options
.ignore_missing_failures
)
355 def ProduceManifest(options
):
356 (srcdir
, target
, valid_build
) = GetBuildData(options
)
360 manifest_name
= _MANIFEST_PATH_PATTERN
% (srcdir
, target
)
361 if os
.path
.exists(manifest_name
) and not options
.force
:
362 Error('Manifest file %s already exists.\nUse --force to overwrite.' %
365 sum_files
= GetSumFiles(options
.results
, options
.build_dir
)
366 actual
= GetResults(sum_files
)
367 manifest_file
= open(manifest_name
, 'w')
368 for result
in sorted(actual
):
370 manifest_file
.write('%s\n' % result
)
371 manifest_file
.close()
def CompareBuilds(options):
  """Compare this build's results against those of a clean build.

  The clean build's directory comes from --clean_build; the comparison
  treats its results as the expected set.  Returns the boolean result
  of PerformComparison.
  """
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return False

  sum_files = GetSumFiles(options.results, options.build_dir)
  actual = GetResults(sum_files)

  # Always scan the clean build tree (pass None so user-supplied
  # --results only applies to the build under test).
  clean_sum_files = GetSumFiles(None, options.clean_build)
  clean = GetResults(clean_sum_files)

  return PerformComparison(clean, actual, options.ignore_missing_failures)
def Main(argv):
  """Parse ARGV, dispatch to the requested action and return an exit code.

  NOTE(review): the `def Main(argv):` header and the final dispatch
  lines were dropped by the scrape; reconstructed from the visible
  parse_args/retval fragments — confirm against upstream.

  Fix: the --manifest help text spelled the directory
  'testsuite-managment'; corrected to 'testsuite-management' to match
  _MANIFEST_PATH_PATTERN.
  """
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--clean_build', action='store', type='string',
                    dest='clean_build', default=None,
                    help='Compare test results from this build against '
                    'those of another (clean) build. Use this option '
                    'when comparing the test results of your patch versus '
                    'the test results of a clean build without your patch. '
                    'You must provide the path to the top directory of your '
                    'clean build.')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  parser.add_option('--ignore_missing_failures', action='store_true',
                    dest='ignore_missing_failures', default=False,
                    help='When a failure is expected in the manifest but '
                    'it is not found in the actual results, the script '
                    'produces a note alerting to this fact. This means '
                    'that the expected failure has been fixed, or '
                    'it did not run, or it may simply be flaky '
                    '(default = False)')
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from contrib/testsuite-management/<target_alias>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, XPASS or UNRESOLVED (default = '
                    '.sum files collected from the build directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  (options, _) = parser.parse_args(argv[1:])

  if options.produce_manifest:
    retval = ProduceManifest(options)
  elif options.clean_build:
    retval = CompareBuilds(options)
  else:
    retval = CheckExpectedResults(options)

  # Map the boolean action result onto a process exit code.
  if retval:
    return 0
  return 1
if __name__ == '__main__':
  retval = Main(sys.argv)
  # Propagate Main's result as the process exit status, per the module
  # docstring's exit-code contract.  NOTE(review): the sys.exit call was
  # dropped by the scrape; without it the computed retval was discarded.
  sys.exit(retval)