#!/usr/bin/python

# Script to compare testsuite failures against a list of known-to-fail
# tests.

# Contributed by Diego Novillo <dnovillo@google.com>
#
# Copyright (C) 2011, 2012 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.

"""This script provides a coarser XFAILing mechanism that requires no
detailed DejaGNU markings. This is useful in a variety of scenarios:

- Development branches with many known failures waiting to be fixed.
- Release branches with known failures that are not considered
  important for the particular release criteria used in that branch.

The script must be executed from the toplevel build directory. When
executed it will:

1- Determine the target built: TARGET
2- Determine the source directory: SRCDIR
3- Look for a failure manifest file in
   <SRCDIR>/contrib/testsuite-management/<TARGET>.xfail
4- Collect all the <tool>.sum files from the build tree.
5- Produce a report stating:
   a- Failures expected in the manifest but not present in the build.
   b- Failures in the build not expected in the manifest.
6- If all the build failures are expected in the manifest, it exits
   with exit code 0. Otherwise, it exits with error code 1.
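
Example (a minimal sketch; the paths and target below are hypothetical):

  $ cd /build/gcc-obj
  $ /src/gcc/contrib/testsuite-management/validate_failures.py

This would look up the manifest
/src/gcc/contrib/testsuite-management/<TARGET>.xfail, collect every
<tool>.sum file under /build/gcc-obj, and report the differences
between the two sets of failures.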
47 """
48
49 import datetime
50 import optparse
51 import os
52 import re
53 import sys
54
55 # Handled test results.
56 _VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]
57
58 # Pattern for naming manifest files. The first argument should be
59 # the toplevel GCC source directory. The second argument is the
60 # target triple used during the build.
61 _MANIFEST_PATH_PATTERN = '%s/contrib/testsuite-management/%s.xfail'
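# For instance (hypothetical values), _MANIFEST_PATH_PATTERN %
# ('/src/gcc', 'x86_64-unknown-linux-gnu') expands to
# '/src/gcc/contrib/testsuite-management/x86_64-unknown-linux-gnu.xfail'.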

def Error(msg):
  print >>sys.stderr, '\nerror: %s' % msg
  sys.exit(1)


class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests. So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrlist: A comma separated list of attributes.
      Valid values:
        flaky            Indicates that this test may not always fail. These
                         tests are reported, but their presence does not affect
                         the results.

        expire=YYYYMMDD  After this date, this test will produce an error
                         whether it is in the manifest or not.

    state: One of FAIL, UNRESOLVED, XPASS or ERROR (see _VALID_TEST_RESULTS).
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
    ordinal: Monotonically increasing integer.
             It is used to keep results for one .exp file sorted
             by the order the tests were run.
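
  Example (a made-up line for illustration): the summary line

    flaky,expire=20301231 | FAIL: gcc.dg/foo.c -O2 (test for excess errors)

  parses into attrs='flaky,expire=20301231', state='FAIL',
  name='gcc.dg/foo.c' and description='-O2 (test for excess errors)'.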
97 """
98
99 def __init__(self, summary_line, ordinal=-1):
100 try:
101 self.attrs = ''
102 if '|' in summary_line:
103 (self.attrs, summary_line) = summary_line.split('|', 1)
104 try:
105 (self.state,
106 self.name,
107 self.description) = re.match(r' *([A-Z]+):\s*(\S+)\s+(.*)',
108 summary_line).groups()
109 except:
110 print 'Failed to parse summary line: "%s"' % summary_line
111 raise
112 self.attrs = self.attrs.strip()
113 self.state = self.state.strip()
114 self.description = self.description.strip()
115 self.ordinal = ordinal
116 except ValueError:
117 Error('Cannot parse summary line "%s"' % summary_line)
118
119 if self.state not in _VALID_TEST_RESULTS:
120 Error('Invalid test result %s in "%s" (parsed as "%s")' % (
121 self.state, summary_line, self))
122
123 def __lt__(self, other):
124 return (self.name < other.name or
125 (self.name == other.name and self.ordinal < other.ordinal))
126
127 def __hash__(self):
128 return hash(self.state) ^ hash(self.name) ^ hash(self.description)
129
130 def __eq__(self, other):
131 return (self.state == other.state and
132 self.name == other.name and
133 self.description == other.description)
134
135 def __ne__(self, other):
136 return not (self == other)
137
138 def __str__(self):
139 attrs = ''
140 if self.attrs:
141 attrs = '%s | ' % self.attrs
142 return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)
143
144 def ExpirationDate(self):
145 # Return a datetime.date object with the expiration date for this
146 # test result. Return None, if no expiration has been set.
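    # For example (illustrative), an attrs value of 'expire=20301231'
    # yields datetime.date(2030, 12, 31).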
    if re.search(r'expire=', self.attrs):
      expiration = re.search(r'expire=(\d\d\d\d)(\d\d)(\d\d)', self.attrs)
      if not expiration:
        Error('Invalid expire= format in "%s". Must be of the form '
              '"expire=YYYYMMDD"' % self)
      return datetime.date(int(expiration.group(1)),
                           int(expiration.group(2)),
                           int(expiration.group(3)))
    return None

  def HasExpired(self):
    # Return True if the expiration date of this result has passed.
    expiration_date = self.ExpirationDate()
    if expiration_date:
      now = datetime.date.today()
      return now > expiration_date


def GetMakefileValue(makefile_name, value_name):
  if os.path.exists(makefile_name):
    makefile = open(makefile_name)
    for line in makefile:
      if line.startswith(value_name):
        (_, value) = line.split('=', 1)
        value = value.strip()
        makefile.close()
        return value
    makefile.close()
  return None


def ValidBuildDirectory(builddir, target):
  if (not os.path.exists(builddir) or
      not os.path.exists('%s/Makefile' % builddir) or
      (not os.path.exists('%s/build-%s' % (builddir, target)) and
       not os.path.exists('%s/%s' % (builddir, target)))):
    return False
  return True


def IsInterestingResult(line):
  """Return True if the given line is one of the summary lines we care about."""
  line = line.strip()
  if line.startswith('#'):
    return False
  if '|' in line:
    (_, line) = line.split('|', 1)
    line = line.strip()
  for result in _VALID_TEST_RESULTS:
    if line.startswith(result):
      return True
  return False


def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file."""
  result_set = set()
  # ordinal is used when sorting the results so that tests within each
  # .exp file are kept sorted.
  ordinal = 0
  sum_file = open(sum_fname)
  for line in sum_file:
    if IsInterestingResult(line):
      result = TestResult(line, ordinal)
      ordinal += 1
      if result.HasExpired():
        # Tests that have expired are not added to the set of expected
        # results. If they are still present in the set of actual results,
        # they will cause an error to be reported.
        print 'WARNING: Expected failure "%s" has expired.' % line.strip()
        continue
      result_set.add(result)
  sum_file.close()
  return result_set


def GetManifest(manifest_name):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty set.
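
  Example manifest content (illustrative; the test names are made up):

    # Lines starting with '#' are ignored.
    FAIL: gcc.dg/foo.c (test for excess errors)
    flaky | FAIL: gcc.dg/bar.c execution test
    expire=20301231 | FAIL: gcc.dg/baz.c scan-assembler vfmadd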
230 """
231 if os.path.exists(manifest_name):
232 return ParseSummary(manifest_name)
233 else:
234 return set()
235
236
237 def CollectSumFiles(builddir):
238 sum_files = []
239 for root, dirs, files in os.walk(builddir):
240 if '.svn' in dirs:
241 dirs.remove('.svn')
242 for fname in files:
243 if fname.endswith('.sum'):
244 sum_files.append(os.path.join(root, fname))
245 return sum_files
246
247
248 def GetResults(sum_files):
249 """Collect all the test results from the given .sum files."""
250 build_results = set()
251 for sum_fname in sum_files:
252 print '\t%s' % sum_fname
253 build_results |= ParseSummary(sum_fname)
254 return build_results
255
256
257 def CompareResults(manifest, actual):
258 """Compare sets of results and return two lists:
259 - List of results present in ACTUAL but missing from MANIFEST.
260 - List of results present in MANIFEST but missing from ACTUAL.
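
     For example (illustrative): if MANIFEST holds failures {A, B} and
     ACTUAL holds {B, C}, this returns ({C}, {A}); C is a new failure and
     A is an expected failure that did not show up in this build.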
261 """
262 # Collect all the actual results not present in the manifest.
263 # Results in this set will be reported as errors.
264 actual_vs_manifest = set()
265 for actual_result in actual:
266 if actual_result not in manifest:
267 actual_vs_manifest.add(actual_result)
268
269 # Collect all the tests in the manifest that were not found
270 # in the actual results.
271 # Results in this set will be reported as warnings (since
272 # they are expected failures that are not failing anymore).
273 manifest_vs_actual = set()
274 for expected_result in manifest:
275 # Ignore tests marked flaky.
276 if 'flaky' in expected_result.attrs:
277 continue
278 if expected_result not in actual:
279 manifest_vs_actual.add(expected_result)
280
281 return actual_vs_manifest, manifest_vs_actual
282
283
284 def GetBuildData(options):
285 target = GetMakefileValue('%s/Makefile' % options.build_dir, 'target_alias=')
286 srcdir = GetMakefileValue('%s/Makefile' % options.build_dir, 'srcdir =')
287 if not ValidBuildDirectory(options.build_dir, target):
288 Error('%s is not a valid GCC top level build directory.' %
289 options.build_dir)
290 print 'Source directory: %s' % srcdir
291 print 'Build target: %s' % target
292 return srcdir, target, True
293
294
295 def PrintSummary(msg, summary):
296 print '\n\n%s' % msg
297 for result in sorted(summary):
298 print result
299
300
301 def GetSumFiles(results, build_dir):
302 if not results:
303 print 'Getting actual results from build directory %s' % build_dir
304 sum_files = CollectSumFiles(build_dir)
305 else:
306 print 'Getting actual results from user-provided results'
307 sum_files = results.split()
308 return sum_files
309
310
311 def PerformComparison(expected, actual, ignore_missing_failures):
312 actual_vs_expected, expected_vs_actual = CompareResults(expected, actual)
313
314 tests_ok = True
315 if len(actual_vs_expected) > 0:
316 PrintSummary('Unexpected results in this build (new failures)',
317 actual_vs_expected)
318 tests_ok = False
319
320 if not ignore_missing_failures and len(expected_vs_actual) > 0:
321 PrintSummary('Expected results not present in this build (fixed tests)'
322 '\n\nNOTE: This is not a failure. It just means that these '
323 'tests were expected\nto fail, but they worked in this '
324 'configuration.\n', expected_vs_actual)
325
326 if tests_ok:
327 print '\nSUCCESS: No unexpected failures.'
328
329 return tests_ok
330
331
332 def CheckExpectedResults(options):
333 if not options.manifest:
334 (srcdir, target, valid_build) = GetBuildData(options)
335 if not valid_build:
336 return False
337 manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
338 else:
339 manifest_name = options.manifest
340 if not os.path.exists(manifest_name):
341 Error('Manifest file %s does not exist.' % manifest_name)
342
343 print 'Manifest: %s' % manifest_name
344 manifest = GetManifest(manifest_name)
345 sum_files = GetSumFiles(options.results, options.build_dir)
346 actual = GetResults(sum_files)
347
348 if options.verbosity >= 1:
349 PrintSummary('Tests expected to fail', manifest)
350 PrintSummary('\nActual test results', actual)
351
352 return PerformComparison(manifest, actual, options.ignore_missing_failures)
353
354
355 def ProduceManifest(options):
356 (srcdir, target, valid_build) = GetBuildData(options)
357 if not valid_build:
358 return False
359
360 manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
361 if os.path.exists(manifest_name) and not options.force:
362 Error('Manifest file %s already exists.\nUse --force to overwrite.' %
363 manifest_name)
364
365 sum_files = GetSumFiles(options.results, options.build_dir)
366 actual = GetResults(sum_files)
367 manifest_file = open(manifest_name, 'w')
368 for result in sorted(actual):
369 print result
370 manifest_file.write('%s\n' % result)
371 manifest_file.close()
372
373 return True
374
375
376 def CompareBuilds(options):
377 (srcdir, target, valid_build) = GetBuildData(options)
378 if not valid_build:
379 return False
380
381 sum_files = GetSumFiles(options.results, options.build_dir)
382 actual = GetResults(sum_files)
383
384 clean_sum_files = GetSumFiles(None, options.clean_build)
385 clean = GetResults(clean_sum_files)
386
387 return PerformComparison(clean, actual, options.ignore_missing_failures)
388
389
390 def Main(argv):
391 parser = optparse.OptionParser(usage=__doc__)
392
393 # Keep the following list sorted by option name.
394 parser.add_option('--build_dir', action='store', type='string',
395 dest='build_dir', default='.',
396 help='Build directory to check (default = .)')
397 parser.add_option('--clean_build', action='store', type='string',
398 dest='clean_build', default=None,
399 help='Compare test results from this build against '
400 'those of another (clean) build. Use this option '
401 'when comparing the test results of your patch versus '
402 'the test results of a clean build without your patch. '
403 'You must provide the path to the top directory of your '
404 'clean build.')
405 parser.add_option('--force', action='store_true', dest='force',
406 default=False, help='When used with --produce_manifest, '
407 'it will overwrite an existing manifest file '
408 '(default = False)')
409 parser.add_option('--ignore_missing_failures', action='store_true',
410 dest='ignore_missing_failures', default=False,
411 help='When a failure is expected in the manifest but '
412 'it is not found in the actual results, the script '
413 'produces a note alerting to this fact. This means '
414 'that the expected failure has been fixed, or '
415 'it did not run, or it may simply be flaky '
416 '(default = False)')
417 parser.add_option('--manifest', action='store', type='string',
418 dest='manifest', default=None,
419 help='Name of the manifest file to use (default = '
                    'taken from contrib/testsuite-management/<target_alias>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, UNRESOLVED, XPASS or ERROR (default = '
                    '.sum files collected from the build directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  (options, _) = parser.parse_args(argv[1:])

  if options.produce_manifest:
    retval = ProduceManifest(options)
  elif options.clean_build:
    retval = CompareBuilds(options)
  else:
    retval = CheckExpectedResults(options)

  if retval:
    return 0
  else:
    return 1


if __name__ == '__main__':
  retval = Main(sys.argv)
  sys.exit(retval)