]> git.ipfire.org Git - thirdparty/gcc.git/blame - contrib/testsuite-management/validate_failures.py
Do not use 'with ... as ...' in validate_failures.py
[thirdparty/gcc.git] / contrib / testsuite-management / validate_failures.py
CommitLineData
02b988a4 1#!/usr/bin/python
5eb8d002 2
3# Script to compare testsuite failures against a list of known-to-fail
4# tests.
5
6# Contributed by Diego Novillo <dnovillo@google.com>
7#
b20d3dc6 8# Copyright (C) 2011, 2012 Free Software Foundation, Inc.
5eb8d002 9#
10# This file is part of GCC.
11#
12# GCC is free software; you can redistribute it and/or modify
13# it under the terms of the GNU General Public License as published by
14# the Free Software Foundation; either version 3, or (at your option)
15# any later version.
16#
17# GCC is distributed in the hope that it will be useful,
18# but WITHOUT ANY WARRANTY; without even the implied warranty of
19# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20# GNU General Public License for more details.
21#
22# You should have received a copy of the GNU General Public License
23# along with GCC; see the file COPYING. If not, write to
24# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
25# Boston, MA 02110-1301, USA.
26
27"""This script provides a coarser XFAILing mechanism that requires no
28detailed DejaGNU markings. This is useful in a variety of scenarios:
29
30- Development branches with many known failures waiting to be fixed.
31- Release branches with known failures that are not considered
32 important for the particular release criteria used in that branch.
33
34The script must be executed from the toplevel build directory. When
35executed it will:
36
371- Determine the target built: TARGET
382- Determine the source directory: SRCDIR
393- Look for a failure manifest file in
40 <SRCDIR>/contrib/testsuite-management/<TARGET>.xfail
414- Collect all the <tool>.sum files from the build tree.
425- Produce a report stating:
43 a- Failures expected in the manifest but not present in the build.
44 b- Failures in the build not expected in the manifest.
456- If all the build failures are expected in the manifest, it exits
46 with exit code 0. Otherwise, it exits with error code 1.
47"""
48
49import optparse
50import os
51import re
52import sys
53
# Handled test results.  Only summary lines whose state is one of these
# strings are loaded from .sum files; successful tests (PASS, etc.) are
# ignored entirely.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]

# Pattern for naming manifest files.  The first argument should be
# the toplevel GCC source directory.  The second argument is the
# target triple used during the build.
_MANIFEST_PATH_PATTERN = '%s/contrib/testsuite-management/%s.xfail'
61
def Error(msg):
  """Print an error message to stderr and abort with exit code 1.

  Args:
    msg: The message to report (a '\nerror: ' prefix is added).
  """
  # sys.stderr.write is valid on both Python 2 and 3; the original
  # 'print >>sys.stderr' form is Python-2-only syntax.
  sys.stderr.write('\nerror: %s\n' % msg)
  sys.exit(1)
65
66
class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrs: A comma separated list of attributes.
      Valid values:
        flaky            Indicates that this test may not always fail.  These
                         tests are reported, but their presence does not
                         affect the results.

        expire=YYYYMMDD  After this date, this test will produce an error
                         whether it is in the manifest or not.

    state: One of UNRESOLVED, XPASS or FAIL.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
  """

  def __init__(self, summary_line):
    """Parse SUMMARY_LINE into the attrs/state/name/description fields.

    Calls Error() (which exits) when the line cannot be parsed or the
    state is not one of _VALID_TEST_RESULTS.
    """
    try:
      self.attrs = ''
      if '|' in summary_line:
        # Split off the optional 'attrlist |' prefix.
        (self.attrs, summary_line) = summary_line.split('|', 1)
      try:
        (self.state,
         self.name,
         self.description) = re.match(r' *([A-Z]+): (\S+)\s(.*)',
                                      summary_line).groups()
      except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are not intercepted.  A failed re.match returns None, so the
        # .groups() call raises AttributeError; report it, then re-raise
        # unchanged.  print() is valid on both Python 2 and 3 here.
        print('Failed to parse summary line: "%s"' % summary_line)
        raise
      self.attrs = self.attrs.strip()
      self.state = self.state.strip()
      self.description = self.description.strip()
    except ValueError:
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
            self.state, summary_line, self))

  def __lt__(self, other):
    # Sort results by test name only (used by sorted() in reports).
    return self.name < other.name

  def __hash__(self):
    # NOTE: attrs intentionally does not participate in hashing/equality,
    # so a manifest entry tagged 'flaky' still matches the build result.
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    return (self.state == other.state and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    # Reconstruct the summary-line form, including the optional attrlist.
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)
137
138
def GetMakefileValue(makefile_name, value_name):
  """Scan MAKEFILE_NAME for the first line starting with VALUE_NAME.

  Args:
    makefile_name: Path to the Makefile.
    value_name: The assignment prefix to look for (e.g. 'srcdir =').

  Returns:
    The text after the first '=' on the matching line, stripped, or
    None when the file does not exist or no line matches.
  """
  if not os.path.exists(makefile_name):
    return None
  # Repo policy forbids 'with ... as ...' in this script; use try/finally
  # so the handle is closed even if iterating the file raises (the
  # original leaked the handle on that path).
  makefile = open(makefile_name)
  try:
    for line in makefile:
      if line.startswith(value_name):
        (_, value) = line.split('=', 1)
        return value.strip()
    return None
  finally:
    makefile.close()
150
151
def ValidBuildDirectory(builddir, target):
  """Return True if BUILDDIR looks like a GCC toplevel build directory."""
  if not os.path.exists(builddir):
    return False
  if not os.path.exists('%s/Makefile' % builddir):
    return False
  # The target libraries live in either build-<target> or <target>;
  # accept the directory if either one exists.
  return (os.path.exists('%s/build-%s' % (builddir, target)) or
          os.path.exists('%s/%s' % (builddir, target)))
159
160
def IsInterestingResult(line):
  """Return True if the given line is one of the summary lines we care about.

  A line is interesting when, after stripping an optional '#' comment
  check and an optional 'attrlist |' prefix, it starts with one of the
  states in _VALID_TEST_RESULTS followed by ':'.
  """
  line = line.strip()
  if line.startswith('#'):
    return False
  if '|' in line:
    (_, line) = line.split('|', 1)
    line = line.strip()
  for result in _VALID_TEST_RESULTS:
    # Require the trailing ':' that DejaGNU always emits (and that
    # TestResult's regex expects), so unrelated lines that merely start
    # with e.g. 'FAIL' (such as 'FAILURE ...') are not picked up.
    if line.startswith(result + ':'):
      return True
  return False
173
174
def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file."""
  # Repo policy forbids 'with ... as ...' here, hence the explicit close.
  sum_file = open(sum_fname)
  result_set = set(TestResult(line)
                   for line in sum_file
                   if IsInterestingResult(line))
  sum_file.close()
  return result_set
184
185
def GetManifest(manifest_name):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty
  set.
  """
  if not os.path.exists(manifest_name):
    # No manifest for this target: nothing is expected to fail.
    return set()
  return ParseSummary(manifest_name)
199
200
def CollectSumFiles(builddir):
  """Return the paths of all .sum files under BUILDDIR, skipping .svn."""
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    if '.svn' in dirs:
      # Prune in place: os.walk honors modifications to 'dirs'.
      dirs.remove('.svn')
    sum_files.extend(os.path.join(root, fname)
                     for fname in files if fname.endswith('.sum'))
  return sum_files
210
211
def GetResults(sum_files):
  """Collect all the test results from the given .sum files."""
  build_results = set()
  for sum_fname in sum_files:
    # Echo each file as it is processed.  print() form works on
    # Python 2 and 3 alike for a single argument.
    print('\t%s' % sum_fname)
    build_results |= ParseSummary(sum_fname)
  return build_results
219
220
def CompareResults(manifest, actual):
  """Compare the two result sets and return a pair of sets:
  - Results present in ACTUAL but missing from MANIFEST.
  - Results present in MANIFEST but missing from ACTUAL (entries
    tagged 'flaky' in the manifest are ignored).
  """
  # Everything the build produced that the manifest did not predict.
  # Membership uses TestResult.__eq__/__hash__, so plain set difference
  # is equivalent to the element-by-element check.
  actual_vs_manifest = actual - manifest

  # Similarly for the manifest side, except that flaky tests are
  # allowed to be absent from the build.
  manifest_vs_actual = set(
      expected for expected in manifest
      if 'flaky' not in expected.attrs and expected not in actual)

  return actual_vs_manifest, manifest_vs_actual
242
243
def GetBuildData(options):
  """Determine the source directory and target of the build.

  Reads target_alias and srcdir out of the toplevel Makefile in
  options.build_dir.  Aborts via Error() (which exits) when the
  directory does not look like a GCC toplevel build directory.

  Returns:
    A (srcdir, target, True) tuple.
  """
  makefile = '%s/Makefile' % options.build_dir
  target = GetMakefileValue(makefile, 'target_alias=')
  srcdir = GetMakefileValue(makefile, 'srcdir =')
  if not ValidBuildDirectory(options.build_dir, target):
    Error('%s is not a valid GCC top level build directory.' %
          options.build_dir)
  print('Source directory: %s' % srcdir)
  print('Build target: %s' % target)
  return srcdir, target, True
253
254
def PrintSummary(msg, summary):
  """Print MSG, then every result in SUMMARY in sorted order."""
  print('\n\n%s' % msg)
  for result in sorted(summary):
    print(result)
259
260
def GetSumFiles(results, build_dir):
  """Return the list of .sum files to analyze.

  When RESULTS is empty/None, the build tree under BUILD_DIR is
  scanned; otherwise RESULTS is a space-separated list of
  user-provided .sum file names.
  """
  if results:
    print('Getting actual results from user-provided results')
    return results.split()
  print('Getting actual results from build')
  return CollectSumFiles(build_dir)
269
270
def CheckExpectedResults(options):
  """Check the build's failures against the expected-failure manifest.

  Returns True when every failure found in the build is listed in the
  manifest (the script's success condition), False otherwise.  Note
  that manifest entries missing from the build are reported but do NOT
  fail the check.
  """
  # Locate the manifest: either the user-provided --manifest file, or
  # the default <srcdir>/contrib/testsuite-management/<target>.xfail
  # derived from the build directory.
  if not options.manifest:
    (srcdir, target, valid_build) = GetBuildData(options)
    if not valid_build:
      return False
    manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  else:
    manifest_name = options.manifest
    if not os.path.exists(manifest_name):
      # Error() exits the process.
      Error('Manifest file %s does not exist.' % manifest_name)

  print 'Manifest: %s' % manifest_name
  manifest = GetManifest(manifest_name)
  sum_files = GetSumFiles(options.results, options.build_dir)
  actual = GetResults(sum_files)

  if options.verbosity >= 1:
    PrintSummary('Tests expected to fail', manifest)
    PrintSummary('\nActual test results', actual)

  actual_vs_manifest, manifest_vs_actual = CompareResults(manifest, actual)

  tests_ok = True
  if len(actual_vs_manifest) > 0:
    # Unexpected failures: the only condition that fails the check.
    PrintSummary('Build results not in the manifest', actual_vs_manifest)
    tests_ok = False

  if not options.ignore_missing_failures and len(manifest_vs_actual) > 0:
    # Informational only: expected failures that did not show up leave
    # tests_ok untouched (see the NOTE text below).
    PrintSummary('Manifest results not present in the build'
                 '\n\nNOTE: This is not a failure. It just means that the '
                 'manifest expected\nthese tests to fail, '
                 'but they worked in this configuration.\n',
                 manifest_vs_actual)

  if tests_ok:
    print '\nSUCCESS: No unexpected failures.'

  return tests_ok
309
310
def ProduceManifest(options):
  """Write a new manifest file from the current build's failures.

  Refuses to overwrite an existing manifest unless --force was given
  (Error() exits in that case).  Returns True on success, False when
  the build directory is invalid.
  """
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return False

  manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  if os.path.exists(manifest_name) and not options.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_name)

  sum_files = GetSumFiles(options.results, options.build_dir)
  actual = GetResults(sum_files)
  # Repo policy forbids 'with ... as ...' in this script, hence the
  # explicit open/close pair.
  manifest_file = open(manifest_name, 'w')
  for result in sorted(actual):
    # Echo each entry so the user can see what went into the manifest.
    print result
    manifest_file.write('%s\n' % result)
  manifest_file.close()

  return True
330
331
def Main(argv):
  """Parse ARGV and dispatch to manifest production or result checking.

  Returns:
    0 when the check/production succeeded, 1 otherwise (suitable for
    use as a process exit code).
  """
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  parser.add_option('--ignore_missing_failures', action='store_true',
                    dest='ignore_missing_failures', default=False,
                    help='When a failure is expected in the manifest but '
                    'it is not found in the actual results, the script '
                    'produces a note alerting to this fact. This means '
                    'that the expected failure has been fixed, or '
                    'it did not run, or it may simply be flaky '
                    '(default = False)')
  # NOTE: fixed the 'testsuite-managment' typo in this help string.
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from contrib/testsuite-management/<target>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, XPASS or UNRESOLVED (default = '
                    '.sum files collected from the build directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  (options, _) = parser.parse_args(argv[1:])

  if options.produce_manifest:
    retval = ProduceManifest(options)
  else:
    retval = CheckExpectedResults(options)

  # Convert the boolean result into a conventional exit code.
  if retval:
    return 0
  else:
    return 1
378
if __name__ == '__main__':
  # Propagate Main's 0/1 result as the process exit status.
  sys.exit(Main(sys.argv))