]> git.ipfire.org Git - thirdparty/gcc.git/blame - contrib/testsuite-management/validate_failures.py
Extend validate_failures.py to run outside the build directory.
[thirdparty/gcc.git] / contrib / testsuite-management / validate_failures.py
CommitLineData
4de0f292 1#!/usr/bin/python2.6
5eb8d002 2
3# Script to compare testsuite failures against a list of known-to-fail
4# tests.
5
6# Contributed by Diego Novillo <dnovillo@google.com>
7#
b20d3dc6 8# Copyright (C) 2011, 2012 Free Software Foundation, Inc.
5eb8d002 9#
10# This file is part of GCC.
11#
12# GCC is free software; you can redistribute it and/or modify
13# it under the terms of the GNU General Public License as published by
14# the Free Software Foundation; either version 3, or (at your option)
15# any later version.
16#
17# GCC is distributed in the hope that it will be useful,
18# but WITHOUT ANY WARRANTY; without even the implied warranty of
19# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20# GNU General Public License for more details.
21#
22# You should have received a copy of the GNU General Public License
23# along with GCC; see the file COPYING. If not, write to
24# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
25# Boston, MA 02110-1301, USA.
26
27"""This script provides a coarser XFAILing mechanism that requires no
28detailed DejaGNU markings. This is useful in a variety of scenarios:
29
30- Development branches with many known failures waiting to be fixed.
31- Release branches with known failures that are not considered
32 important for the particular release criteria used in that branch.
33
By default, the script is executed from the toplevel build directory, but
the --build_dir, --manifest and --results options allow it to run from
elsewhere.  When executed it will:
36
371- Determine the target built: TARGET
382- Determine the source directory: SRCDIR
393- Look for a failure manifest file in
40 <SRCDIR>/contrib/testsuite-management/<TARGET>.xfail
414- Collect all the <tool>.sum files from the build tree.
425- Produce a report stating:
43 a- Failures expected in the manifest but not present in the build.
44 b- Failures in the build not expected in the manifest.
456- If all the build failures are expected in the manifest, it exits
46 with exit code 0. Otherwise, it exits with error code 1.
47"""
48
49import optparse
50import os
51import re
52import sys
53
# Handled test results.  Only summary lines starting with one of these
# states are loaded from .sum files; everything else (PASS, etc.) is ignored.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]

# Pattern for naming manifest files.  The first argument should be
# the toplevel GCC source directory.  The second argument is the
# target triple used during the build.
_MANIFEST_PATH_PATTERN = '%s/contrib/testsuite-management/%s.xfail'
61
def Error(msg):
  """Print MSG to stderr and abort with exit code 1.

  Uses sys.stderr.write() instead of the Python-2-only
  'print >>sys.stderr' chevron syntax; the output is identical and the
  statement is also valid Python 3.
  """
  sys.stderr.write('\nerror: %s\n' % msg)
  sys.exit(1)
65
66
class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name               description
  attributes

  Attributes:
    attrs: A comma separated list of attributes.
      Valid values:
        flaky            Indicates that this test may not always fail.  These
                         tests are reported, but their presence does not affect
                         the results.

        expire=YYYYMMDD  After this date, this test will produce an error
                         whether it is in the manifest or not.

    state: One of UNRESOLVED, XPASS or FAIL.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
  """

  def __init__(self, summary_line):
    try:
      self.attrs = ''
      if '|' in summary_line:
        (self.attrs, summary_line) = summary_line.split('|', 1)
      try:
        (self.state,
         self.name,
         self.description) = re.match(r' *([A-Z]+): (\S+)\s(.*)',
                                      summary_line).groups()
      except AttributeError:
        # re.match() returned None (the line did not match), so .groups()
        # raised AttributeError.  Report the offending line and re-raise.
        # The original code used a bare 'except:', which would also have
        # swallowed the message for unrelated exceptions.
        print('Failed to parse summary line: "%s"' % summary_line)
        raise
      self.attrs = self.attrs.strip()
      self.state = self.state.strip()
      self.description = self.description.strip()
    except ValueError:
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
            self.state, summary_line, self))

  def __lt__(self, other):
    # Sort results by test name only; state/description do not participate.
    return self.name < other.name

  def __hash__(self):
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    return (self.state == other.state and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    # Round-trips to the same 'attrlist | STATE: name description' format
    # that __init__ parses.
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)
137
138
def GetMakefileValue(makefile_name, value_name):
  """Scan MAKEFILE_NAME for a line starting with VALUE_NAME.

  Returns the stripped right-hand side of the first 'VALUE_NAME...=value'
  line found, or None if the file does not exist or no line matches.
  """
  if not os.path.exists(makefile_name):
    return None
  with open(makefile_name) as makefile:
    for line in makefile:
      if not line.startswith(value_name):
        continue
      (_, assigned) = line.split('=', 1)
      return assigned.strip()
  return None
148
149
def ValidBuildDirectory(builddir, target):
  """Return True if BUILDDIR looks like a GCC toplevel build directory.

  The directory must exist, contain a Makefile, and contain either a
  'build-<target>' or a '<target>' subdirectory.
  """
  if not os.path.exists(builddir):
    return False
  if not os.path.exists('%s/Makefile' % builddir):
    return False
  return (os.path.exists('%s/build-%s' % (builddir, target)) or
          os.path.exists('%s/%s' % (builddir, target)))
157
158
def IsInterestingResult(line):
  """Return True if LINE is one of the summary lines we care about.

  After stripping an optional 'attrlist |' prefix and leading whitespace,
  the line must start with one of the states in _VALID_TEST_RESULTS
  followed by ':' (e.g. 'FAIL: gcc.dg/foo.c ...').
  """
  line = line.strip()
  if line.startswith('#'):
    return False
  if '|' in line:
    (_, line) = line.split('|', 1)
    line = line.strip()
  # Require the trailing ':'.  A bare startswith(result) would also accept
  # unrelated lines that merely begin with a state word (e.g. 'ERRORS ...'),
  # and those would later abort the run when TestResult fails to parse them.
  for result in _VALID_TEST_RESULTS:
    if line.startswith(result + ':'):
      return True
  return False
171
172
def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file.

  Only the lines selected by IsInterestingResult() are turned into
  TestResult objects; everything else in the file is skipped.
  """
  with open(sum_fname) as sum_file:
    return set(TestResult(line)
               for line in sum_file if IsInterestingResult(line))
181
182
def GetManifest(manifest_name):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty
  set.
  """
  if not os.path.exists(manifest_name):
    return set()
  return ParseSummary(manifest_name)
196
197
def GetSumFiles(builddir):
  """Return the list of all .sum files found under BUILDDIR.

  Subversion metadata directories (.svn) are pruned from the walk.
  """
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    try:
      dirs.remove('.svn')  # Do not descend into Subversion metadata.
    except ValueError:
      pass
    sum_files.extend(os.path.join(root, fname)
                     for fname in files if fname.endswith('.sum'))
  return sum_files
207
208
def GetResults(sum_files):
  """Collect all the test results from the given .sum files.

  SUM_FILES is an iterable of .sum file names; each file's name is
  echoed to stdout as it is parsed.  Returns the union of all the
  TestResult sets.

  The Python-2-only 'print' statement was converted to the print()
  call form, which produces identical output and is also valid Python 3.
  """
  build_results = set()
  for sum_fname in sum_files:
    print('\t%s' % sum_fname)
    build_results |= ParseSummary(sum_fname)
  return build_results
216
217
def CompareResults(manifest, actual):
  """Compare sets of results and return two sets:
  - Results present in ACTUAL but missing from MANIFEST.
  - Results present in MANIFEST but missing from ACTUAL
    (results whose attrs contain 'flaky' are skipped).

  Note: the original docstring listed the two return values in the
  opposite order and called them lists; the code has always returned
  (actual-not-in-manifest, manifest-not-in-actual) as sets.
  """
  # Report all the actual results not present in the manifest.
  actual_vs_manifest = set(r for r in actual if r not in manifest)

  # Similarly for all the tests in the manifest, ignoring tests
  # marked flaky.
  manifest_vs_actual = set(r for r in manifest
                           if 'flaky' not in r.attrs and r not in actual)

  return actual_vs_manifest, manifest_vs_actual
239
240
def GetBuildData(options):
  """Return (srcdir, target, True) read from the build's Makefile.

  Reads 'target_alias' and 'srcdir' from OPTIONS.build_dir/Makefile and
  aborts via Error() if the directory does not look like a valid GCC
  toplevel build directory.

  The Python-2-only 'print' statements were converted to the print()
  call form, which produces identical output and is also valid Python 3.
  """
  target = GetMakefileValue('%s/Makefile' % options.build_dir, 'target_alias=')
  srcdir = GetMakefileValue('%s/Makefile' % options.build_dir, 'srcdir =')
  if not ValidBuildDirectory(options.build_dir, target):
    Error('%s is not a valid GCC top level build directory.' %
          options.build_dir)
  print('Source directory: %s' % srcdir)
  print('Build target: %s' % target)
  return srcdir, target, True
250
251
def PrintSummary(msg, summary):
  """Print MSG followed by every result in SUMMARY in sorted order.

  The Python-2-only 'print' statements were converted to the print()
  call form, which produces identical output and is also valid Python 3.
  """
  print('\n\n%s' % msg)
  for result in sorted(summary):
    print(result)
256
257
def CheckExpectedResults(options):
  """Compare the actual test results against the expected-failure manifest.

  The manifest is either OPTIONS.manifest or derived from the build
  directory's srcdir/target; the actual results come either from
  OPTIONS.results (a space-separated list of .sum files) or from the
  .sum files found in the build directory.

  Returns True if every actual failure is expected in the manifest,
  False otherwise.  Failures listed in the manifest but absent from the
  build are only reported (unless --ignore_missing_failures); they do
  not make the check fail.

  The Python-2-only 'print' statements were converted to the print()
  call form, which produces identical output and is also valid Python 3.
  """
  if not options.manifest:
    (srcdir, target, valid_build) = GetBuildData(options)
    if not valid_build:
      return False
    manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  else:
    manifest_name = options.manifest
    if not os.path.exists(manifest_name):
      Error('Manifest file %s does not exist.' % manifest_name)

  print('Manifest: %s' % manifest_name)
  manifest = GetManifest(manifest_name)

  if not options.results:
    print('Getting actual results from build')
    sum_files = GetSumFiles(options.build_dir)
  else:
    print('Getting actual results from user-provided results')
    sum_files = options.results.split()
  actual = GetResults(sum_files)

  if options.verbosity >= 1:
    PrintSummary('Tests expected to fail', manifest)
    PrintSummary('\nActual test results', actual)

  actual_vs_manifest, manifest_vs_actual = CompareResults(manifest, actual)

  tests_ok = True
  if len(actual_vs_manifest) > 0:
    PrintSummary('Build results not in the manifest', actual_vs_manifest)
    tests_ok = False

  # Missing expected failures are informational only; they do not
  # affect tests_ok.
  if not options.ignore_missing_failures and len(manifest_vs_actual) > 0:
    PrintSummary('Manifest results not present in the build'
                 '\n\nNOTE: This is not a failure.  It just means that the '
                 'manifest expected\nthese tests to fail, '
                 'but they worked in this configuration.\n',
                 manifest_vs_actual)

  if tests_ok:
    print('\nSUCCESS: No unexpected failures.')

  return tests_ok
302
303
def ProduceManifest(options):
  """Write the manifest file for the current build's failures.

  Collects every failing result from the .sum files in
  OPTIONS.build_dir and writes them, sorted, to
  <srcdir>/contrib/testsuite-management/<target>.xfail.  Refuses to
  overwrite an existing manifest unless OPTIONS.force is set.
  Returns True on success, False if the build directory is invalid.
  """
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return False

  manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  if os.path.exists(manifest_name) and not options.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_name)

  # Bug fix: GetResults() expects a list of .sum file names, not the
  # build directory itself.  Passing options.build_dir directly would
  # iterate over the path string character by character and try to open
  # each character as a file.  Collect the .sum files first, exactly as
  # CheckExpectedResults() does.
  sum_files = GetSumFiles(options.build_dir)
  actual = GetResults(sum_files)
  with open(manifest_name, 'w') as manifest_file:
    for result in sorted(actual):
      print(result)
      manifest_file.write('%s\n' % result)

  return True
321
322
def Main(argv):
  """Parse ARGV and run the requested action.

  Returns 0 when the action succeeds (all failures expected, or
  manifest produced), 1 otherwise.

  Fixed a typo in the --manifest help text
  ('testsuite-managment' -> 'testsuite-management').
  """
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  parser.add_option('--ignore_missing_failures', action='store_true',
                    dest='ignore_missing_failures', default=False,
                    help='When a failure is expected in the manifest but '
                    'it is not found in the actual results, the script '
                    'produces a note alerting to this fact. This means '
                    'that the expected failure has been fixed, or '
                    'it did not run, or it may simply be flaky '
                    '(default = False)')
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from contrib/testsuite-management/<target>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, XPASS or UNRESOLVED (default = '
                    '.sum files collected from the build directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  (options, _) = parser.parse_args(argv[1:])

  if options.produce_manifest:
    retval = ProduceManifest(options)
  else:
    retval = CheckExpectedResults(options)

  return 0 if retval else 1
369
if __name__ == '__main__':
  # Exit with Main's return code (0 = all failures expected, 1 otherwise).
  sys.exit(Main(sys.argv))