#!/usr/bin/python

# Script to compare testsuite failures against a list of known-to-fail
# tests.

# Contributed by Diego Novillo <dnovillo@google.com>
#
# Copyright (C) 2011 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING.  If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.

27"""This script provides a coarser XFAILing mechanism that requires no
28detailed DejaGNU markings. This is useful in a variety of scenarios:
29
30- Development branches with many known failures waiting to be fixed.
31- Release branches with known failures that are not considered
32 important for the particular release criteria used in that branch.
33
34The script must be executed from the toplevel build directory. When
35executed it will:
36
371- Determine the target built: TARGET
382- Determine the source directory: SRCDIR
393- Look for a failure manifest file in
40 <SRCDIR>/contrib/testsuite-management/<TARGET>.xfail
414- Collect all the <tool>.sum files from the build tree.
425- Produce a report stating:
43 a- Failures expected in the manifest but not present in the build.
44 b- Failures in the build not expected in the manifest.
456- If all the build failures are expected in the manifest, it exits
46 with exit code 0. Otherwise, it exits with error code 1.
47"""

import optparse
import os
import re
import sys

# Handled test results.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]

# Pattern for naming manifest files.  The first argument should be
# the toplevel GCC source directory.  The second argument is the
# target triple used during the build.
_MANIFEST_PATH_PATTERN = '%s/contrib/testsuite-management/%s.xfail'
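# As an illustration (the directory and target triple below are hypothetical),
# a build for x86_64-pc-linux-gnu from a source tree at /src/gcc would use:
#   /src/gcc/contrib/testsuite-management/x86_64-pc-linux-gnu.xfail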

def Error(msg):
  print >>sys.stderr, '\nerror: %s' % msg
  sys.exit(1)


class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrlist: A comma separated list of attributes.
      Valid values:
        flaky            Indicates that this test may not always fail.  These
                         tests are reported, but their presence does not affect
                         the results.

        expire=YYYYMMDD  After this date, this test will produce an error
                         whether it is in the manifest or not.

    state: One of UNRESOLVED, XPASS or FAIL.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
  """
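
  # Example entries in the format described above; the test name comes from
  # the docstring example, and the attribute values are illustrative only:
  #
  #   FAIL: gcc.dg/unroll_1.c (test for excess errors)
  #   flaky | FAIL: gcc.dg/unroll_1.c (test for excess errors)
  #   flaky,expire=20121231 | FAIL: gcc.dg/unroll_1.c (test for excess errors)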

  def __init__(self, summary_line):
    try:
      self.attrs = ''
      if '|' in summary_line:
        (self.attrs, summary_line) = summary_line.split('|', 1)
      try:
        (self.state,
         self.name,
         self.description) = re.match(r' *([A-Z]+): (\S+)\s(.*)',
                                      summary_line).groups()
      except:
        print 'Failed to parse summary line: "%s"' % summary_line
        raise
      self.attrs = self.attrs.strip()
      self.state = self.state.strip()
      self.description = self.description.strip()
    except ValueError:
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
        self.state, summary_line, self))

  def __lt__(self, other):
    return self.name < other.name

  def __hash__(self):
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    return (self.state == other.state and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)


def GetMakefileValue(makefile_name, value_name):
  """Return the value assigned to VALUE_NAME in MAKEFILE_NAME, or None."""
  if os.path.exists(makefile_name):
    with open(makefile_name) as makefile:
      for line in makefile:
        if line.startswith(value_name):
          (_, value) = line.split('=', 1)
          value = value.strip()
          return value
  return None


def ValidBuildDirectory(builddir, target):
  """Return True if BUILDDIR looks like a GCC toplevel build directory."""
  if (not os.path.exists(builddir) or
      not os.path.exists('%s/Makefile' % builddir) or
      (not os.path.exists('%s/build-%s' % (builddir, target)) and
       not os.path.exists('%s/%s' % (builddir, target)))):
    return False
  return True


def IsInterestingResult(line):
  """Return True if the given line is one of the summary lines we care about."""
  line = line.strip()
  if line.startswith('#'):
    return False
  if '|' in line:
    (_, line) = line.split('|', 1)
    line = line.strip()
  for result in _VALID_TEST_RESULTS:
    if line.startswith(result):
      return True
  return False


def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file."""
  result_set = set()
  with open(sum_fname) as sum_file:
    for line in sum_file:
      if IsInterestingResult(line):
        result_set.add(TestResult(line))
  return result_set


def GetManifest(manifest_name):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty
  set.
  """
  if os.path.exists(manifest_name):
    return ParseSummary(manifest_name)
  else:
    return set()


def GetSumFiles(builddir):
  """Return the list of .sum files found under BUILDDIR."""
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    if '.svn' in dirs:
      dirs.remove('.svn')
    for fname in files:
      if fname.endswith('.sum'):
        sum_files.append(os.path.join(root, fname))
  return sum_files


def GetResults(builddir):
  """Collect all the test results from .sum files under the given build
  directory."""
  sum_files = GetSumFiles(builddir)
  build_results = set()
  for sum_fname in sum_files:
    print '\t%s' % sum_fname
    build_results |= ParseSummary(sum_fname)
  return build_results


def CompareResults(manifest, actual):
  """Compare sets of results and return two sets:
     - Results present in ACTUAL but missing from MANIFEST.
     - Results present in MANIFEST but missing from ACTUAL (ignoring
       entries marked flaky).
  """
  # Report all the actual results not present in the manifest.
  actual_vs_manifest = set()
  for actual_result in actual:
    if actual_result not in manifest:
      actual_vs_manifest.add(actual_result)

  # Similarly for all the tests in the manifest.
  manifest_vs_actual = set()
  for expected_result in manifest:
    # Ignore tests marked flaky.
    if 'flaky' in expected_result.attrs:
      continue
    if expected_result not in actual:
      manifest_vs_actual.add(expected_result)

  return actual_vs_manifest, manifest_vs_actual


def GetBuildData(options):
  target = GetMakefileValue('%s/Makefile' % options.build_dir, 'target=')
  srcdir = GetMakefileValue('%s/Makefile' % options.build_dir, 'srcdir =')
  if not ValidBuildDirectory(options.build_dir, target):
    Error('%s is not a valid GCC top level build directory.' %
          options.build_dir)
  print 'Source directory: %s' % srcdir
  print 'Build target: %s' % target
  return srcdir, target, True


def PrintSummary(msg, summary):
  print '\n\n%s' % msg
  for result in sorted(summary):
    print result


def CheckExpectedResults(options):
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return False

  manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  print 'Manifest: %s' % manifest_name
  manifest = GetManifest(manifest_name)

  print 'Getting actual results from build'
  actual = GetResults(options.build_dir)

  if options.verbosity >= 1:
    PrintSummary('Tests expected to fail', manifest)
    PrintSummary('\nActual test results', actual)

  actual_vs_manifest, manifest_vs_actual = CompareResults(manifest, actual)

  tests_ok = True
  if len(actual_vs_manifest) > 0:
    PrintSummary('Build results not in the manifest', actual_vs_manifest)
    tests_ok = False

  if len(manifest_vs_actual) > 0:
    PrintSummary('Manifest results not present in the build'
                 '\n\nNOTE: This is not a failure.  It just means that the '
                 'manifest expected\nthese tests to fail, '
                 'but they worked in this configuration.\n',
                 manifest_vs_actual)

  if tests_ok:
    print '\nSUCCESS: No unexpected failures.'

  return tests_ok


def ProduceManifest(options):
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return False

  manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  if os.path.exists(manifest_name) and not options.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_name)

  actual = GetResults(options.build_dir)
  with open(manifest_name, 'w') as manifest_file:
    for result in sorted(actual):
      print result
      manifest_file.write('%s\n' % result)

  return True


def Main(argv):
  parser = optparse.OptionParser(usage=__doc__)
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--manifest', action='store_true', dest='manifest',
                    default=False, help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --manifest, it will '
                    'overwrite an existing manifest file (default = False)')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  (options, _) = parser.parse_args(argv[1:])

  if options.manifest:
    retval = ProduceManifest(options)
  else:
    retval = CheckExpectedResults(options)

  if retval:
    return 0
  else:
    return 1

if __name__ == '__main__':
  retval = Main(sys.argv)
  sys.exit(retval)