]> git.ipfire.org Git - thirdparty/gcc.git/blame - contrib/testsuite-management/validate_failures.py
[contrib] validate_failures.py: Support "$tool:" prefix in exp names
[thirdparty/gcc.git] / contrib / testsuite-management / validate_failures.py
CommitLineData
b58c12f3 1#!/usr/bin/env python3
a5baf3b8
DN
2
3# Script to compare testsuite failures against a list of known-to-fail
4# tests.
5
6# Contributed by Diego Novillo <dnovillo@google.com>
7#
83ffe9cd 8# Copyright (C) 2011-2023 Free Software Foundation, Inc.
a5baf3b8
DN
9#
10# This file is part of GCC.
11#
12# GCC is free software; you can redistribute it and/or modify
13# it under the terms of the GNU General Public License as published by
14# the Free Software Foundation; either version 3, or (at your option)
15# any later version.
16#
17# GCC is distributed in the hope that it will be useful,
18# but WITHOUT ANY WARRANTY; without even the implied warranty of
19# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20# GNU General Public License for more details.
21#
22# You should have received a copy of the GNU General Public License
23# along with GCC; see the file COPYING. If not, write to
24# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
25# Boston, MA 02110-1301, USA.
26
27"""This script provides a coarser XFAILing mechanism that requires no
28detailed DejaGNU markings. This is useful in a variety of scenarios:
29
30- Development branches with many known failures waiting to be fixed.
31- Release branches with known failures that are not considered
32 important for the particular release criteria used in that branch.
33
34The script must be executed from the toplevel build directory. When
35executed it will:
36
371- Determine the target built: TARGET
382- Determine the source directory: SRCDIR
393- Look for a failure manifest file in
f6fce951 40 <SRCDIR>/<MANIFEST_SUBDIR>/<MANIFEST_NAME>.xfail
a5baf3b8
DN
414- Collect all the <tool>.sum files from the build tree.
425- Produce a report stating:
43 a- Failures expected in the manifest but not present in the build.
44 b- Failures in the build not expected in the manifest.
456- If all the build failures are expected in the manifest, it exits
46 with exit code 0. Otherwise, it exits with error code 1.
d5651dcf
DE
47
48Manifest files contain expected DejaGNU results that are otherwise
49treated as failures.
50They may also contain additional text:
51
52# This is a comment. - self explanatory
53@include file - the file is a path relative to the includer
54@remove result text - result text is removed from the expected set
a5baf3b8
DN
55"""
56
c577382e 57import datetime
a5baf3b8
DN
58import optparse
59import os
60import re
61import sys
62
a5baf3b8 63_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]
d4d776a3
MK
64# <STATE>: <NAME> <DESCRIPTION"
65_VALID_TEST_RESULTS_REX = re.compile('(%s):\s*(\S+)\s*(.*)'
66 % "|".join(_VALID_TEST_RESULTS))
a5baf3b8 67
c8558627
MK
68# Formats of .sum file sections
69_TOOL_LINE_FORMAT = '\t\t=== %s tests ===\n'
484a4864 70_EXP_LINE_FORMAT = '\nRunning %s:%s ...\n'
c8558627
MK
71_SUMMARY_LINE_FORMAT = '\n\t\t=== %s Summary ===\n'
72
73# ... and their compiled regexs.
74_TOOL_LINE_REX = re.compile('^\t\t=== (.*) tests ===\n')
484a4864
CL
75# Match .exp file name, optionally prefixed by a "tool:" name and a
76# path ending with "testsuite/"
77_EXP_LINE_REX = re.compile('^Running (?:.*:)?(.*) \.\.\.\n')
c8558627
MK
78_SUMMARY_LINE_REX = re.compile('^\t\t=== (.*) Summary ===\n')
79
f6fce951
DE
80# Subdirectory of srcdir in which to find the manifest file.
81_MANIFEST_SUBDIR = 'contrib/testsuite-management'
82
83# Pattern for naming manifest files.
84# The first argument should be the toplevel GCC(/GNU tool) source directory.
85# The second argument is the manifest subdir.
86# The third argument is the manifest target, which defaults to the target
87# triplet used during the build.
88_MANIFEST_PATH_PATTERN = '%s/%s/%s.xfail'
a5baf3b8 89
bc5e01b1
DE
90# The options passed to the program.
91_OPTIONS = None
92
def Error(msg):
  """Print an error message to stderr and terminate with exit code 1."""
  sys.stderr.write('error: %s\n' % msg)
  sys.exit(1)
96
97
class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrs: A comma separated list of attributes.
      Valid values:
        flaky           Indicates that this test may not always fail.  These
                        tests are reported, but their presence does not affect
                        the results.

        expire=YYYYMMDD After this date, this test will produce an error
                        whether it is in the manifest or not.

    state: One of UNRESOLVED, XPASS or FAIL.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
    ordinal: Monotonically increasing integer.
             It is used to keep results for one .exp file sorted
             by the order the tests were run.
    tool: Top-level testsuite name (aka "tool" in DejaGnu parlance) of the test.
    exp: Name of .exp testsuite file.
  """

  def __init__(self, summary_line, ordinal, tool, exp):
    try:
      (self.attrs, summary_line) = SplitAttributesFromSummaryLine(summary_line)
      try:
        (self.state,
         self.name,
         self.description) = _VALID_TEST_RESULTS_REX.match(summary_line).groups()
      except:
        print('Failed to parse summary line: "%s"' % summary_line)
        raise
      self.ordinal = ordinal
      if tool is None or exp is None:
        # The .sum file seems to be broken: there was no "tool" and/or
        # "exp" header line preceding this result.  Raise ValueError so
        # the handler below reports it and exits cleanly (a bare 'raise'
        # here, with no active exception, would produce an unrelated
        # RuntimeError traceback instead).
        raise ValueError
      self.tool = tool
      self.exp = exp
    except ValueError:
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
          self.state, summary_line, self))

  def __lt__(self, other):
    # Sort by tool, then .exp file, then test name, then run order, so
    # that ResultSet.Print emits results grouped like a .sum file.
    if (self.tool != other.tool):
      return self.tool < other.tool
    if (self.exp != other.exp):
      return self.exp < other.exp
    if (self.name != other.name):
      return self.name < other.name
    return self.ordinal < other.ordinal

  def __hash__(self):
    # Must stay consistent with __eq__ below: attrs and ordinal excluded.
    return (hash(self.state) ^ hash(self.tool) ^ hash(self.exp)
            ^ hash(self.name) ^ hash(self.description))

  # Note that we don't include "attrs" in this comparison.  This means that
  # result entries "FAIL: test" and "flaky | FAIL: test" are considered
  # the same.  Therefore the ResultSet will preserve only the first occurence.
  # In practice this means that flaky entries should preceed expected fails
  # entries.
  def __eq__(self, other):
    return (self.state == other.state and
            self.tool == other.tool and
            self.exp == other.exp and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)

  def ExpirationDate(self):
    # Return a datetime.date object with the expiration date for this
    # test result.  Return None, if no expiration has been set.
    if re.search(r'expire=', self.attrs):
      expiration = re.search(r'expire=(\d\d\d\d)(\d\d)(\d\d)', self.attrs)
      if not expiration:
        Error('Invalid expire= format in "%s". Must be of the form '
              '"expire=YYYYMMDD"' % self)
      return datetime.date(int(expiration.group(1)),
                           int(expiration.group(2)),
                           int(expiration.group(3)))
    return None

  def HasExpired(self):
    # Return True if the expiration date of this result has passed.
    # Returns None (falsy) when no expiration is set.
    expiration_date = self.ExpirationDate()
    if expiration_date:
      now = datetime.date.today()
      return now > expiration_date
208
a5baf3b8 209
c8558627
MK
class ResultSet(set):
  """Describes a set of DejaGNU test results.
  This set can be read in from .sum files or emitted as a manifest.

  Attributes:
    current_tool: Name of the current top-level DejaGnu testsuite.
    current_exp: Name of the current .exp testsuite file.
  """

  def __init__(self):
    super().__init__()
    self.ResetToolExp()

  def ResetToolExp(self):
    """Forget the tool/.exp context (used at .sum section boundaries)."""
    self.current_tool = None
    self.current_exp = None

  def MakeTestResult(self, summary_line, ordinal=-1):
    """Build a TestResult attributed to the current tool and .exp file."""
    return TestResult(summary_line, ordinal,
                      self.current_tool, self.current_exp)

  def Print(self, outfile=sys.stdout):
    """Write the results to outfile, grouped by tool and .exp file."""
    last_tool = None
    last_exp = None

    for res in sorted(self):
      # Emit a tool header whenever the tool changes, and an .exp header
      # whenever the .exp file changes.
      if res.tool != last_tool:
        last_tool = res.tool
        outfile.write(_TOOL_LINE_FORMAT % last_tool)
      if res.exp != last_exp:
        last_exp = res.exp
        outfile.write(_EXP_LINE_FORMAT % (last_tool, last_exp))
      outfile.write('%s\n' % res)

    outfile.write(_SUMMARY_LINE_FORMAT % 'Results')
245
246
a5baf3b8
DN
def GetMakefileValue(makefile_name, value_name):
  """Return the value assigned to value_name in makefile_name.

  value_name must include the text up to and just before the '=' as it
  appears in the Makefile (e.g. 'srcdir =').  Returns None when the
  Makefile does not exist or does not define the value.
  """
  if os.path.exists(makefile_name):
    # 'with' guarantees the file is closed even if iteration raises,
    # unlike the explicit close() calls this replaces.
    with open(makefile_name, encoding='latin-1', mode='r') as makefile:
      for line in makefile:
        if line.startswith(value_name):
          (_, value) = line.split('=', 1)
          return value.strip()
  return None
258
259
def ValidBuildDirectory(builddir):
  """Return True if builddir exists and contains a Makefile."""
  makefile = '%s/Makefile' % builddir
  return os.path.exists(builddir) and os.path.exists(makefile)
265
266
d5651dcf
DE
def IsComment(line):
  """Return True if line is a comment."""
  return line[:1] == '#'
270
271
76ba1222
BM
def SplitAttributesFromSummaryLine(line):
  """Splits off attributes from a summary line, if present.

  Returns (attrs, line) where attrs is '' when the line carries no
  attribute prefix.
  """
  # A '|' only introduces attributes when the line does not already
  # start with a valid result state (a description may contain '|').
  has_attrs = '|' in line and _VALID_TEST_RESULTS_REX.match(line) is None
  if has_attrs:
    attrs, line = line.split('|', 1)
    attrs = attrs.strip()
  else:
    attrs = ''
  return (attrs, line.strip())
281
282
def IsInterestingResult(line):
  """Return True if line is one of the summary lines we care about."""
  _, stripped = SplitAttributesFromSummaryLine(line)
  return _VALID_TEST_RESULTS_REX.match(stripped) is not None
a5baf3b8
DN
287
288
c8558627
MK
def IsToolLine(line):
  """Return True if line mentions the tool (in DejaGnu terms) for the following tests."""
  return _TOOL_LINE_REX.match(line) is not None
292
293
def IsExpLine(line):
  """Return True if line mentions the .exp file for the following tests."""
  return _EXP_LINE_REX.match(line) is not None
297
298
def IsSummaryLine(line):
  """Return True if line starts the .sum footer."""
  return _SUMMARY_LINE_REX.match(line) is not None
302
303
d5651dcf
DE
def IsInclude(line):
  """Return True if line is an include of another file."""
  prefix = "@include "
  return line.startswith(prefix)
307
308
def GetIncludeFile(line, includer):
  """Extract the name of the include file from line.

  The path is resolved relative to the directory of the including file.
  """
  base_dir = os.path.dirname(includer)
  fname = line[len("@include "):].strip()
  return os.path.join(base_dir, fname)
314
315
def IsNegativeResult(line):
  """Return True if line should be removed from the expected results."""
  prefix = "@remove "
  return line.startswith(prefix)
319
320
def GetNegativeResult(line):
  """Extract the name of the negative result from line."""
  return line[len("@remove "):].strip()
325
326
def ParseManifestWorker(result_set, manifest_path):
  """Read manifest_path, adding the contents to result_set."""
  if _OPTIONS.verbosity >= 5:
    print('Parsing manifest file %s.' % manifest_path)
  manifest_file = open(manifest_path, encoding='latin-1', mode='r')
  for orig_line in manifest_file:
    line = orig_line.strip()
    if line == "":
      # Blank lines carry no information.
      pass
    elif IsComment(line):
      pass
    elif IsNegativeResult(line):
      # "@remove <result>" deletes a previously added expected result.
      result_set.remove(result_set.MakeTestResult(GetNegativeResult(line)))
    elif IsInclude(line):
      # "@include <file>" recursively pulls in another manifest, with the
      # path resolved relative to the current manifest.
      ParseManifestWorker(result_set, GetIncludeFile(line, manifest_path))
    elif IsInterestingResult(line):
      result = result_set.MakeTestResult(line)
      if result.HasExpired():
        # Ignore expired manifest entries.
        if _OPTIONS.verbosity >= 4:
          print('WARNING: Expected failure "%s" has expired.' % line.strip())
        continue
      result_set.add(result)
    elif IsExpLine(orig_line):
      # Track the current .exp file; note the unstripped orig_line is
      # used because the section regexes anchor on leading whitespace.
      result_set.current_exp = _EXP_LINE_REX.match(orig_line).groups()[0]
    elif IsToolLine(orig_line):
      result_set.current_tool = _TOOL_LINE_REX.match(orig_line).groups()[0]
    elif IsSummaryLine(orig_line):
      # End of a .sum-style section: forget the tool/.exp context.
      result_set.ResetToolExp()
    else:
      Error('Unrecognized line in manifest file: %s' % line)
  manifest_file.close()
359
360
def ParseManifest(manifest_path):
  """Create a set of TestResult instances from the given manifest file."""
  results = ResultSet()
  ParseManifestWorker(results, manifest_path)
  return results
366
367
a5baf3b8
DN
def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file."""
  result_set = ResultSet()
  # ordinal is used when sorting the results so that tests within each
  # .exp file are kept sorted.
  ordinal=0
  sum_file = open(sum_fname, encoding='latin-1', mode='r')
  for line in sum_file:
    if IsInterestingResult(line):
      result = result_set.MakeTestResult(line, ordinal)
      ordinal += 1
      if result.HasExpired():
        # ??? What is the use-case for this?  How "expiry" annotations are
        # ??? supposed to be added to .sum results?
        # Tests that have expired are not added to the set of expected
        # results.  If they are still present in the set of actual results,
        # they will cause an error to be reported.
        if _OPTIONS.verbosity >= 4:
          print('WARNING: Expected failure "%s" has expired.' % line.strip())
        continue
      result_set.add(result)
    elif IsExpLine(line):
      # Track the .exp file so results can be attributed to it.
      result_set.current_exp = _EXP_LINE_REX.match(line).groups()[0]
    elif IsToolLine(line):
      # A new tool section starts; any previous .exp no longer applies.
      result_set.current_tool = _TOOL_LINE_REX.match(line).groups()[0]
      result_set.current_exp = None
    elif IsSummaryLine(line):
      # The .sum footer ends the per-tool sections.
      result_set.ResetToolExp()
  sum_file.close()
  return result_set
398
399
def GetManifest(manifest_path):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty set.
  """
  if not os.path.exists(manifest_path):
    return ResultSet()
  return ParseManifest(manifest_path)
a5baf3b8
DN
412
413
def CollectSumFiles(builddir):
  """Return the paths of all .sum files under builddir.

  Version-control metadata directories (.svn, .git) are not descended into.
  """
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    # Prune VCS directories in place so os.walk skips them entirely.
    dirs[:] = [d for d in dirs if d not in ('.svn', '.git')]
    sum_files.extend(os.path.join(root, fname)
                     for fname in files if fname.endswith('.sum'))
  return sum_files
424
425
def GetResults(sum_files, build_results = None):
  """Collect all the test results from the given .sum files.

  When build_results is given, the new results are merged into it.
  """
  merged = ResultSet() if build_results is None else build_results
  for fname in sum_files:
    if _OPTIONS.verbosity >= 3:
      print('\t%s' % fname)
    merged |= ParseSummary(fname)
  return merged
435
436
def CompareResults(manifest, actual):
  """Compare sets of results and return two lists:
  - List of results present in ACTUAL but missing from MANIFEST.
  - List of results present in MANIFEST but missing from ACTUAL.
  """
  # Actual results not expected by the manifest: reported as errors.
  actual_vs_manifest = ResultSet()
  for result in actual:
    if result not in manifest:
      actual_vs_manifest.add(result)

  # Manifest entries that did not show up in the actual results:
  # reported as warnings, since these expected failures are not
  # failing anymore.  Tests marked flaky may legitimately be absent,
  # so they are skipped.
  manifest_vs_actual = ResultSet()
  for expected in manifest:
    if 'flaky' in expected.attrs:
      continue
    if expected not in actual:
      manifest_vs_actual.add(expected)

  return actual_vs_manifest, manifest_vs_actual
462
463
def GetManifestPath(user_provided_must_exist):
  """Return the full path to the manifest file.

  A user-supplied --manifest path wins; when user_provided_must_exist
  is True it must already exist on disk.  Otherwise the path is derived
  from the build's source directory and target triplet.
  """
  manifest_path = _OPTIONS.manifest
  if manifest_path:
    if user_provided_must_exist and not os.path.exists(manifest_path):
      Error('Manifest does not exist: %s' % manifest_path)
    return manifest_path
  else:
    (srcdir, target) = GetBuildData()
    if not srcdir:
      Error('Could not determine the location of GCC\'s source tree. '
            'The Makefile does not contain a definition for "srcdir".')
    if not target:
      Error('Could not determine the target triplet for this build. '
            'The Makefile does not contain a definition for "target_alias".')
    return _MANIFEST_PATH_PATTERN % (srcdir, _MANIFEST_SUBDIR, target)
480
481
def GetBuildData():
  """Return (srcdir, target_alias) as read from the build's Makefile.

  Returns (None, None) when we are not in a build directory but the
  user supplied both --manifest and --results; exits via Error()
  otherwise.
  """
  if not ValidBuildDirectory(_OPTIONS.build_dir):
    # If we have been given a set of results to use, we may
    # not be inside a valid GCC build directory.  In that case,
    # the user must provide both a manifest file and a set
    # of results to check against it.
    if not _OPTIONS.results or not _OPTIONS.manifest:
      Error('%s is not a valid GCC top level build directory. '
            'You must use --manifest and --results to do the validation.' %
            _OPTIONS.build_dir)
    else:
      return None, None
  srcdir = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'srcdir =')
  target = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'target_alias=')
  if _OPTIONS.verbosity >= 3:
    print('Source directory: %s' % srcdir)
    print('Build target: %s' % target)
  return srcdir, target
a5baf3b8
DN
500
501
def PrintSummary(summary):
  """Emit summary (a ResultSet) to stdout in .sum-like format."""
  summary.Print()
a5baf3b8 504
29476fe1
DN
def GetSumFiles(results, build_dir):
  """Return the list of .sum files to examine.

  When results is empty/None the build tree is scanned; otherwise
  results is treated as a whitespace-separated list of file names.
  """
  if results:
    if _OPTIONS.verbosity >= 3:
      print('Getting actual results from user-provided results')
    return results.split()
  if _OPTIONS.verbosity >= 3:
    print('Getting actual results from build directory %s' % build_dir)
  return CollectSumFiles(build_dir)
515
516
def PerformComparison(expected, actual):
  """Compare expected and actual ResultSets and report the differences.

  Returns True if every actual failure was expected, False otherwise.
  The amount of output is controlled by _OPTIONS.verbosity.
  """
  actual_vs_expected, expected_vs_actual = CompareResults(expected, actual)

  tests_ok = True
  if len(actual_vs_expected) > 0:
    # New failures: these make the run fail.
    if _OPTIONS.verbosity >= 3:
      print('\n\nUnexpected results in this build (new failures)')
    if _OPTIONS.verbosity >= 1:
      PrintSummary(actual_vs_expected)
    tests_ok = False

  if _OPTIONS.verbosity >= 2 and len(expected_vs_actual) > 0:
    # Fixed tests: informational only, never an error.
    print('\n\nExpected results not present in this build (fixed tests)'
          '\n\nNOTE: This is not a failure. It just means that these '
          'tests were expected\nto fail, but either they worked in '
          'this configuration or they were not\npresent at all.\n')
    PrintSummary(expected_vs_actual)

  if tests_ok and _OPTIONS.verbosity >= 3:
    print('\nSUCCESS: No unexpected failures.')

  return tests_ok
539
540
def CheckExpectedResults():
  """Validate the build's failures against the manifest of expected ones.

  Returns True if all failures found in the build are expected.
  """
  manifest_path = GetManifestPath(True)
  if _OPTIONS.verbosity >= 3:
    print('Manifest: %s' % manifest_path)
  manifest = GetManifest(manifest_path)
  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)

  if _OPTIONS.verbosity >= 5:
    # Debug output: dump both sets before comparing.
    print('\n\nTests expected to fail')
    PrintSummary(manifest)
    print('\n\nActual test results')
    PrintSummary(actual)

  return PerformComparison(manifest, actual)
a5baf3b8
DN
556
557
def ProduceManifest():
  """Write a manifest file recording the current build's failures.

  Refuses to overwrite an existing manifest unless --force was given.
  Returns True on success.
  """
  manifest_path = GetManifestPath(False)
  if _OPTIONS.verbosity >= 3:
    print('Manifest: %s' % manifest_path)
  if os.path.exists(manifest_path) and not _OPTIONS.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_path)

  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)
  manifest_file = open(manifest_path, encoding='latin-1', mode='w')
  # Write the manifest and echo the same content to stdout.
  actual.Print(manifest_file)
  actual.Print()
  manifest_file.close()

  return True
574
575
def CompareBuilds():
  """Compare this build's results against those of a clean build.

  If --manifest was given, its entries are folded into the clean
  build's expected set.  Returns True if no new failures were found.
  """
  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)

  clean = ResultSet()

  if _OPTIONS.manifest:
    manifest_path = GetManifestPath(True)
    if _OPTIONS.verbosity >= 3:
      print('Manifest: %s' % manifest_path)
    clean = GetManifest(manifest_path)

  # NOTE(review): _OPTIONS.results is passed here as well, so when
  # --results is given both builds read the same .sum files — confirm
  # this is the intended behavior.
  clean_sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.clean_build)
  clean = GetResults(clean_sum_files, clean)

  return PerformComparison(clean, actual)
b436bf38
DN
592
593
a5baf3b8
DN
def Main(argv):
  """Parse command-line options into _OPTIONS and run the requested mode.

  Returns 0 on success, 2 when unexpected failures were found.
  """
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--clean_build', action='store', type='string',
                    dest='clean_build', default=None,
                    help='Compare test results from this build against '
                    'those of another (clean) build. Use this option '
                    'when comparing the test results of your patch versus '
                    'the test results of a clean build without your patch. '
                    'You must provide the path to the top directory of your '
                    'clean build.')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  # Fixed typo in the default path shown to users: "testsuite-managment"
  # -> "testsuite-management" (matches _MANIFEST_SUBDIR).
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from '
                    'contrib/testsuite-management/<target_alias>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, XPASS or UNRESOLVED (default = '
                    '.sum files collected from the build directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=3, help='Verbosity level '
                    '(default = 3). Level 0: only error output, this is '
                    'useful in scripting when only the exit code is used. '
                    'Level 1: output unexpected failures. '
                    'Level 2: output unexpected passes. '
                    'Level 3: output helpful information. '
                    'Level 4: output notification on expired entries. '
                    'Level 5: output debug information.')
  global _OPTIONS
  (_OPTIONS, _) = parser.parse_args(argv[1:])

  # Dispatch on the requested mode of operation.
  if _OPTIONS.produce_manifest:
    retval = ProduceManifest()
  elif _OPTIONS.clean_build:
    retval = CompareBuilds()
  else:
    retval = CheckExpectedResults()

  if retval:
    return 0
  else:
    return 2
a5baf3b8 651
1099bb0a 652
a5baf3b8
DN
if __name__ == '__main__':
  # Exit with Main's return code (0 = all failures expected, 2 otherwise).
  sys.exit(Main(sys.argv))