]>
Commit | Line | Data |
---|---|---|
b58c12f3 | 1 | #!/usr/bin/env python3 |
a5baf3b8 DN |
2 | |
3 | # Script to compare testsuite failures against a list of known-to-fail | |
4 | # tests. | |
5 | ||
6 | # Contributed by Diego Novillo <dnovillo@google.com> | |
7 | # | |
828e50c5 | 8 | # Copyright (C) 2011-2013 Free Software Foundation, Inc. |
a5baf3b8 DN |
9 | # |
10 | # This file is part of GCC. | |
11 | # | |
12 | # GCC is free software; you can redistribute it and/or modify | |
13 | # it under the terms of the GNU General Public License as published by | |
14 | # the Free Software Foundation; either version 3, or (at your option) | |
15 | # any later version. | |
16 | # | |
17 | # GCC is distributed in the hope that it will be useful, | |
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
20 | # GNU General Public License for more details. | |
21 | # | |
22 | # You should have received a copy of the GNU General Public License | |
23 | # along with GCC; see the file COPYING. If not, write to | |
24 | # the Free Software Foundation, 51 Franklin Street, Fifth Floor, | |
25 | # Boston, MA 02110-1301, USA. | |
26 | ||
27 | """This script provides a coarser XFAILing mechanism that requires no | |
28 | detailed DejaGNU markings. This is useful in a variety of scenarios: | |
29 | ||
30 | - Development branches with many known failures waiting to be fixed. | |
31 | - Release branches with known failures that are not considered | |
32 | important for the particular release criteria used in that branch. | |
33 | ||
34 | The script must be executed from the toplevel build directory. When | |
35 | executed it will: | |
36 | ||
37 | 1- Determine the target built: TARGET | |
38 | 2- Determine the source directory: SRCDIR | |
39 | 3- Look for a failure manifest file in | |
f6fce951 | 40 | <SRCDIR>/<MANIFEST_SUBDIR>/<MANIFEST_NAME>.xfail |
a5baf3b8 DN |
41 | 4- Collect all the <tool>.sum files from the build tree. |
42 | 5- Produce a report stating: | |
43 | a- Failures expected in the manifest but not present in the build. | |
44 | b- Failures in the build not expected in the manifest. | |
45 | 6- If all the build failures are expected in the manifest, it exits | |
46 | with exit code 0. Otherwise, it exits with error code 1. | |
d5651dcf DE |
47 | |
48 | Manifest files contain expected DejaGNU results that are otherwise | |
49 | treated as failures. | |
50 | They may also contain additional text: | |
51 | ||
52 | # This is a comment. - self explanatory | |
53 | @include file - the file is a path relative to the includer | |
54 | @remove result text - result text is removed from the expected set | |
a5baf3b8 DN |
55 | """ |
56 | ||
c577382e | 57 | import datetime |
a5baf3b8 DN |
58 | import optparse |
59 | import os | |
60 | import re | |
61 | import sys | |
62 | ||
# Test-result states handled by this script.  Lines in .sum files (and
# manifest entries) whose state is not in this list are ignored.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]
# Matches a summary line that starts with one of the handled states
# above (re.match anchors at the beginning of the string).
_VALID_TEST_RESULTS_REX = re.compile("%s" % "|".join(_VALID_TEST_RESULTS))

# Subdirectory of srcdir in which to find the manifest file.
_MANIFEST_SUBDIR = 'contrib/testsuite-management'

# Pattern for naming manifest files.
# The first argument should be the toplevel GCC(/GNU tool) source directory.
# The second argument is the manifest subdir.
# The third argument is the manifest target, which defaults to the target
# triplet used during the build.
_MANIFEST_PATH_PATTERN = '%s/%s/%s.xfail'

# The options passed to the program.  Populated by Main() after parsing
# the command line; None until then.
_OPTIONS = None
79 | ||
def Error(msg):
  """Report a fatal error on stderr and terminate with exit status 1."""
  sys.stderr.write('error: %s\n' % msg)
  sys.exit(1)
83 | ||
84 | ||
class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrlist: A comma separated list of attributes.
      Valid values:
        flaky            Indicates that this test may not always fail.  These
                         tests are reported, but their presence does not
                         affect the results.

        expire=YYYYMMDD  After this date, this test will produce an error
                         whether it is in the manifest or not.

    state: One of UNRESOLVED, XPASS or FAIL.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
    ordinal: Monotonically increasing integer.
             It is used to keep results for one .exp file sorted
             by the order the tests were run.
  """

  def __init__(self, summary_line, ordinal=-1):
    try:
      (self.attrs, summary_line) = SplitAttributesFromSummaryLine(summary_line)
      try:
        # A summary line looks like '<STATE>: <name> <description>'.
        (self.state,
         self.name,
         self.description) = re.match(r'([A-Z]+):\s*(\S+)\s*(.*)',
                                      summary_line).groups()
      except:
        # re.match() returned None (no groups); report the offending
        # line before re-raising the resulting exception.
        print('Failed to parse summary line: "%s"' % summary_line)
        raise
      self.ordinal = ordinal
    except ValueError:
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
            self.state, summary_line, self))

  def __lt__(self, other):
    # Order by test name first; for identical names, keep the order in
    # which the results were read from the .sum file (see ordinal).
    return (self.name < other.name or
            (self.name == other.name and self.ordinal < other.ordinal))

  def __hash__(self):
    # NOTE: attrs and ordinal are deliberately excluded, to stay
    # consistent with __eq__ below.
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    # Equality ignores attrs and ordinal so a manifest entry matches
    # the corresponding result from a .sum file.
    return (self.state == other.state and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)

  def ExpirationDate(self):
    # Return a datetime.date object with the expiration date for this
    # test result.  Return None, if no expiration has been set.
    # Aborts via Error() if an expire= attribute is present but malformed.
    if re.search(r'expire=', self.attrs):
      expiration = re.search(r'expire=(\d\d\d\d)(\d\d)(\d\d)', self.attrs)
      if not expiration:
        Error('Invalid expire= format in "%s". Must be of the form '
              '"expire=YYYYMMDD"' % self)
      return datetime.date(int(expiration.group(1)),
                           int(expiration.group(2)),
                           int(expiration.group(3)))
    return None

  def HasExpired(self):
    # Return True if the expiration date of this result has passed.
    # Implicitly returns None (falsy) when no expiration date is set.
    expiration_date = self.ExpirationDate()
    if expiration_date:
      now = datetime.date.today()
      return now > expiration_date
a5baf3b8 DN |
176 | |
def GetMakefileValue(makefile_name, value_name):
  """Return the value assigned to value_name in the given Makefile.

  Args:
    makefile_name: Path of the Makefile to scan.
    value_name: Prefix of the assignment to look for, including the '='
      and any spacing used in the Makefile (e.g. 'srcdir =' or
      'target_alias=').

  Returns:
    The stripped right-hand side of the first line starting with
    value_name, or None if the Makefile does not exist or has no
    such assignment.
  """
  if os.path.exists(makefile_name):
    # 'with' closes the file on every exit path; the original needed
    # two separate explicit close() calls to achieve the same.
    with open(makefile_name, encoding='latin-1', mode='r') as makefile:
      for line in makefile:
        if line.startswith(value_name):
          (_, value) = line.split('=', 1)
          return value.strip()
  return None
188 | ||
189 | ||
def ValidBuildDirectory(builddir):
  """Return True if builddir looks like a configured build tree."""
  has_makefile = os.path.exists('%s/Makefile' % builddir)
  return os.path.exists(builddir) and has_makefile
195 | ||
196 | ||
d5651dcf DE |
def IsComment(line):
  """Return True if line is a manifest comment (leading '#')."""
  return line[:1] == '#'
200 | ||
201 | ||
76ba1222 BM |
def SplitAttributesFromSummaryLine(line):
  """Splits off attributes from a summary line, if present.

  Returns a (attrs, line) pair; attrs is '' when the line carries none.
  """
  attrs = ''
  # A '|' introduces attributes only when the line does not already
  # start with a result state (the description itself may contain '|').
  if '|' in line and not _VALID_TEST_RESULTS_REX.match(line):
    (attrs, line) = line.split('|', 1)
    attrs = attrs.strip()
  return (attrs, line.strip())
211 | ||
212 | ||
def IsInterestingResult(line):
  """Return True if line is one of the summary lines we care about."""
  (_, stripped) = SplitAttributesFromSummaryLine(line)
  return _VALID_TEST_RESULTS_REX.match(stripped) is not None
a5baf3b8 DN |
217 | |
218 | ||
d5651dcf DE |
def IsInclude(line):
  """Return True if line is an @include directive."""
  prefix = "@include "
  return line[:len(prefix)] == prefix
222 | ||
223 | ||
def GetIncludeFile(line, includer):
  """Extract the name of the include file from line.

  The included path is resolved relative to the directory of the
  including manifest file.
  """
  target = line[len("@include "):].strip()
  return os.path.join(os.path.dirname(includer), target)
229 | ||
230 | ||
def IsNegativeResult(line):
  """Return True if line is an @remove directive."""
  prefix = "@remove "
  return line[:len(prefix)] == prefix
234 | ||
235 | ||
def GetNegativeResult(line):
  """Extract the result text following an @remove directive."""
  return line[len("@remove "):].strip()
240 | ||
241 | ||
def ParseManifestWorker(result_set, manifest_path):
  """Read manifest_path, adding the contents to result_set.

  Handles blank lines, '#' comments, @include (recursively parsed,
  relative to this manifest) and @remove directives, and summary
  lines.  Any other content aborts via Error().
  """
  if _OPTIONS.verbosity >= 1:
    print('Parsing manifest file %s.' % manifest_path)
  # 'with' guarantees the manifest is closed even if a nested
  # @include or a malformed line raises.
  with open(manifest_path, encoding='latin-1', mode='r') as manifest_file:
    for line in manifest_file:
      line = line.strip()
      if line == "":
        pass
      elif IsComment(line):
        pass
      elif IsNegativeResult(line):
        result_set.remove(TestResult(GetNegativeResult(line)))
      elif IsInclude(line):
        ParseManifestWorker(result_set, GetIncludeFile(line, manifest_path))
      elif IsInterestingResult(line):
        result_set.add(TestResult(line))
      else:
        Error('Unrecognized line in manifest file: %s' % line)
262 | ||
263 | ||
def ParseManifest(manifest_path):
  """Create a set of TestResult instances from the given manifest file."""
  results = set()
  ParseManifestWorker(results, manifest_path)
  return results
269 | ||
270 | ||
a5baf3b8 DN |
def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file."""
  result_set = set()
  # ordinal is used when sorting the results so that tests within each
  # .exp file are kept sorted (in the order the tests were run).
  ordinal = 0
  # 'with' closes the file on every exit path, replacing the explicit
  # close() of the original.
  with open(sum_fname, encoding='latin-1', mode='r') as sum_file:
    for line in sum_file:
      if IsInterestingResult(line):
        result = TestResult(line, ordinal)
        ordinal += 1
        if result.HasExpired():
          # Tests that have expired are not added to the set of expected
          # results. If they are still present in the set of actual results,
          # they will cause an error to be reported.
          print('WARNING: Expected failure "%s" has expired.' % line.strip())
          continue
        result_set.add(result)
  return result_set
291 | ||
292 | ||
def GetManifest(manifest_path):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty set.
  """
  if not os.path.exists(manifest_path):
    return set()
  return ParseManifest(manifest_path)
305 | ||
306 | ||
def CollectSumFiles(builddir):
  """Return paths of all .sum files under builddir, skipping VCS dirs."""
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    # Prune version-control metadata in place so os.walk never
    # descends into it.
    dirs[:] = [d for d in dirs if d not in ('.svn', '.git')]
    sum_files.extend(os.path.join(root, fname)
                     for fname in files if fname.endswith('.sum'))
  return sum_files
317 | ||
318 | ||
831315d0 DN |
def GetResults(sum_files):
  """Collect all the test results from the given .sum files."""
  build_results = set()
  for sum_fname in sum_files:
    print('\t%s' % sum_fname)
    build_results.update(ParseSummary(sum_fname))
  return build_results
326 | ||
327 | ||
def CompareResults(manifest, actual):
  """Compare sets of results and return two sets:
  - Results present in ACTUAL but missing from MANIFEST.  These are
    reported as errors (new, unexpected failures).
  - Results present in MANIFEST but missing from ACTUAL.  These are
    reported as warnings (expected failures that no longer fail).
    Manifest entries marked 'flaky' are never reported here.
  """
  actual_vs_manifest = {result for result in actual
                        if result not in manifest}
  manifest_vs_actual = {expected for expected in manifest
                        if 'flaky' not in expected.attrs
                        and expected not in actual}
  return actual_vs_manifest, manifest_vs_actual
353 | ||
354 | ||
f6fce951 DE |
def GetManifestPath(srcdir, target, user_provided_must_exist):
  """Return the full path to the manifest file.

  Prefers a user-supplied --manifest path (optionally required to
  exist); otherwise derives the path from srcdir and target, aborting
  if either could not be determined.
  """
  manifest_path = _OPTIONS.manifest
  if manifest_path:
    if user_provided_must_exist and not os.path.exists(manifest_path):
      Error('Manifest does not exist: %s' % manifest_path)
    return manifest_path
  if not srcdir:
    Error('Could not determine the location of GCC\'s source tree. '
          'The Makefile does not contain a definition for "srcdir".')
  if not target:
    Error('Could not determine the target triplet for this build. '
          'The Makefile does not contain a definition for "target_alias".')
  return _MANIFEST_PATH_PATTERN % (srcdir, _MANIFEST_SUBDIR, target)
370 | ||
371 | ||
def GetBuildData():
  """Return (srcdir, target) read from the build directory's Makefile.

  Returns (None, None) when the build directory is not valid but the
  user supplied both --manifest and --results; aborts otherwise.
  """
  if not ValidBuildDirectory(_OPTIONS.build_dir):
    # If we have been given a set of results to use, we may not be
    # inside a valid GCC build directory.  In that case, the user must
    # provide both a manifest file and a set of results to check
    # against it.
    if _OPTIONS.results and _OPTIONS.manifest:
      return None, None
    Error('%s is not a valid GCC top level build directory. '
          'You must use --manifest and --results to do the validation.' %
          _OPTIONS.build_dir)
  srcdir = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'srcdir =')
  target = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'target_alias=')
  print('Source directory: %s' % srcdir)
  print('Build target: %s' % target)
  return srcdir, target
a5baf3b8 DN |
389 | |
390 | ||
def PrintSummary(msg, summary):
  """Print a header message followed by each result in sorted order."""
  print()
  print()
  print(msg)
  for entry in sorted(summary):
    print(entry)
a5baf3b8 DN |
395 | |
396 | ||
29476fe1 DN |
def GetSumFiles(results, build_dir):
  """Return the list of .sum files to analyze.

  A non-empty results string is treated as a space-separated list of
  .sum files; otherwise the build directory is scanned for them.
  """
  if results:
    print('Getting actual results from user-provided results')
    return results.split()
  print('Getting actual results from build directory %s' % build_dir)
  return CollectSumFiles(build_dir)
405 | ||
406 | ||
b436bf38 DN |
def PerformComparison(expected, actual, ignore_missing_failures):
  """Report the differences between the expected and actual result sets.

  Returns True when the actual results contain no failures absent from
  the expected set.
  """
  unexpected, missing = CompareResults(expected, actual)

  tests_ok = not unexpected
  if unexpected:
    PrintSummary('Unexpected results in this build (new failures)',
                 unexpected)
  if missing and not ignore_missing_failures:
    PrintSummary('Expected results not present in this build (fixed tests)'
                 '\n\nNOTE: This is not a failure. It just means that these '
                 'tests were expected\nto fail, but either they worked in '
                 'this configuration or they were not\npresent at all.\n',
                 missing)
  if tests_ok:
    print('\nSUCCESS: No unexpected failures.')
  return tests_ok
427 | ||
428 | ||
def CheckExpectedResults():
  """Validate the build's failures against the manifest.

  Returns True when every actual failure is expected.
  """
  srcdir, target = GetBuildData()
  manifest_path = GetManifestPath(srcdir, target, True)
  print('Manifest: %s' % manifest_path)
  expected = GetManifest(manifest_path)
  actual = GetResults(GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir))

  if _OPTIONS.verbosity >= 1:
    PrintSummary('Tests expected to fail', expected)
    PrintSummary('\nActual test results', actual)

  return PerformComparison(expected, actual, _OPTIONS.ignore_missing_failures)
a5baf3b8 DN |
442 | |
443 | ||
def ProduceManifest():
  """Write a manifest file listing the current build's failures.

  Returns True on success.  Aborts if the manifest already exists and
  --force was not given.
  """
  (srcdir, target) = GetBuildData()
  manifest_path = GetManifestPath(srcdir, target, False)
  print('Manifest: %s' % manifest_path)
  if os.path.exists(manifest_path) and not _OPTIONS.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_path)

  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)
  # 'with' flushes and closes the manifest even if a write fails
  # part-way through.
  with open(manifest_path, encoding='latin-1', mode='w') as manifest_file:
    for result in sorted(actual):
      print(result)
      manifest_file.write('%s\n' % result)

  return True
461 | ||
462 | ||
def CompareBuilds():
  """Compare this build's results against those of a clean build.

  Returns True when this build introduces no failures that the clean
  build did not already have.
  """
  GetBuildData()

  actual = GetResults(GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir))
  clean = GetResults(GetSumFiles(_OPTIONS.results, _OPTIONS.clean_build))

  return PerformComparison(clean, actual, _OPTIONS.ignore_missing_failures)
b436bf38 DN |
473 | |
474 | ||
a5baf3b8 DN |
def Main(argv):
  """Parse the command line and dispatch to the requested action.

  Returns 0 when the requested check succeeds, 1 otherwise.
  """
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--clean_build', action='store', type='string',
                    dest='clean_build', default=None,
                    help='Compare test results from this build against '
                    'those of another (clean) build. Use this option '
                    'when comparing the test results of your patch versus '
                    'the test results of a clean build without your patch. '
                    'You must provide the path to the top directory of your '
                    'clean build.')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  parser.add_option('--ignore_missing_failures', action='store_true',
                    dest='ignore_missing_failures', default=False,
                    help='When a failure is expected in the manifest but '
                    'it is not found in the actual results, the script '
                    'produces a note alerting to this fact. This means '
                    'that the expected failure has been fixed, or '
                    'it did not run, or it may simply be flaky '
                    '(default = False)')
  # Fixed typo in the default path ('testsuite-managment') so the help
  # text matches _MANIFEST_SUBDIR.
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from '
                    'contrib/testsuite-management/<target_alias>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  # The list of states below now matches _VALID_TEST_RESULTS (the old
  # help text omitted ERROR).
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, UNRESOLVED, XPASS or ERROR '
                    '(default = .sum files collected from the build '
                    'directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  global _OPTIONS
  (_OPTIONS, _) = parser.parse_args(argv[1:])

  if _OPTIONS.produce_manifest:
    retval = ProduceManifest()
  elif _OPTIONS.clean_build:
    retval = CompareBuilds()
  else:
    retval = CheckExpectedResults()

  if retval:
    return 0
  else:
    return 1
533 | ||
1099bb0a | 534 | |
a5baf3b8 DN |
if __name__ == '__main__':
  sys.exit(Main(sys.argv))