# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each executed
# test, and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import errno
import os
import os.path
import pytest
from _pytest.runner import runtestprotocol
import ConfigParser
import re
import StringIO
import sys

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Errors caused
    by directories that already exist are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel ' +
            'over which gdbserver should communicate, e.g. localhost:1234')

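# Illustrative invocations (not part of this file's logic): the options above
# are passed on the py.test command line, typically via the test/py/test.py
# wrapper script if present in the tree. Board names and paths are examples
# only:
#
#   ./test/py/test.py --bd sandbox --build
#   ./test/py/test.py --bd seaboard --id na --build-dir /path/to/build
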
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and board_type != 'sandbox':
        raise Exception('--gdbserver only supported with sandbox')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

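    # Note on the mapping above: prefixing the flat key=value config file with
    # a '[root]' section header lets RawConfigParser consume it, and the parser
    # lower-cases option names. So, for example (illustrative value), a .config
    # line 'CONFIG_CMD_MEMORY=y' ends up as
    # ubconfig.buildconfig['config_cmd_memory'] == 'y', which is exactly the
    # form queried by setup_buildconfigspec() below.
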
    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

re_ut_test_list = re.compile(r'_u_boot_list_2_(dm|env)_test_2_\1_test_(.*)\s*$')
def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    fn = console.config.build_dir + '/u-boot.sym'
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except:
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)

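# For reference (symbol name illustrative): a line in u-boot.sym such as
#   000000000000c8a0 D _u_boot_list_2_dm_test_2_dm_test_autobind
# matches re_ut_test_list above with groups ('dm', 'autobind'), so the test is
# parametrized with the value 'dm autobind' and the test ID 'ut_dm_autobind'.
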
def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except:
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)

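# Illustrative data (hypothetical names): given a board environment module
# containing either
#   env__foo = {'fixture_id': 'default', 'bar': 123}
# or
#   env__foos = [{'bar': 1}, {'bar': 2}]
# a test declaring a fixture parameter named env__foo runs once with the single
# dict in the first case, or once per list entry in the second case.
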
def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
            continue
        generate_config(metafunc, fn)

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console

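# Minimal usage sketch (test name and expected output are illustrative):
#
#   def test_example(u_boot_console):
#       response = u_boot_console.run_command('version')
#       assert 'U-Boot' in response
#
# run_command() is provided by the console implementations selected in
# pytest_configure() above.
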
anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_passed = []

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.append(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            if tests_skipped:
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
            if tests_xpassed:
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
            if tests_xfailed:
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
            if tests_failed:
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
            if tests_not_run:
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board not supported')
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board not supported')

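# Usage sketch (board names illustrative):
#   @pytest.mark.boardspec('sandbox')    # run only on the sandbox board
#   @pytest.mark.boardspec('!seaboard')  # run on any board except seaboard
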
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature not enabled')

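# Usage sketch (option name illustrative):
#   @pytest.mark.buildconfigspec('cmd_memory')
# skips the test unless CONFIG_CMD_MEMORY is enabled in the build being tested.
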
def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    anchors[item.name] = log.start_section(item.name)
    setup_boardspec(item)
    setup_buildconfigspec(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    reports = runtestprotocol(item, nextitem=nextitem)

    failure_cleanup = False
    test_list = tests_passed
    msg = 'OK'
    msg_log = log.status_pass
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports