# # Execute single test interactively such that features like `debug ()` work.
# $ meson test -i --test-args='-ix' t1400-update-ref
#
-# Test execution is parallelized by default and scales with the number of
-# processor cores available. You can change the number of processes by passing
-# the `-jN` flag to `meson test`.
+# # Execute all benchmarks.
+# $ meson test -i --benchmark
+#
+# # Execute single benchmark.
+# $ meson test -i --benchmark p0000-*
+#
+# Test execution (but not benchmark execution) is parallelized by default and
+# scales with the number of processor cores available. Benchmarks always run
+# serially, so that their timings are not skewed by concurrent load. You can
+# change the number of test processes by passing the `-jN` flag to `meson test`.
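+#
+# # Execute tests with four parallel processes.
+# $ meson test -j4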
#
# 4. Install the Git distribution. Again, this can be done via Meson, Ninja or
# Samurai:
sed = find_program('sed', dirs: program_path, native: true)
shell = find_program('sh', dirs: program_path, native: true)
tar = find_program('tar', dirs: program_path, native: true)
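+# time(1) is used to measure benchmark timings, so it only needs to be found
+# when the 'benchmarks' feature is enabled.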
+time = find_program('time', dirs: program_path, required: get_option('benchmarks'))
target_shell = find_program('sh', dirs: program_path, native: false)
# features. It is optional if you want to neither execute tests nor use any of
# these optional features.
perl_required = get_option('perl')
-if get_option('gitweb').enabled() or 'netrc' in get_option('credential_helpers') or get_option('docs') != []
+if get_option('benchmarks').enabled() or get_option('gitweb').enabled() or 'netrc' in get_option('credential_helpers') or get_option('docs') != []
perl_required = true
endif
)
summary({
+ 'benchmarks': get_option('tests') and perl.found() and time.found(),
'curl': curl.found(),
'expat': expat.found(),
'gettext': intl.found(),
't9903-bash-prompt.sh',
]
+
+benchmarks = [
+ 'perf/p0000-perf-lib-sanity.sh',
+ 'perf/p0001-rev-list.sh',
+ 'perf/p0002-read-cache.sh',
+ 'perf/p0003-delta-base-cache.sh',
+ 'perf/p0004-lazy-init-name-hash.sh',
+ 'perf/p0005-status.sh',
+ 'perf/p0006-read-tree-checkout.sh',
+ 'perf/p0007-write-cache.sh',
+ 'perf/p0008-odb-fsync.sh',
+ 'perf/p0071-sort.sh',
+ 'perf/p0090-cache-tree.sh',
+ 'perf/p0100-globbing.sh',
+ 'perf/p1006-cat-file.sh',
+ 'perf/p1400-update-ref.sh',
+ 'perf/p1450-fsck.sh',
+ 'perf/p1451-fsck-skip-list.sh',
+ 'perf/p1500-graph-walks.sh',
+ 'perf/p2000-sparse-operations.sh',
+ 'perf/p3400-rebase.sh',
+ 'perf/p3404-rebase-interactive.sh',
+ 'perf/p4000-diff-algorithms.sh',
+ 'perf/p4001-diff-no-index.sh',
+ 'perf/p4002-diff-color-moved.sh',
+ 'perf/p4205-log-pretty-formats.sh',
+ 'perf/p4209-pickaxe.sh',
+ 'perf/p4211-line-log.sh',
+ 'perf/p4220-log-grep-engines.sh',
+ 'perf/p4221-log-grep-engines-fixed.sh',
+ 'perf/p5302-pack-index.sh',
+ 'perf/p5303-many-packs.sh',
+ 'perf/p5304-prune.sh',
+ 'perf/p5310-pack-bitmaps.sh',
+ 'perf/p5311-pack-bitmaps-fetch.sh',
+ 'perf/p5312-pack-bitmaps-revs.sh',
+ 'perf/p5313-pack-objects.sh',
+ 'perf/p5314-name-hash.sh',
+ 'perf/p5326-multi-pack-bitmaps.sh',
+ 'perf/p5332-multi-pack-reuse.sh',
+ 'perf/p5333-pseudo-merge-bitmaps.sh',
+ 'perf/p5550-fetch-tags.sh',
+ 'perf/p5551-fetch-rescan.sh',
+ 'perf/p5600-partial-clone.sh',
+ 'perf/p5601-clone-reference.sh',
+ 'perf/p6100-describe.sh',
+ 'perf/p6300-for-each-ref.sh',
+ 'perf/p7000-filter-branch.sh',
+ 'perf/p7102-reset.sh',
+ 'perf/p7300-clean.sh',
+ 'perf/p7519-fsmonitor.sh',
+ 'perf/p7527-builtin-fsmonitor.sh',
+ 'perf/p7810-grep.sh',
+ 'perf/p7820-grep-engines.sh',
+ 'perf/p7821-grep-engines-fixed.sh',
+ 'perf/p7822-grep-perl-character.sh',
+ 'perf/p9210-scalar.sh',
+ 'perf/p9300-fast-import-export.sh',
+]
+
-# Sanity check that we are not missing any tests present in 't/'. This check
-# only runs once at configure time and is thus best-effort, only. It is
-# sufficient to catch missing test suites in our CI though.
+# Sanity check that we are not missing any tests or benchmarks present in
+# 't/'. This check runs only once, at configure time, and is thus best-effort
+# only. It is sufficient to catch missing test suites in our CI, though.
foreach glob, tests : {
't[0-9][0-9][0-9][0-9]-*.sh': integration_tests,
+ 'perf/p[0-9][0-9][0-9][0-9]-*.sh': benchmarks,
'unit-tests/t-*.c': unit_test_programs,
'unit-tests/u-*.c': clar_test_suites,
}
timeout: 0,
)
endforeach
+
+if perl.found() and time.found()
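+  # Benchmarks run with the same environment as tests, plus GTIME, which the
+  # perf scripts use to locate the time(1) binary for measuring each run.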
+ benchmark_environment = test_environment
+ benchmark_environment.set('GTIME', time.full_path())
+
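+  # Register each perf script as a Meson benchmark so that
+  # `meson test --benchmark` picks it up; Meson never runs benchmarks in
+  # parallel.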
+ foreach benchmark : benchmarks
+ benchmark(fs.stem(benchmark), shell,
+ args: [
+ fs.name(benchmark),
+ ],
+ workdir: meson.current_source_dir() / 'perf',
+ env: benchmark_environment,
+ depends: test_dependencies + bin_wrappers,
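+      # A timeout of 0 disables Meson's per-test timeout; benchmarks can run
+      # for a long time.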
+ timeout: 0,
+ )
+ endforeach
+endif