]> git.ipfire.org Git - thirdparty/glibc.git/blame - benchtests/scripts/compare_strings.py
benchtests: Add --no-diff and --no-header options
[thirdparty/glibc.git] / benchtests / scripts / compare_strings.py
CommitLineData
25d52472 1#!/usr/bin/python
688903eb 2# Copyright (C) 2017-2018 Free Software Foundation, Inc.
25d52472
SP
3# This file is part of the GNU C Library.
4#
5# The GNU C Library is free software; you can redistribute it and/or
6# modify it under the terms of the GNU Lesser General Public
7# License as published by the Free Software Foundation; either
8# version 2.1 of the License, or (at your option) any later version.
9#
10# The GNU C Library is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13# Lesser General Public License for more details.
14#
15# You should have received a copy of the GNU Lesser General Public
16# License along with the GNU C Library; if not, see
17# <http://www.gnu.org/licenses/>.
18"""Compare results of string functions
19
20Given a string benchmark result file, print a table with comparisons with a
21baseline. The baseline is the first function, which typically is the builtin
22function.
23"""
dd3e86ad
SP
24import matplotlib as mpl
25mpl.use('Agg')
b115e819 26
25d52472
SP
27import sys
28import os
29import json
30import pylab
06b1de23 31import argparse
25d52472
SP
32
33try:
34 import jsonschema as validator
35except ImportError:
36 print('Could not find jsonschema module.')
37 raise
38
39
def parse_file(filename, schema_filename):
    """Load and schema-validate a JSON benchmark result file.

    Args:
        filename: Path of the benchmark result file to read.
        schema_filename: Path of the JSON schema to validate against.

    Returns:
        The parsed benchmark results as a dictionary.

    Raises:
        jsonschema.ValidationError: If the results do not match the schema.
    """
    with open(schema_filename, 'r') as sf:
        schema_doc = json.load(sf)
    with open(filename, 'r') as rf:
        bench_doc = json.load(rf)
    validator.validate(bench_doc, schema_doc)
    return bench_doc
47
48
def draw_graph(f, v, ifuncs, results):
    """Plot a line graph comparing ifunc timings for one benchmark variant.

    The graph is saved as '<function>-<variant>.png' in the current
    directory.

    Args:
        f: Function name
        v: Benchmark variant for the function.
        ifuncs: List of ifunc names
        results: Dictionary of results for each test criterion
    """
    print('Generating graph for %s, variant \'%s\'' % (f, v))

    xkeys = results.keys()
    xvals = range(len(xkeys))

    pylab.clf()
    fig = pylab.figure(frameon=False)
    fig.set_size_inches(32, 18)
    pylab.ylabel('Performance improvement from base')
    pylab.xticks(xvals, xkeys)

    # Draw one line per ifunc, sampling that ifunc's timing from every
    # test criterion.
    for idx, ifunc_name in enumerate(ifuncs):
        yvals = [results[k][idx] for k in xkeys]
        pylab.plot(xvals, yvals, label=':' + ifunc_name)

    pylab.legend()
    pylab.grid()
    pylab.savefig('%s-%s.png' % (f, v), bbox_inches='tight')
80
81
def process_results(results, attrs, base_func, graph, no_diff, no_header):
    """Process results and print them

    Prints one comparison table per function, with a row per test
    criterion and a column per ifunc.  Each non-baseline timing is
    followed by its percentage improvement over the baseline unless
    suppressed.

    Args:
        results: JSON dictionary of results
        attrs: Attributes that form the test criteria
        base_func: Baseline ifunc name; the first ifunc when falsy.
        graph: When true, also render a PNG graph per function.
        no_diff: When true, omit the %-difference from the baseline.
        no_header: When true, omit the per-function header lines.
    """
    for func in results['functions'].keys():
        props = results['functions'][func]
        variant = props['bench-variant']

        # Baseline defaults to the first ifunc unless one was requested.
        base_index = props['ifuncs'].index(base_func) if base_func else 0

        if not no_header:
            print('Function: %s' % func)
            print('Variant: %s' % variant)
            print("%36s%s" % (' ', '\t'.join(props['ifuncs'])))
            print("=" * 120)

        graph_res = {}
        for res in props['results']:
            key = ', '.join('%s=%s' % (a, res[a]) for a in attrs)
            sys.stdout.write('%36s: ' % key)

            graph_res[key] = res['timings']
            for idx, t in enumerate(res['timings']):
                sys.stdout.write ('%12.2f' % t)
                if not no_diff and idx != base_index:
                    base = res['timings'][base_index]
                    diff = (base - t) * 100 / base
                    sys.stdout.write (' (%6.2f%%)' % diff)
                sys.stdout.write('\t')
            print('')

        if graph:
            draw_graph(func, variant, props['ifuncs'], graph_res)
25d52472
SP
124
125
def main(args):
    """Program Entry Point

    Take a string benchmark output file, validate it against the schema
    and print timing comparisons for each function variant.

    Args:
        args: Parsed command-line arguments (see the argparse setup in
            the __main__ block).
    """
    # args.attributes is a comma-separated list of result attributes that
    # together identify one benchmark measurement (e.g. 'length,align').
    attrs = args.attributes.split(',')

    # Note: the original had dead locals (filename/schema_filename never
    # used, base_func pre-set to None then overwritten); removed.
    results = parse_file(args.input, args.schema)
    process_results(results, attrs, args.base, args.graph, args.no_diff,
                    args.no_header)
25d52472
SP
140
141
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # The required arguments.
    required = parser.add_argument_group(title='required arguments')
    required.add_argument('-a', '--attributes', required=True,
                          help='Comma separated list of benchmark attributes.')
    required.add_argument('-i', '--input', required=True,
                          help='Input JSON benchmark result file.')
    required.add_argument('-s', '--schema', required=True,
                          help='Schema file to validate the result file.')

    # Optional arguments.
    parser.add_argument('-b', '--base',
                        help='IFUNC variant to set as baseline.')
    parser.add_argument('-g', '--graph', action='store_true',
                        help='Generate a graph from results.')
    parser.add_argument('--no-diff', action='store_true',
                        help='Do not print the difference from baseline.')
    parser.add_argument('--no-header', action='store_true',
                        help='Do not print the header.')

    main(parser.parse_args())