Source: benchtests/scripts/compare_strings.py from thirdparty/glibc.git
(git blame view served by git.ipfire.org).
Commit shown: "benchtests: New -g option to generate graphs in compare_strings.py"
1#!/usr/bin/python
2# Copyright (C) 2017 Free Software Foundation, Inc.
3# This file is part of the GNU C Library.
4#
5# The GNU C Library is free software; you can redistribute it and/or
6# modify it under the terms of the GNU Lesser General Public
7# License as published by the Free Software Foundation; either
8# version 2.1 of the License, or (at your option) any later version.
9#
10# The GNU C Library is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13# Lesser General Public License for more details.
14#
15# You should have received a copy of the GNU Lesser General Public
16# License along with the GNU C Library; if not, see
17# <http://www.gnu.org/licenses/>.
18"""Compare results of string functions
19
20Given a string benchmark result file, print a table with comparisons with a
21baseline. The baseline is the first function, which typically is the builtin
22function.
23"""
dd3e86ad
SP
24import matplotlib as mpl
25mpl.use('Agg')
b115e819 26
25d52472
SP
27import sys
28import os
29import json
30import pylab
06b1de23 31import argparse
25d52472
SP
32
33try:
34 import jsonschema as validator
35except ImportError:
36 print('Could not find jsonschema module.')
37 raise
38
39
def parse_file(filename, schema_filename):
    """Load a benchmark result file after validating it against a schema.

    Args:
        filename: Path to the JSON benchmark result file.
        schema_filename: Path to the JSON schema describing valid results.

    Returns:
        The validated benchmark results as a dictionary.

    Raises:
        jsonschema.ValidationError: If the results do not match the schema.
    """
    with open(schema_filename, 'r') as sfile:
        result_schema = json.load(sfile)

    with open(filename, 'r') as bfile:
        bench_results = json.load(bfile)

    validator.validate(bench_results, result_schema)
    return bench_results
47
48
def draw_graph(f, v, ifuncs, results):
    """Plot graphs for functions

    Plot a line graph for each of the ifuncs and save it as
    '<function>-<variant>.png' in the current directory.

    Args:
        f: Function name
        v: Benchmark variant for the function.
        ifuncs: List of ifunc names
        results: Dictionary of results for each test criterion
    """
    print('Generating graph for %s, variant \'%s\'' % (f, v))
    # Materialize the keys once so every use below sees the same order.
    xkeys = list(results.keys())

    pylab.clf()
    fig = pylab.figure(frameon=False)
    fig.set_size_inches(32, 18)
    pylab.ylabel('Performance improvement from base')
    X = range(len(xkeys))
    pylab.xticks(X, xkeys)

    # One line per ifunc: the i-th timing of each result belongs to the
    # i-th ifunc.
    for i, ifunc in enumerate(ifuncs):
        Y = [results[k][i] for k in xkeys]
        pylab.plot(X, Y, label=':' + ifunc)

    pylab.legend()
    pylab.grid()
    pylab.savefig('%s-%s.png' % (f, v), bbox_inches='tight')
80
81
def process_results(results, attrs, base_func, graph):
    """ Process results and print them

    For each function, print one row per benchmark variant with one
    timing column per ifunc, annotating every non-baseline column with
    its percentage improvement over the baseline timing.

    Args:
        results: JSON dictionary of results
        attrs: Attributes that form the test criteria
        base_func: Name of the ifunc to use as baseline, or None/'' to
            use the first ifunc
        graph: If True, also generate a PNG graph per function via
            draw_graph

    Raises:
        ValueError: If base_func is not among a function's ifuncs.
    """

    for f in results['functions']:
        print('Function: %s' % f)
        v = results['functions'][f]['bench-variant']
        print('Variant: %s' % v)

        # The baseline defaults to the first ifunc unless one was named
        # explicitly on the command line.
        base_index = 0
        if base_func:
            base_index = results['functions'][f]['ifuncs'].index(base_func)

        print("%36s%s" % (' ', '\t'.join(results['functions'][f]['ifuncs'])))
        print("=" * 120)
        graph_res = {}
        for res in results['functions'][f]['results']:
            attr_list = ['%s=%s' % (a, res[a]) for a in attrs]
            key = ', '.join(attr_list)
            sys.stdout.write('%36s: ' % key)
            graph_res[key] = res['timings']
            for i, t in enumerate(res['timings']):
                sys.stdout.write('%12.2f' % t)
                # Show improvement relative to the baseline for every
                # non-baseline column; positive means faster than base.
                if i != base_index:
                    base = res['timings'][base_index]
                    diff = (base - t) * 100 / base
                    sys.stdout.write(' (%6.2f%%)' % diff)
                sys.stdout.write('\t')
            print('')

        if graph:
            draw_graph(f, v, results['functions'][f]['ifuncs'], graph_res)
25d52472
SP
120
121
def main(args):
    """Program Entry Point

    Take a string benchmark output file and compare timings.

    Args:
        args: Parsed command line arguments; must provide `attributes`,
            `input`, `schema`, `base` and `graph` (see the argparse
            setup in __main__).
    """
    # Attributes arrive as a single comma-separated string.
    attrs = args.attributes.split(',')

    results = parse_file(args.input, args.schema)
    process_results(results, attrs, args.base, args.graph)
25d52472
SP
136
137
if __name__ == '__main__':
    cli = argparse.ArgumentParser()

    # Arguments that must always be supplied.
    required = cli.add_argument_group(title='required arguments')
    required.add_argument('-a', '--attributes', required=True,
                          help='Comma separated list of benchmark attributes.')
    required.add_argument('-i', '--input', required=True,
                          help='Input JSON benchmark result file.')
    required.add_argument('-s', '--schema', required=True,
                          help='Schema file to validate the result file.')

    # Arguments that may be omitted.
    cli.add_argument('-b', '--base',
                     help='IFUNC variant to set as baseline.')
    cli.add_argument('-g', '--graph', action='store_true',
                     help='Generate a graph from results.')

    main(cli.parse_args())