git.ipfire.org Git - thirdparty/curl.git/commitdiff
scorecard: rework format and add json print
author: Stefan Eissing <stefan@eissing.org>
Fri, 30 May 2025 10:58:34 +0000 (12:58 +0200)
committer: Daniel Stenberg <daniel@haxx.se>
Mon, 2 Jun 2025 21:15:33 +0000 (23:15 +0200)
Improve the JSON result format to be more descriptive and
easier to parse.

Add a --print option to scorecard.py that prints a saved JSON file.
Add date field to score card.

Closes #17465

tests/http/scorecard.py
tests/http/testenv/caddy.py
tests/http/testenv/env.py
tests/http/testenv/nghttpx.py

index 93d6c05eb21a78eefa27ec5c53f7a842aa32e1ff..1966d9285c3c3e83c43da9cbb656e1905763cfa1 100644 (file)
@@ -25,6 +25,7 @@
 ###########################################################################
 #
 import argparse
+import datetime
 import json
 import logging
 import os
@@ -42,7 +43,140 @@ class ScoreCardError(Exception):
     pass
 
 
-class ScoreCard:
+class Card:
+    @classmethod
+    def fmt_ms(cls, tval):
+        return f'{int(tval*1000)} ms' if tval >= 0 else '--'
+
+    @classmethod
+    def fmt_size(cls, val):
+        if val >= (1024*1024*1024):
+            return f'{val / (1024*1024*1024):0.000f}GB'
+        elif val >= (1024 * 1024):
+            return f'{val / (1024*1024):0.000f}MB'
+        elif val >= 1024:
+            return f'{val / 1024:0.000f}KB'
+        else:
+            return f'{val:0.000f}B'
+
+    @classmethod
+    def fmt_mbs(cls, val):
+        return f'{val/(1024*1024):0.000f} MB/s' if val >= 0 else '--'
+
+    @classmethod
+    def fmt_reqs(cls, val):
+        return f'{val:0.000f} r/s' if val >= 0 else '--'
+
+    @classmethod
+    def mk_mbs_cell(cls, samples, profiles, errors):
+        val = mean(samples) if len(samples) else -1
+        cell = {
+            'val': val,
+            'sval': Card.fmt_mbs(val) if val >= 0 else '--',
+        }
+        if len(profiles):
+            cell['stats'] = RunProfile.AverageStats(profiles)
+        if len(errors):
+            cell['errors'] = errors
+        return cell
+
+    @classmethod
+    def mk_reqs_cell(cls, samples, profiles, errors):
+        val = mean(samples) if len(samples) else -1
+        cell = {
+            'val': val,
+            'sval': Card.fmt_reqs(val) if val >= 0 else '--',
+        }
+        if len(profiles):
+            cell['stats'] = RunProfile.AverageStats(profiles)
+        if len(errors):
+            cell['errors'] = errors
+        return cell
+
+    @classmethod
+    def parse_size(cls, s):
+        m = re.match(r'(\d+)(mb|kb|gb)?', s, re.IGNORECASE)
+        if m is None:
+            raise Exception(f'unrecognized size: {s}')
+        size = int(m.group(1))
+        if not m.group(2):
+            pass
+        elif m.group(2).lower() == 'kb':
+            size *= 1024
+        elif m.group(2).lower() == 'mb':
+            size *= 1024 * 1024
+        elif m.group(2).lower() == 'gb':
+            size *= 1024 * 1024 * 1024
+        return size
+
+    @classmethod
+    def print_score(cls, score):
+        print(f'Scorecard curl, protocol {score["meta"]["protocol"]} '
+              f'via {score["meta"]["implementation"]}/'
+              f'{score["meta"]["implementation_version"]}')
+        print(f'Date: {score["meta"]["date"]}')
+        if 'curl_V' in score["meta"]:
+            print(f'Version: {score["meta"]["curl_V"]}')
+        if 'curl_features' in score["meta"]:
+            print(f'Features: {score["meta"]["curl_features"]}')
+        print(f'Samples Size: {score["meta"]["samples"]}')
+        if 'handshakes' in score:
+            print(f'{"Handshakes":<24} {"ipv4":25} {"ipv6":28}')
+            print(f'  {"Host":<17} {"Connect":>12} {"Handshake":>12} '
+                  f'{"Connect":>12} {"Handshake":>12}     {"Errors":<20}')
+            for key, val in score["handshakes"].items():
+                print(f'  {key:<17} {Card.fmt_ms(val["ipv4-connect"]):>12} '
+                      f'{Card.fmt_ms(val["ipv4-handshake"]):>12} '
+                      f'{Card.fmt_ms(val["ipv6-connect"]):>12} '
+                      f'{Card.fmt_ms(val["ipv6-handshake"]):>12}     '
+                      f'{"/".join(val["ipv4-errors"] + val["ipv6-errors"]):<20}'
+                      )
+        for name in ['downloads', 'uploads', 'requests']:
+            if name in score:
+                Card.print_score_table(score[name])
+
+    @classmethod
+    def print_score_table(cls, score):
+        cols = score['cols']
+        rows = score['rows']
+        colw = []
+        statw = 13
+        errors = []
+        col_has_stats = []
+        for idx, col in enumerate(cols):
+            cellw = max([len(r[idx]["sval"]) for r in rows])
+            colw.append(max(cellw, len(col)))
+            col_has_stats.append(False)
+            for row in rows:
+                if 'stats' in row[idx]:
+                    col_has_stats[idx] = True
+                    break
+        if 'title' in score['meta']:
+            print(score['meta']['title'])
+        for idx, col in enumerate(cols):
+            if col_has_stats[idx]:
+                print(f'  {col:>{colw[idx]}} {"[cpu/rss]":<{statw}}', end='')
+            else:
+                print(f'  {col:>{colw[idx]}}', end='')
+        print('')
+        for row in rows:
+            for idx, cell in enumerate(row):
+                print(f'  {cell["sval"]:>{colw[idx]}}', end='')
+                if col_has_stats[idx]:
+                    if 'stats' in cell:
+                        s = f'[{cell["stats"]["cpu"]:>.1f}%' \
+                            f'/{Card.fmt_size(cell["stats"]["rss"])}]'
+                    else:
+                        s = ''
+                    print(f' {s:<{statw}}', end='')
+                if 'errors' in cell:
+                    errors.extend(cell['errors'])
+            print('')
+        if len(errors):
+            print(f'Errors: {errors}')
+
+
+class ScoreRunner:
 
     def __init__(self, env: Env,
                  protocol: str,
@@ -115,7 +249,7 @@ class ScoreCard:
                         downloads: Optional[List[int]] = None):
         if downloads is not None:
             for fsize in downloads:
-                label = self.fmt_size(fsize)
+                label = Card.fmt_size(fsize)
                 fname = f'score{label}.data'
                 self._make_docs_file(docs_dir=server_docs,
                                      fname=fname, fsize=fsize)
@@ -133,14 +267,13 @@ class ScoreCard:
             error += f'{len(fails)} failed'
         return error if len(error) > 0 else None
 
-    def transfer_single(self, url: str, count: int):
-        sample_size = count
+    def dl_single(self, url: str, nsamples: int = 1):
         count = 1
         samples = []
         errors = []
         profiles = []
         self.info('single...')
-        for _ in range(sample_size):
+        for _ in range(nsamples):
             curl = CurlClient(env=self.env, silent=self._silent_curl,
                               server_addr=self.server_addr)
             r = curl.http_download(urls=[url], alpn_proto=self.protocol,
@@ -153,23 +286,15 @@ class ScoreCard:
                 total_size = sum([s['size_download'] for s in r.stats])
                 samples.append(total_size / r.duration.total_seconds())
                 profiles.append(r.profile)
-        return {
-            'count': count,
-            'samples': sample_size,
-            'max-parallel': 1,
-            'speed': mean(samples) if len(samples) else -1,
-            'errors': errors,
-            'stats': RunProfile.AverageStats(profiles),
-        }
+        return Card.mk_mbs_cell(samples, profiles, errors)
 
-    def transfer_serial(self, url: str, count: int):
-        sample_size = 1
+    def dl_serial(self, url: str, count: int, nsamples: int = 1):
         samples = []
         errors = []
         profiles = []
         url = f'{url}?[0-{count - 1}]'
         self.info('serial...')
-        for _ in range(sample_size):
+        for _ in range(nsamples):
             curl = CurlClient(env=self.env, silent=self._silent_curl,
                               server_addr=self.server_addr)
             r = curl.http_download(urls=[url], alpn_proto=self.protocol,
@@ -182,24 +307,16 @@ class ScoreCard:
                 total_size = sum([s['size_download'] for s in r.stats])
                 samples.append(total_size / r.duration.total_seconds())
                 profiles.append(r.profile)
-        return {
-            'count': count,
-            'samples': sample_size,
-            'max-parallel': 1,
-            'speed': mean(samples) if len(samples) else -1,
-            'errors': errors,
-            'stats': RunProfile.AverageStats(profiles),
-        }
+        return Card.mk_mbs_cell(samples, profiles, errors)
 
-    def transfer_parallel(self, url: str, count: int):
-        sample_size = 1
+    def dl_parallel(self, url: str, count: int, nsamples: int = 1):
         samples = []
         errors = []
         profiles = []
         max_parallel = self._download_parallel if self._download_parallel > 0 else count
         url = f'{url}?[0-{count - 1}]'
         self.info('parallel...')
-        for _ in range(sample_size):
+        for _ in range(nsamples):
             curl = CurlClient(env=self.env, silent=self._silent_curl,
                               server_addr=self.server_addr)
             r = curl.http_download(urls=[url], alpn_proto=self.protocol,
@@ -217,34 +334,39 @@ class ScoreCard:
                 total_size = sum([s['size_download'] for s in r.stats])
                 samples.append(total_size / r.duration.total_seconds())
                 profiles.append(r.profile)
-        return {
-            'count': count,
-            'samples': sample_size,
-            'max-parallel': max_parallel,
-            'speed': mean(samples) if len(samples) else -1,
-            'errors': errors,
-            'stats': RunProfile.AverageStats(profiles),
-        }
+        return Card.mk_mbs_cell(samples, profiles, errors)
 
-    def download_url(self, label: str, url: str, count: int):
-        self.info(f'  {count}x{label}: ')
-        props = {
-            'single': self.transfer_single(url=url, count=10),
-        }
+    def downloads(self, count: int, fsizes: List[int], meta: Dict[str, Any]) -> Dict[str, Any]:
+        nsamples = meta['samples']
+        max_parallel = self._download_parallel if self._download_parallel > 0 else count
+        cols = ['size', 'single']
         if count > 1:
-            props['serial'] = self.transfer_serial(url=url, count=count)
-            props['parallel'] = self.transfer_parallel(url=url, count=count)
-        self.info('ok.\n')
-        return props
-
-    def downloads(self, count: int, fsizes: List[int]) -> Dict[str, Any]:
-        scores = {}
+            cols.append(f'serial({count})')
+            cols.append(f'parallel({count}x{max_parallel})')
+        rows = []
         for fsize in fsizes:
-            label = self.fmt_size(fsize)
-            fname = f'score{label}.data'
-            url = f'https://{self.env.domain1}:{self.server_port}/{fname}'
-            scores[label] = self.download_url(label=label, url=url, count=count)
-        return scores
+            row = [{
+                'val': fsize,
+                'sval': Card.fmt_size(fsize)
+            }]
+            self.info(f'{row[0]["sval"]} downloads...')
+            url = f'https://{self.env.domain1}:{self.server_port}/score{row[0]["sval"]}.data'
+
+            row.append(self.dl_single(url=url, nsamples=nsamples))
+            if count > 1:
+                row.append(self.dl_serial(url=url, count=count, nsamples=nsamples))
+                row.append(self.dl_parallel(url=url, count=count, nsamples=nsamples))
+            rows.append(row)
+            self.info('done.\n')
+        return {
+            'meta': {
+                'title': f'Downloads from {meta["server"]}',
+                'count': count,
+                'max-parallel': max_parallel,
+            },
+            'cols': cols,
+            'rows': rows,
+        }
 
     def _check_uploads(self, r: ExecResult, count: int):
         error = ''
@@ -259,42 +381,32 @@ class ScoreCard:
             error += f'[{f["response_code"]}]'
         return error if len(error) > 0 else None
 
-    def upload_single(self, url: str, fpath: str, count: int):
-        sample_size = count
-        count = 1
+    def ul_single(self, url: str, fpath: str, nsamples: int = 1):
         samples = []
         errors = []
         profiles = []
         self.info('single...')
-        for _ in range(sample_size):
+        for _ in range(nsamples):
             curl = CurlClient(env=self.env, silent=self._silent_curl,
                               server_addr=self.server_addr)
             r = curl.http_put(urls=[url], fdata=fpath, alpn_proto=self.protocol,
                               with_headers=False, with_profile=True)
-            err = self._check_uploads(r, count)
+            err = self._check_uploads(r, 1)
             if err:
                 errors.append(err)
             else:
                 total_size = sum([s['size_upload'] for s in r.stats])
                 samples.append(total_size / r.duration.total_seconds())
                 profiles.append(r.profile)
-        return {
-            'count': count,
-            'samples': sample_size,
-            'max-parallel': 1,
-            'speed': mean(samples) if len(samples) else -1,
-            'errors': errors,
-            'stats': RunProfile.AverageStats(profiles) if len(profiles) else {},
-        }
+        return Card.mk_mbs_cell(samples, profiles, errors)
 
-    def upload_serial(self, url: str, fpath: str, count: int):
-        sample_size = 1
+    def ul_serial(self, url: str, fpath: str, count: int, nsamples: int = 1):
         samples = []
         errors = []
         profiles = []
         url = f'{url}?id=[0-{count - 1}]'
         self.info('serial...')
-        for _ in range(sample_size):
+        for _ in range(nsamples):
             curl = CurlClient(env=self.env, silent=self._silent_curl,
                               server_addr=self.server_addr)
             r = curl.http_put(urls=[url], fdata=fpath, alpn_proto=self.protocol,
@@ -306,31 +418,23 @@ class ScoreCard:
                 total_size = sum([s['size_upload'] for s in r.stats])
                 samples.append(total_size / r.duration.total_seconds())
                 profiles.append(r.profile)
-        return {
-            'count': count,
-            'samples': sample_size,
-            'max-parallel': 1,
-            'speed': mean(samples) if len(samples) else -1,
-            'errors': errors,
-            'stats': RunProfile.AverageStats(profiles) if len(profiles) else {},
-        }
+        return Card.mk_mbs_cell(samples, profiles, errors)
 
-    def upload_parallel(self, url: str, fpath: str, count: int):
-        sample_size = 1
+    def ul_parallel(self, url: str, fpath: str, count: int, nsamples: int = 1):
         samples = []
         errors = []
         profiles = []
-        max_parallel = count
+        max_parallel = self._download_parallel if self._download_parallel > 0 else count
         url = f'{url}?id=[0-{count - 1}]'
         self.info('parallel...')
-        for _ in range(sample_size):
+        for _ in range(nsamples):
             curl = CurlClient(env=self.env, silent=self._silent_curl,
                               server_addr=self.server_addr)
             r = curl.http_put(urls=[url], fdata=fpath, alpn_proto=self.protocol,
                               with_headers=False, with_profile=True,
                               extra_args=[
                                    '--parallel',
-                                    '--parallel-max', str(max_parallel)
+                                   '--parallel-max', str(max_parallel)
                               ])
             err = self._check_uploads(r, count)
             if err:
@@ -339,43 +443,44 @@ class ScoreCard:
                 total_size = sum([s['size_upload'] for s in r.stats])
                 samples.append(total_size / r.duration.total_seconds())
                 profiles.append(r.profile)
-        return {
-            'count': count,
-            'samples': sample_size,
-            'max-parallel': max_parallel,
-            'speed': mean(samples) if len(samples) else -1,
-            'errors': errors,
-            'stats': RunProfile.AverageStats(profiles) if len(profiles) else {},
-        }
+        return Card.mk_mbs_cell(samples, profiles, errors)
 
-    def upload_url(self, label: str, url: str, fpath: str, count: int):
-        self.info(f'  {count}x{label}: ')
-        props = {
-            'single': self.upload_single(url=url, fpath=fpath, count=10),
-        }
-        if count > 1:
-            props['serial'] = self.upload_serial(url=url, fpath=fpath, count=count)
-            props['parallel'] = self.upload_parallel(url=url, fpath=fpath, count=count)
-        self.info('ok.\n')
-        return props
-
-    def uploads(self, count: int, fsizes: List[int]) -> Dict[str, Any]:
-        scores = {}
+    def uploads(self, count: int, fsizes: List[int], meta: Dict[str, Any]) -> Dict[str, Any]:
+        nsamples = meta['samples']
+        max_parallel = self._download_parallel if self._download_parallel > 0 else count
         url = f'https://{self.env.domain2}:{self.server_port}/curltest/put'
-        fpaths = {}
+        cols = ['size', 'single']
+        if count > 1:
+            cols.append(f'serial({count})')
+            cols.append(f'parallel({count}x{max_parallel})')
+        rows = []
         for fsize in fsizes:
-            label = self.fmt_size(fsize)
-            fname = f'upload{label}.data'
-            fpaths[label] = self._make_docs_file(docs_dir=self.env.gen_dir,
-                                                 fname=fname, fsize=fsize)
-
-        for label, fpath in fpaths.items():
-            scores[label] = self.upload_url(label=label, url=url, fpath=fpath,
-                                            count=count)
-        return scores
-
-    def do_requests(self, url: str, count: int, max_parallel: int = 1):
-        sample_size = 1
+            row = [{
+                'val': fsize,
+                'sval': Card.fmt_size(fsize)
+            }]
+            fname = f'upload{row[0]["sval"]}.data'
+            fpath = self._make_docs_file(docs_dir=self.env.gen_dir,
+                                         fname=fname, fsize=fsize)
+
+            self.info(f'{row[0]["sval"]} uploads...')
+            row.append(self.ul_single(url=url, fpath=fpath, nsamples=nsamples))
+            if count > 1:
+                row.append(self.ul_serial(url=url, fpath=fpath, count=count, nsamples=nsamples))
+                row.append(self.ul_parallel(url=url, fpath=fpath, count=count, nsamples=nsamples))
+            rows.append(row)
+            self.info('done.\n')
+        return {
+            'meta': {
+                'title': f'Uploads to {meta["server"]}',
+                'count': count,
+                'max-parallel': max_parallel,
+            },
+            'cols': cols,
+            'rows': rows,
+        }
+
+    def do_requests(self, url: str, count: int, max_parallel: int = 1, nsamples: int = 1):
         samples = []
         errors = []
         profiles = []
@@ -388,7 +493,7 @@ class ScoreCard:
                '--parallel', '--parallel-max', str(max_parallel)
             ])
         self.info(f'{max_parallel}...')
-        for _ in range(sample_size):
+        for _ in range(nsamples):
             curl = CurlClient(env=self.env, silent=self._silent_curl,
                               server_addr=self.server_addr)
             r = curl.http_download(urls=[url], alpn_proto=self.protocol, no_save=True,
@@ -405,28 +510,34 @@ class ScoreCard:
                 if non_200s > 0:
                     errors.append(f'responses != 200: {non_200s}')
             profiles.append(r.profile)
-        return {
-            'count': count,
-            'samples': sample_size,
-            'speed': mean(samples) if len(samples) else -1,
-            'errors': errors,
-            'stats': RunProfile.AverageStats(profiles),
-        }
+        return Card.mk_reqs_cell(samples, profiles, errors)
 
-    def requests_url(self, url: str, count: int):
-        self.info(f'  {url}: ')
-        props = {}
-        # 300 is max in curl, see tool_main.h
-        for m in [1, 6, 25, 50, 100, 300]:
-            props[str(m)] = self.do_requests(url=url, count=count, max_parallel=m)
-        self.info('ok.\n')
-        return props
-
-    def requests(self, req_count) -> Dict[str, Any]:
+    def requests(self, count: int, meta: Dict[str, Any]) -> Dict[str, Any]:
         url = f'https://{self.env.domain1}:{self.server_port}/reqs10.data'
+        fsize = 10*1024
+        cols = ['size', 'total']
+        rows = []
+        cols.extend([f'{mp} max' for mp in [1, 6, 25, 50, 100, 300]])
+        row = [{
+            'val': fsize,
+            'sval': Card.fmt_size(fsize)
+        },{
+            'val': count,
+            'sval': f'{count}',
+        }]
+        self.info('requests, max parallel...')
+        row.extend([self.do_requests(url=url, count=count,
+                                     max_parallel=mp, nsamples=meta["samples"])
+                    for mp in [1, 6, 25, 50, 100, 300]])
+        rows.append(row)
+        self.info('done.\n')
         return {
-            'count': req_count,
-            '10KB': self.requests_url(url=url, count=req_count),
+            'meta': {
+                'title': f'Requests in parallel to {meta["server"]}',
+                'count': count,
+            },
+            'cols': cols,
+            'rows': rows,
         }
 
     def score(self,
@@ -436,297 +547,81 @@ class ScoreCard:
               uploads: Optional[List[int]] = None,
               upload_count: int = 50,
               req_count=5000,
+              nsamples: int = 1,
               requests: bool = True):
         self.info(f"scoring {self.protocol} against {self.server_descr}\n")
-        p = {}
+
+        score = {
+            'meta': {
+                'curl_version': self.env.curl_version(),
+                'curl_V': self.env.curl_fullname(),
+                'curl_features': self.env.curl_features_string(),
+                'os': self.env.curl_os(),
+                'server': self.server_descr,
+                'samples': nsamples,
+                'date': f'{datetime.datetime.now(tz=datetime.timezone.utc).isoformat()}',
+            }
+        }
         if self.protocol == 'h3':
-            p['name'] = 'h3'
+            score['meta']['protocol'] = 'h3'
             if not self.env.have_h3_curl():
                 raise ScoreCardError('curl does not support HTTP/3')
             for lib in ['ngtcp2', 'quiche', 'msh3', 'nghttp3']:
                 if self.env.curl_uses_lib(lib):
-                    p['implementation'] = lib
+                    score['meta']['implementation'] = lib
                     break
         elif self.protocol == 'h2':
-            p['name'] = 'h2'
+            score['meta']['protocol'] = 'h2'
             if not self.env.have_h2_curl():
                 raise ScoreCardError('curl does not support HTTP/2')
             for lib in ['nghttp2']:
                 if self.env.curl_uses_lib(lib):
-                    p['implementation'] = lib
+                    score['meta']['implementation'] = lib
                     break
         elif self.protocol == 'h1' or self.protocol == 'http/1.1':
-            proto = 'http/1.1'
-            p['name'] = proto
-            p['implementation'] = 'native'
+            score['meta']['protocol'] = 'http/1.1'
+            score['meta']['implementation'] = 'native'
         else:
             raise ScoreCardError(f"unknown protocol: {self.protocol}")
 
-        if 'implementation' not in p:
-            raise ScoreCardError(f'did not recognized {p} lib')
-        p['version'] = Env.curl_lib_version(p['implementation'])
+        if 'implementation' not in score['meta']:
+            raise ScoreCardError('did not recognized protocol lib')
+        score['meta']['implementation_version'] = Env.curl_lib_version(score['meta']['implementation'])
 
-        score = {
-            'curl': self.env.curl_fullname(),
-            'os': self.env.curl_os(),
-            'protocol': p,
-            'server': self.server_descr,
-        }
         if handshakes:
             score['handshakes'] = self.handshakes()
         if downloads and len(downloads) > 0:
             score['downloads'] = self.downloads(count=download_count,
-                                                fsizes=downloads)
+                                                fsizes=downloads,
+                                                meta=score['meta'])
         if uploads and len(uploads) > 0:
             score['uploads'] = self.uploads(count=upload_count,
-                                            fsizes=uploads)
+                                            fsizes=uploads,
+                                            meta=score['meta'])
         if requests:
-            score['requests'] = self.requests(req_count=req_count)
-        self.info("\n")
+            score['requests'] = self.requests(count=req_count, meta=score['meta'])
         return score
 
-    def fmt_ms(self, tval):
-        return f'{int(tval*1000)} ms' if tval >= 0 else '--'
 
-    def fmt_size(self, val):
-        if val >= (1024*1024*1024):
-            return f'{val / (1024*1024*1024):0.000f}GB'
-        elif val >= (1024 * 1024):
-            return f'{val / (1024*1024):0.000f}MB'
-        elif val >= 1024:
-            return f'{val / 1024:0.000f}KB'
-        else:
-            return f'{val:0.000f}B'
-
-    def fmt_mbs(self, val):
-        return f'{val/(1024*1024):0.000f} MB/s' if val >= 0 else '--'
-
-    def fmt_reqs(self, val):
-        return f'{val:0.000f} r/s' if val >= 0 else '--'
+def run_score(args, protocol):
+    if protocol not in ['http/1.1', 'h1', 'h2', 'h3']:
+        sys.stderr.write(f'ERROR: protocol "{protocol}" not known to scorecard\n')
+        sys.exit(1)
+    if protocol == 'h1':
+        protocol = 'http/1.1'
 
-    def print_score(self, score):
-        print(f'{score["protocol"]["name"].upper()} in {score["curl"]}')
-        if 'handshakes' in score:
-            print(f'{"Handshakes":<24} {"ipv4":25} {"ipv6":28}')
-            print(f'  {"Host":<17} {"Connect":>12} {"Handshake":>12} '
-                  f'{"Connect":>12} {"Handshake":>12}     {"Errors":<20}')
-            for key, val in score["handshakes"].items():
-                print(f'  {key:<17} {self.fmt_ms(val["ipv4-connect"]):>12} '
-                      f'{self.fmt_ms(val["ipv4-handshake"]):>12} '
-                      f'{self.fmt_ms(val["ipv6-connect"]):>12} '
-                      f'{self.fmt_ms(val["ipv6-handshake"]):>12}     '
-                      f'{"/".join(val["ipv4-errors"] + val["ipv6-errors"]):<20}'
-                      )
-        if 'downloads' in score:
-            # get the key names of all sizes and measurements made
-            sizes = []
-            measures = []
-            m_names = {}
-            mcol_width = 12
-            mcol_sw = 17
-            for sskey, ssval in score['downloads'].items():
-                if isinstance(ssval, str):
-                    continue
-                if sskey not in sizes:
-                    sizes.append(sskey)
-                for mkey, mval in score['downloads'][sskey].items():
-                    if mkey not in measures:
-                        measures.append(mkey)
-                        m_names[mkey] = f'{mkey}({mval["count"]}x{mval["max-parallel"]})'
-            print(f'Downloads from {score["server"]}')
-            print(f'  {"Size":>8}', end='')
-            for m in measures:
-                print(f' {m_names[m]:>{mcol_width}} {"[cpu/rss]":<{mcol_sw}}', end='')
-            print(f' {"Errors":^20}')
-
-            for size in score['downloads']:
-                size_score = score['downloads'][size]
-                print(f'  {size:>8}', end='')
-                errors = []
-                for val in size_score.values():
-                    if 'errors' in val:
-                        errors.extend(val['errors'])
-                for m in measures:
-                    if m in size_score:
-                        print(f' {self.fmt_mbs(size_score[m]["speed"]):>{mcol_width}}', end='')
-                        s = f'[{size_score[m]["stats"]["cpu"]:>.1f}%'\
-                            f'/{self.fmt_size(size_score[m]["stats"]["rss"])}]'
-                        print(f' {s:<{mcol_sw}}', end='')
-                    else:
-                        print(' '*mcol_width, end='')
-                if len(errors):
-                    print(f' {"/".join(errors):<20}')
-                else:
-                    print(f' {"-":^20}')
-
-        if 'uploads' in score:
-            # get the key names of all sizes and measurements made
-            sizes = []
-            measures = []
-            m_names = {}
-            mcol_width = 12
-            mcol_sw = 17
-            for sskey, ssval in score['uploads'].items():
-                if isinstance(ssval, str):
-                    continue
-                if sskey not in sizes:
-                    sizes.append(sskey)
-                for mkey, mval in ssval.items():
-                    if mkey not in measures:
-                        measures.append(mkey)
-                        m_names[mkey] = f'{mkey}({mval["count"]}x{mval["max-parallel"]})'
-
-            print(f'Uploads to {score["server"]}')
-            print(f'  {"Size":>8}', end='')
-            for m in measures:
-                print(f' {m_names[m]:>{mcol_width}} {"[cpu/rss]":<{mcol_sw}}', end='')
-            print(f' {"Errors":^20}')
-
-            for size in sizes:
-                size_score = score['uploads'][size]
-                print(f'  {size:>8}', end='')
-                errors = []
-                for val in size_score.values():
-                    if 'errors' in val:
-                        errors.extend(val['errors'])
-                for m in measures:
-                    if m in size_score:
-                        print(f' {self.fmt_mbs(size_score[m]["speed"]):>{mcol_width}}', end='')
-                        stats = size_score[m]["stats"]
-                        if 'cpu' in stats:
-                            s = f'[{stats["cpu"]:>.1f}%/{self.fmt_size(stats["rss"])}]'
-                        else:
-                            s = '[???/???]'
-                        print(f' {s:<{mcol_sw}}', end='')
-                    else:
-                        print(' '*mcol_width, end='')
-                if len(errors):
-                    print(f' {"/".join(errors):<20}')
-                else:
-                    print(f' {"-":^20}')
-
-        if 'requests' in score:
-            sizes = []
-            measures = []
-            m_names = {}
-            mcol_width = 9
-            mcol_sw = 13
-            for sskey, ssval in score['requests'].items():
-                if isinstance(ssval, (str, int)):
-                    continue
-                if sskey not in sizes:
-                    sizes.append(sskey)
-                for mkey in score['requests'][sskey]:
-                    if mkey not in measures:
-                        measures.append(mkey)
-                        m_names[mkey] = f'{mkey}'
-
-            print('Requests (max parallel) to {score["server"]}')
-            print(f'  {"Size":>6} {"Reqs":>6}', end='')
-            for m in measures:
-                print(f' {m_names[m]:>{mcol_width}} {"[cpu/rss]":<{mcol_sw}}', end='')
-            print(f' {"Errors":^10}')
-
-            for size in sizes:
-                size_score = score['requests'][size]
-                count = score['requests']['count']
-                print(f'  {size:>6} {count:>6}', end='')
-                errors = []
-                for val in size_score.values():
-                    if 'errors' in val:
-                        errors.extend(val['errors'])
-                for m in measures:
-                    if m in size_score:
-                        print(f' {self.fmt_reqs(size_score[m]["speed"]):>{mcol_width}}', end='')
-                        s = f'[{size_score[m]["stats"]["cpu"]:>.1f}%'\
-                            f'/{self.fmt_size(size_score[m]["stats"]["rss"])}]'
-                        print(f' {s:<{mcol_sw}}', end='')
-                    else:
-                        print(' '*mcol_width, end='')
-                if len(errors):
-                    print(f' {"/".join(errors):<10}')
-                else:
-                    print(f' {"-":^10}')
-
-
-def parse_size(s):
-    m = re.match(r'(\d+)(mb|kb|gb)?', s, re.IGNORECASE)
-    if m is None:
-        raise Exception(f'unrecognized size: {s}')
-    size = int(m.group(1))
-    if not m.group(2):
-        pass
-    elif m.group(2).lower() == 'kb':
-        size *= 1024
-    elif m.group(2).lower() == 'mb':
-        size *= 1024 * 1024
-    elif m.group(2).lower() == 'gb':
-        size *= 1024 * 1024 * 1024
-    return size
-
-
-def main():
-    parser = argparse.ArgumentParser(prog='scorecard', description="""
-        Run a range of tests to give a scorecard for an HTTP protocol
-        'h3' or 'h2' implementation in curl.
-        """)
-    parser.add_argument("-v", "--verbose", action='count', default=1,
-                        help="log more output on stderr")
-    parser.add_argument("-j", "--json", action='store_true',
-                        default=False, help="print json instead of text")
-    parser.add_argument("-H", "--handshakes", action='store_true',
-                        default=False, help="evaluate handshakes only")
-    parser.add_argument("-d", "--downloads", action='store_true',
-                        default=False, help="evaluate downloads")
-    parser.add_argument("--download", action='append', type=str,
-                        default=None, help="evaluate download size")
-    parser.add_argument("--download-count", action='store', type=int,
-                        default=50, help="perform that many downloads")
-    parser.add_argument("--download-parallel", action='store', type=int,
-                        default=0, help="perform that many downloads in parallel (default all)")
-    parser.add_argument("-u", "--uploads", action='store_true',
-                        default=False, help="evaluate uploads")
-    parser.add_argument("--upload", action='append', type=str,
-                        default=None, help="evaluate upload size")
-    parser.add_argument("--upload-count", action='store', type=int,
-                        default=50, help="perform that many uploads")
-    parser.add_argument("-r", "--requests", action='store_true',
-                        default=False, help="evaluate requests")
-    parser.add_argument("--request-count", action='store', type=int,
-                        default=5000, help="perform that many requests")
-    parser.add_argument("--httpd", action='store_true', default=False,
-                        help="evaluate httpd server only")
-    parser.add_argument("--caddy", action='store_true', default=False,
-                        help="evaluate caddy server only")
-    parser.add_argument("--curl-verbose", action='store_true',
-                        default=False, help="run curl with `-v`")
-    parser.add_argument("protocol", default='h2', nargs='?',
-                        help="Name of protocol to score")
-    parser.add_argument("--start-only", action='store_true', default=False,
-                        help="only start the servers")
-    parser.add_argument("--remote", action='store', type=str,
-                        default=None, help="score against the remote server at <ip>:<port>")
-    args = parser.parse_args()
-
-    if args.verbose > 0:
-        console = logging.StreamHandler()
-        console.setLevel(logging.INFO)
-        console.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
-        logging.getLogger('').addHandler(console)
-
-    protocol = args.protocol
     handshakes = True
     downloads = [1024 * 1024, 10 * 1024 * 1024, 100 * 1024 * 1024]
-    if args.download is not None:
+    if args.download_sizes is not None:
         downloads = []
-        for x in args.download:
-            downloads.extend([parse_size(s) for s in x.split(',')])
+        for x in args.download_sizes:
+            downloads.extend([Card.parse_size(s) for s in x.split(',')])
 
     uploads = [1024 * 1024, 10 * 1024 * 1024, 100 * 1024 * 1024]
-    if args.upload is not None:
+    if args.upload_sizes is not None:
         uploads = []
-        for x in args.upload:
-            uploads.extend([parse_size(s) for s in x.split(',')])
+        for x in args.upload_sizes:
+            uploads.extend([Card.parse_size(s) for s in x.split(',')])
 
     requests = True
     if args.downloads or args.uploads or args.requests or args.handshakes:
@@ -738,7 +633,7 @@ def main():
         requests = args.requests
 
     test_httpd = protocol != 'h3'
-    test_caddy = True
+    test_caddy = protocol == 'h3'
     if args.caddy or args.httpd:
         test_caddy = args.caddy
         test_httpd = args.httpd
@@ -761,13 +656,14 @@ def main():
             test_caddy = False
             remote_addr = m.group(1)
             remote_port = int(m.group(2))
-            card = ScoreCard(env=env,
-                             protocol=protocol,
-                             server_descr=f'Server at {args.remote}',
-                             server_addr=remote_addr,
-                             server_port=remote_port,
-                             verbose=args.verbose, curl_verbose=args.curl_verbose,
-                             download_parallel=args.download_parallel)
+            card = ScoreRunner(env=env,
+                               protocol=protocol,
+                               server_descr=f'Server at {args.remote}',
+                               server_addr=remote_addr,
+                               server_port=remote_port,
+                               verbose=args.verbose,
+                               curl_verbose=args.curl_verbose,
+                               download_parallel=args.download_parallel)
             cards.append(card)
 
         if test_httpd:
@@ -781,24 +677,24 @@ def main():
                 nghttpx = NghttpxQuic(env=env)
                 nghttpx.clear_logs()
                 assert nghttpx.initial_start()
-                server_descr = f'nghttpx: https:{env.h3_port} [backend httpd: {env.httpd_version()}, https:{env.https_port}]'
+                server_descr = f'nghttpx: https:{env.h3_port} [backend httpd/{env.httpd_version()}]'
                 server_port = env.h3_port
             else:
-                server_descr = f'httpd: {env.httpd_version()}, http:{env.http_port} https:{env.https_port}'
+                server_descr = f'httpd/{env.httpd_version()}'
                 server_port = env.https_port
-            card = ScoreCard(env=env,
-                             protocol=protocol,
-                             server_descr=server_descr,
-                             server_port=server_port,
-                             verbose=args.verbose, curl_verbose=args.curl_verbose,
-                             download_parallel=args.download_parallel)
+            card = ScoreRunner(env=env,
+                               protocol=protocol,
+                               server_descr=server_descr,
+                               server_port=server_port,
+                               verbose=args.verbose, curl_verbose=args.curl_verbose,
+                               download_parallel=args.download_parallel)
             card.setup_resources(server_docs, downloads)
             cards.append(card)
 
         if test_caddy and env.caddy:
             backend = ''
             if uploads and httpd is None:
-                backend = f' [backend httpd: {env.httpd_version()}, http:{env.http_port} https:{env.https_port}]'
+                backend = f' [backend httpd: {env.httpd_version()}]'
                 httpd = Httpd(env=env)
                 assert httpd.exists(), \
                     f'httpd not found: {env.httpd}'
@@ -807,15 +703,15 @@ def main():
             caddy = Caddy(env=env)
             caddy.clear_logs()
             assert caddy.initial_start()
-            server_descr = f'Caddy: {env.caddy_version()}, http:{env.caddy_http_port} https:{env.caddy_https_port}{backend}'
+            server_descr = f'Caddy/{env.caddy_version()} {backend}'
             server_port = caddy.port
             server_docs = caddy.docs_dir
-            card = ScoreCard(env=env,
-                             protocol=protocol,
-                             server_descr=server_descr,
-                             server_port=server_port,
-                             verbose=args.verbose, curl_verbose=args.curl_verbose,
-                             download_parallel=args.download_parallel)
+            card = ScoreRunner(env=env,
+                               protocol=protocol,
+                               server_descr=server_descr,
+                               server_port=server_port,
+                               verbose=args.verbose, curl_verbose=args.curl_verbose,
+                               download_parallel=args.download_parallel)
             card.setup_resources(server_docs, downloads)
             cards.append(card)
 
@@ -834,11 +730,12 @@ def main():
                                    uploads=uploads,
                                    upload_count=args.upload_count,
                                    req_count=args.request_count,
-                                   requests=requests)
+                                   requests=requests,
+                                   nsamples=args.samples)
                 if args.json:
                     print(json.JSONEncoder(indent=2).encode(score))
                 else:
-                    card.print_score(score)
+                    Card.print_score(score)
 
     except ScoreCardError as ex:
         sys.stderr.write(f"ERROR: {ex}\n")
@@ -853,6 +750,86 @@ def main():
             nghttpx.stop(wait_dead=False)
         if httpd:
             httpd.stop()
+    return rv
+
+
+def print_file(filename):
+    if not os.path.exists(filename):
+        sys.stderr.write(f"ERROR: file does not exist: {filename}\n")
+        return 1
+    with open(filename) as file:
+        data = json.load(file)
+    Card.print_score(data)
+    return 0
+
+
+def main():
+    parser = argparse.ArgumentParser(prog='scorecard', description="""
+        Run a range of tests to give a scorecard for an HTTP protocol
+        'h3' or 'h2' implementation in curl.
+        """)
+    parser.add_argument("-v", "--verbose", action='count', default=1,
+                        help="log more output on stderr")
+    parser.add_argument("-j", "--json", action='store_true',
+                        default=False, help="print json instead of text")
+    parser.add_argument("-H", "--handshakes", action='store_true',
+                        default=False, help="evaluate handshakes only")
+    parser.add_argument("-d", "--downloads", action='store_true',
+                        default=False, help="evaluate downloads")
+    parser.add_argument("--download-sizes", action='append', type=str,
+                        metavar='numberlist',
+                        default=None, help="evaluate download size")
+    parser.add_argument("--download-count", action='store', type=int,
+                        metavar='number',
+                        default=50, help="perform that many downloads")
+    parser.add_argument("--samples", action='store', type=int, metavar='number',
+                        default=1, help="how many sample runs to make")
+    parser.add_argument("--download-parallel", action='store', type=int,
+                        metavar='number', default=0,
+                        help="perform that many downloads in parallel (default all)")
+    parser.add_argument("-u", "--uploads", action='store_true',
+                        default=False, help="evaluate uploads")
+    parser.add_argument("--upload-sizes", action='append', type=str,
+                        metavar='numberlist',
+                        default=None, help="evaluate upload size")
+    parser.add_argument("--upload-count", action='store', type=int,
+                        metavar='number', default=50,
+                        help="perform that many uploads")
+    parser.add_argument("-r", "--requests", action='store_true',
+                        default=False, help="evaluate requests")
+    parser.add_argument("--request-count", action='store', type=int,
+                        metavar='number',
+                        default=5000, help="perform that many requests")
+    parser.add_argument("--httpd", action='store_true', default=False,
+                        help="evaluate httpd server only")
+    parser.add_argument("--caddy", action='store_true', default=False,
+                        help="evaluate caddy server only")
+    parser.add_argument("--curl-verbose", action='store_true',
+                        default=False, help="run curl with `-v`")
+    parser.add_argument("--print", type=str, default=None, metavar='filename',
+                        help="print the results from a JSON file")
+    parser.add_argument("protocol", default=None, nargs='?',
+                        help="Name of protocol to score")
+    parser.add_argument("--start-only", action='store_true', default=False,
+                        help="only start the servers")
+    parser.add_argument("--remote", action='store', type=str,
+                        default=None, help="score against the remote server at <ip>:<port>")
+    args = parser.parse_args()
+
+    if args.verbose > 0:
+        console = logging.StreamHandler()
+        console.setLevel(logging.INFO)
+        console.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
+        logging.getLogger('').addHandler(console)
+
+    if args.print:
+        rv = print_file(args.print)
+    elif not args.protocol:
+        parser.print_usage()
+        rv = 1
+    else:
+        rv = run_score(args, args.protocol)
+
     sys.exit(rv)
 
 
index c6f38b3416a4e52ece1f7ec4b950d210ea84eca9..ad56ce13c86bf9c74306ed92f32b0410f38d82fd 100644 (file)
@@ -178,6 +178,9 @@ class Caddy:
                 '{',
                 f'  http_port {self._http_port}',
                 f'  https_port {self._https_port}',
+                '  log default {',
+                '     level ERROR',
+                '}',
                 f'  servers :{self._https_port} {{',
                 '    protocols h3 h2 h1',
                 '  }',
index aa66eab296c6b4454e99c65a634d23729a7469a3..1bf117244f9723289b4902e6723d408f84e79816 100644 (file)
@@ -53,8 +53,16 @@ def init_config_from(conf_path):
 
 
 TESTS_HTTPD_PATH = os.path.dirname(os.path.dirname(__file__))
+PROJ_PATH = os.path.dirname(os.path.dirname(TESTS_HTTPD_PATH))
 TOP_PATH = os.path.join(os.getcwd(), os.path.pardir)
-DEF_CONFIG = init_config_from(os.path.join(TOP_PATH, 'tests', 'http', 'config.ini'))
+CONFIG_PATH = os.path.join(TOP_PATH, 'tests', 'http', 'config.ini')
+if not os.path.exists(CONFIG_PATH):
+    ALT_CONFIG_PATH = os.path.join(PROJ_PATH, 'tests', 'http', 'config.ini')
+    if not os.path.exists(ALT_CONFIG_PATH):
+        raise Exception(f'unable to find config.ini in {CONFIG_PATH} nor {ALT_CONFIG_PATH}')
+    TOP_PATH = PROJ_PATH
+    CONFIG_PATH = ALT_CONFIG_PATH
+DEF_CONFIG = init_config_from(CONFIG_PATH)
 CURL = os.path.join(TOP_PATH, 'src', 'curl')
 
 
index 8a508f6c6747dfc606a4f226e69a99ce035fe97a..e9a712117a0340808abc75625c799fe20eab8db5 100644 (file)
@@ -242,7 +242,7 @@ class NghttpxQuic(Nghttpx):
             '--frontend-quic-early-data',
             f'--backend=127.0.0.1,{self.env.https_port};{self._domain};sni={self._domain};proto=h2;tls',
             f'--backend=127.0.0.1,{self.env.http_port}',
-            '--log-level=INFO',
+            '--log-level=ERROR',
             f'--pid-file={self._pid_file}',
             f'--errorlog-file={self._error_log}',
             f'--conf={self._conf_file}',
@@ -296,7 +296,7 @@ class NghttpxFwd(Nghttpx):
             '--http2-proxy',
             f'--frontend=*,{self._port}',
             f'--backend=127.0.0.1,{self.env.proxy_port}',
-            '--log-level=INFO',
+            '--log-level=ERROR',
             f'--pid-file={self._pid_file}',
             f'--errorlog-file={self._error_log}',
             f'--conf={self._conf_file}',