]> git.ipfire.org Git - thirdparty/curl.git/commitdiff
lib: timer stats improvements
authorStefan Eissing <stefan@eissing.org>
Tue, 25 Nov 2025 09:00:23 +0000 (10:00 +0100)
committerDaniel Stenberg <daniel@haxx.se>
Tue, 25 Nov 2025 15:18:59 +0000 (16:18 +0100)
* move the TIMER_POSTQUEUE to the time a connection is chosen,
  so that TIMER_NAMELOOKUP always happens afterwards
* client writer: do not trigger TIMER_STARTTRANSFER on CLIENTWRITE_INFO
  as ftp and other pingpong protocols write that before starting anything
  that is the transfer itself
* Eliminating debug tracing of "closed stream/connection - bailing"
  as it is confusing; the connection is not really closed in most cases.
* Setting `data->req.upload_done` correctly, so that no "abort upload"
  is happening at the end of a perfectly fine download.
* Adding test cases with up-/download of 0-length files.
* pytest: add a "timeline" of timer value checks to Results in curl.py,
  so that this can be used in several test cases, replacing the local
  stuff in test_16
* add timeline checks to ftp test cases

Closes #19269

14 files changed:
lib/cfilters.c
lib/multi.c
lib/request.c
lib/sendf.c
lib/transfer.c
lib/url.c
lib/vquic/curl_osslq.c
tests/http/test_02_download.py
tests/http/test_16_info.py
tests/http/test_30_vsftpd.py
tests/http/test_31_vsftpds.py
tests/http/test_32_ftps_vsftpd.py
tests/http/testenv/curl.py
tests/libtest/lib500.c

index 507512f8aa461906dab4b405fe766dda481c1498..f0da4778a685ee64239d0fe35b8790c5703f5c59 100644 (file)
@@ -63,8 +63,8 @@ CURLcode Curl_cf_def_shutdown(struct Curl_cfilter *cf,
   return CURLE_OK;
 }
 
-static void conn_report_connect_stats(struct Curl_easy *data,
-                                      struct connectdata *conn);
+static void conn_report_connect_stats(struct Curl_cfilter *cf,
+                                      struct Curl_easy *data);
 
 CURLcode Curl_cf_def_adjust_pollset(struct Curl_cfilter *cf,
                                     struct Curl_easy *data,
@@ -508,7 +508,7 @@ CURLcode Curl_conn_connect(struct Curl_easy *data,
        * persist information at the connection. E.g. cf-socket sets the
        * socket and ip related information. */
       cf_cntrl_update_info(data, data->conn);
-      conn_report_connect_stats(data, data->conn);
+      conn_report_connect_stats(cf, data);
       data->conn->keepalive = curlx_now();
 #ifndef CURL_DISABLE_VERBOSE_STRINGS
       result = cf_verboseconnect(data, cf);
@@ -518,7 +518,7 @@ CURLcode Curl_conn_connect(struct Curl_easy *data,
     else if(result) {
       CURL_TRC_CF(data, cf, "Curl_conn_connect(), filter returned %d",
                   result);
-      conn_report_connect_stats(data, data->conn);
+      conn_report_connect_stats(cf, data);
       goto out;
     }
 
@@ -1008,10 +1008,9 @@ static void cf_cntrl_update_info(struct Curl_easy *data,
 /**
  * Update connection statistics
  */
-static void conn_report_connect_stats(struct Curl_easy *data,
-                                      struct connectdata *conn)
+static void conn_report_connect_stats(struct Curl_cfilter *cf,
+                                      struct Curl_easy *data)
 {
-  struct Curl_cfilter *cf = conn->cfilter[FIRSTSOCKET];
   if(cf) {
     struct curltime connected;
     struct curltime appconnected;
index 00af3ca2bddfc1893f24bce8404bd660ee442fd1..db04d64c231b4f201911c025e495327c6438dc01 100644 (file)
@@ -2337,7 +2337,7 @@ static CURLMcode state_connect(struct Curl_multi *multi,
     process_pending_handles(data->multi);
 
   if(!result) {
-    *nowp = Curl_pgrsTime(data, TIMER_POSTQUEUE);
+    *nowp = curlx_now();
     if(async)
       /* We are now waiting for an asynchronous name lookup */
       multistate(data, MSTATE_RESOLVING);
index 5bfcdfbfb8c7ba23cad4577e0fffc1e7ae2fc059..324c4c48f96d545ba7f5de160744e2266b3d854f 100644 (file)
@@ -393,6 +393,11 @@ CURLcode Curl_req_send(struct Curl_easy *data, struct dynbuf *req,
       return result;
     buf += nwritten;
     blen -= nwritten;
+    if(!blen) {
+      result = req_set_upload_done(data);
+      if(result)
+        return result;
+    }
   }
 
   if(blen) {
@@ -469,6 +474,9 @@ CURLcode Curl_req_stop_send_recv(struct Curl_easy *data)
   /* stop receiving and ALL sending as well, including PAUSE and HOLD.
    * We might still be paused on receive client writes though, so
    * keep those bits around. */
+  CURLcode result = CURLE_OK;
+  if(data->req.keepon & KEEP_SEND)
+    result = Curl_req_abort_sending(data);
   data->req.keepon &= ~(KEEP_RECV|KEEP_SEND);
-  return Curl_req_abort_sending(data);
+  return result;
 }
index 4eabc14402da584125a24b958f6e79a1ec9ccdd8..6731cd874b6b08872a7247c88e491bc368204038 100644 (file)
@@ -230,7 +230,8 @@ static CURLcode cw_download_write(struct Curl_easy *data,
   size_t nwrite, excess_len = 0;
   bool is_connect = !!(type & CLIENTWRITE_CONNECT);
 
-  if(!is_connect && !ctx->started_response) {
+  if(!ctx->started_response &&
+     !(type & (CLIENTWRITE_INFO|CLIENTWRITE_CONNECT))) {
     Curl_pgrsTime(data, TIMER_STARTTRANSFER);
     Curl_rlimit_start(&data->progress.dl.rlimit, curlx_now());
     ctx->started_response = TRUE;
index 04a013361ebd45ac54bf5faef9d093acdff5baea..f2df2cef8cbaa3fa85c28ba00abff5d6e934a26d 100644 (file)
@@ -301,18 +301,7 @@ static CURLcode sendrecv_dl(struct Curl_easy *data,
     /* We only get a 0-length receive at the end of the response */
     is_eos = (blen == 0);
 
-    if(!blen && (conn->recv[FIRSTSOCKET] == Curl_cf_recv)) {
-      /* if we receive 0 or less here and the protocol handler did not
-         replace the connection's `recv` callback, either the data transfer
-         is done or the server closed the connection and
-         we bail out from this!
-         With a `recv` replacement, we assume the protocol handler knows
-         what it is doing and a 0-length receive is fine. For example,
-         SFTP downloads of an empty file would show this. See #19165. */
-      if(is_multiplex)
-        DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
-      else
-        DEBUGF(infof(data, "nread <= 0, server closed connection, bailing"));
+    if(!blen) {
       result = Curl_req_stop_send_recv(data);
       if(result)
         goto out;
index 1d4c6b1fc419360a64c82182bf87fae122ce518f..d953734dce9bc19e0190664113505613d81d48fb 100644 (file)
--- a/lib/url.c
+++ b/lib/url.c
@@ -3834,6 +3834,7 @@ CURLcode Curl_connect(struct Curl_easy *data,
 
   if(!result) {
     DEBUGASSERT(conn);
+    Curl_pgrsTime(data, TIMER_POSTQUEUE);
     if(reused) {
       if(CONN_ATTACHED(conn) > 1)
         /* multiplexed */
index f447e8dddf0f59cdae0b73ddb20ee2e98115d03b..b890a8d9516dc2873b9d14bbdd122b04eef7693a 100644 (file)
@@ -1803,6 +1803,14 @@ static CURLcode cf_osslq_connect(struct Curl_cfilter *cf,
 
   if(err == 1) {
     /* connected */
+    if(!ctx->got_first_byte) {
+      /* if not recorded yet, take the timestamp before we called
+       * SSL_do_handshake() as the time we received the first packet. */
+      ctx->got_first_byte = TRUE;
+      ctx->first_byte_at = now;
+    }
+    /* Record the handshake complete with a new time stamp. */
+    now = curlx_now();
     ctx->handshake_at = now;
     ctx->q.last_io = now;
     CURL_TRC_CF(data, cf, "handshake complete after %" FMT_TIMEDIFF_T "ms",
index 8cfd68b20463e6e504970f7ea064aa7a24166046..dcfe9690bcb47366f33d67f21161c069db6fcc49 100644 (file)
@@ -43,6 +43,7 @@ class TestDownload:
     @pytest.fixture(autouse=True, scope='class')
     def _class_scope(self, env, httpd):
         indir = httpd.docs_dir
+        env.make_data_file(indir=indir, fname="data-0k", fsize=0)
         env.make_data_file(indir=indir, fname="data-10k", fsize=10*1024)
         env.make_data_file(indir=indir, fname="data-100k", fsize=100*1024)
         env.make_data_file(indir=indir, fname="data-1m", fsize=1024*1024)
@@ -52,9 +53,10 @@ class TestDownload:
 
     # download 1 file
     @pytest.mark.parametrize("proto", Env.http_protos())
-    def test_02_01_download_1(self, env: Env, httpd, nghttpx, proto):
+    @pytest.mark.parametrize("docname", ['data.json', 'data-0k', 'data-10k', 'data-100k'])
+    def test_02_01_download_1(self, env: Env, httpd, nghttpx, proto, docname):
         curl = CurlClient(env=env)
-        url = f'https://{env.authority_for(env.domain1, proto)}/data.json'
+        url = f'https://{env.authority_for(env.domain1, proto)}/{docname}'
         r = curl.http_download(urls=[url], alpn_proto=proto)
         r.check_response(http_status=200)
 
index 4e0d1ed5b1a9160c9149c39df3dcfcc940aecb23..80984a284ee1f171cd7cd38e5650ff2fbec3c8e6 100644 (file)
@@ -56,6 +56,7 @@ class TestInfo:
                       remote_ip='127.0.0.1')
         for idx, s in enumerate(r.stats):
             self.check_stat(idx, s, r, dl_size=30, ul_size=0)
+            r.check_stats_timeline(idx)
 
     # download plain file with a 302 redirect
     @pytest.mark.parametrize("proto", Env.http_protos())
@@ -71,6 +72,7 @@ class TestInfo:
                       remote_ip='127.0.0.1')
         for idx, s in enumerate(r.stats):
             self.check_stat(idx, s, r, dl_size=30, ul_size=0)
+            r.check_stats_timeline(idx)
 
     @pytest.mark.parametrize("proto", Env.http_protos())
     def test_16_03_info_upload(self, env: Env, httpd, nghttpx, proto):
@@ -89,6 +91,7 @@ class TestInfo:
                       remote_ip='127.0.0.1')
         for idx, s in enumerate(r.stats):
             self.check_stat(idx, s, r, dl_size=fsize, ul_size=fsize)
+            r.check_stats_timeline(idx)
 
     # download plain file via http: ('time_appconnect' is 0)
     @pytest.mark.parametrize("proto", ['http/1.1'])
@@ -101,9 +104,9 @@ class TestInfo:
                       remote_port=env.http_port, remote_ip='127.0.0.1')
         for idx, s in enumerate(r.stats):
             self.check_stat(idx, s, r, dl_size=30, ul_size=0)
+            r.check_stats_timeline(idx)
 
     def check_stat(self, idx, s, r, dl_size=None, ul_size=None):
-        self.check_stat_times(s, idx)
         # we always send something
         self.check_stat_positive(s, idx, 'size_request')
         # we always receive response headers
@@ -118,54 +121,3 @@ class TestInfo:
     def check_stat_positive(self, s, idx, key):
         assert key in s, f'stat #{idx} "{key}" missing: {s}'
         assert s[key] > 0, f'stat #{idx} "{key}" not positive: {s}'
-
-    def check_stat_positive_or_0(self, s, idx, key):
-        assert key in s, f'stat #{idx} "{key}" missing: {s}'
-        assert s[key] >= 0, f'stat #{idx} "{key}" not positive: {s}'
-
-    def check_stat_zero(self, s, key):
-        assert key in s, f'stat "{key}" missing: {s}'
-        assert s[key] == 0, f'stat "{key}" not zero: {s}'
-
-    def check_stat_times(self, s, idx):
-        # check timings reported on a transfer for consistency
-        url = s['url_effective']
-        # connect time is sometimes reported as 0 by openssl-quic (sigh)
-        self.check_stat_positive_or_0(s, idx, 'time_connect')
-        # all stat keys which reporting timings
-        all_keys = {
-            'time_appconnect', 'time_redirect',
-            'time_pretransfer', 'time_starttransfer', 'time_total'
-        }
-        # stat keys where we expect a positive value
-        pos_keys = {'time_pretransfer', 'time_starttransfer', 'time_total', 'time_queue'}
-        if s['num_connects'] > 0:
-            if url.startswith('https:'):
-                pos_keys.add('time_appconnect')
-        if s['num_redirects'] > 0:
-            pos_keys.add('time_redirect')
-        zero_keys = all_keys - pos_keys
-        # assert all zeros are zeros and the others are positive
-        for key in zero_keys:
-            self.check_stat_zero(s, key)
-        for key in pos_keys:
-            self.check_stat_positive(s, idx, key)
-        # assert that all timers before "time_pretransfer" are less or equal
-        for key in ['time_appconnect', 'time_connect', 'time_namelookup']:
-            assert s[key] < s['time_pretransfer'], f'time "{key}" larger than' \
-                f'"time_pretransfer": {s}'
-        # assert transfer total is after pretransfer.
-        # (in MOST situations, pretransfer is before starttransfer, BUT
-        # in protocols like HTTP we might get a server response already before
-        # we transition to multi state DID.)
-        assert s['time_pretransfer'] <= s['time_total'], f'"time_pretransfer" '\
-            f'greater than "time_total", {s}'
-        # assert that transfer start is before total
-        assert s['time_starttransfer'] <= s['time_total'], f'"time_starttransfer" '\
-            f'greater than "time_total", {s}'
-        if s['num_redirects'] > 0:
-            assert s['time_queue'] < s['time_starttransfer'], f'"time_queue" '\
-                f'greater/equal than "time_starttransfer", {s}'
-        else:
-            assert s['time_queue'] <= s['time_starttransfer'], f'"time_queue" '\
-                f'greater than "time_starttransfer", {s}'
index bffeae35cf7b318c69b7b6c92c97f3a5e2028509..7d113d5363fe0fc549e9247543837ffc9942d3b7 100644 (file)
@@ -63,10 +63,12 @@ class TestVsFTPD:
             shutil.rmtree(vsftpd.docs_dir)
         if not os.path.exists(vsftpd.docs_dir):
             os.makedirs(vsftpd.docs_dir)
+        self._make_docs_file(docs_dir=vsftpd.docs_dir, fname='data-0k', fsize=0)
         self._make_docs_file(docs_dir=vsftpd.docs_dir, fname='data-1k', fsize=1024)
         self._make_docs_file(docs_dir=vsftpd.docs_dir, fname='data-10k', fsize=10*1024)
         self._make_docs_file(docs_dir=vsftpd.docs_dir, fname='data-1m', fsize=1024*1024)
         self._make_docs_file(docs_dir=vsftpd.docs_dir, fname='data-10m', fsize=10*1024*1024)
+        env.make_data_file(indir=env.gen_dir, fname="upload-0k", fsize=0)
         env.make_data_file(indir=env.gen_dir, fname="upload-1k", fsize=1024)
         env.make_data_file(indir=env.gen_dir, fname="upload-100k", fsize=100*1024)
         env.make_data_file(indir=env.gen_dir, fname="upload-1m", fsize=1024*1024)
@@ -77,11 +79,12 @@ class TestVsFTPD:
         r = curl.ftp_get(urls=[url], with_stats=True)
         r.check_stats(count=1, http_status=226)
         lines = open(os.path.join(curl.run_dir, 'download_#1.data')).readlines()
-        assert len(lines) == 4, f'list: {lines}'
+        assert len(lines) == 5, f'list: {lines}'
+        r.check_stats_timelines()
 
     # download 1 file, no SSL
     @pytest.mark.parametrize("docname", [
-        'data-1k', 'data-1m', 'data-10m'
+        'data-0k', 'data-1k', 'data-1m', 'data-10m'
     ])
     def test_30_02_download_1(self, env: Env, vsftpd: VsFTPD, docname):
         curl = CurlClient(env=env)
@@ -91,9 +94,10 @@ class TestVsFTPD:
         r = curl.ftp_get(urls=[url], with_stats=True)
         r.check_stats(count=count, http_status=226)
         self.check_downloads(curl, srcfile, count)
+        r.check_stats_timelines()
 
     @pytest.mark.parametrize("docname", [
-        'data-1k', 'data-1m', 'data-10m'
+        'data-0k', 'data-1k', 'data-1m', 'data-10m'
     ])
     def test_30_03_download_10_serial(self, env: Env, vsftpd: VsFTPD, docname):
         curl = CurlClient(env=env)
@@ -104,9 +108,10 @@ class TestVsFTPD:
         r.check_stats(count=count, http_status=226)
         self.check_downloads(curl, srcfile, count)
         assert r.total_connects == count + 1, 'should reuse the control conn'
+        r.check_stats_timelines()
 
     @pytest.mark.parametrize("docname", [
-        'data-1k', 'data-1m', 'data-10m'
+        'data-0k', 'data-1k', 'data-1m', 'data-10m'
     ])
     def test_30_04_download_10_parallel(self, env: Env, vsftpd: VsFTPD, docname):
         curl = CurlClient(env=env)
@@ -119,9 +124,10 @@ class TestVsFTPD:
         r.check_stats(count=count, http_status=226)
         self.check_downloads(curl, srcfile, count)
         assert r.total_connects > count + 1, 'should have used several control conns'
+        r.check_stats_timelines()
 
     @pytest.mark.parametrize("docname", [
-        'upload-1k', 'upload-100k', 'upload-1m'
+        'upload-0k', 'upload-1k', 'upload-100k', 'upload-1m'
     ])
     def test_30_05_upload_1(self, env: Env, vsftpd: VsFTPD, docname):
         curl = CurlClient(env=env)
@@ -133,6 +139,7 @@ class TestVsFTPD:
         r = curl.ftp_upload(urls=[url], fupload=f'{srcfile}', with_stats=True)
         r.check_stats(count=count, http_status=226)
         self.check_upload(env, vsftpd, docname=docname)
+        r.check_stats_timelines()
 
     def _rmf(self, path):
         if os.path.exists(path):
@@ -188,6 +195,7 @@ class TestVsFTPD:
         ])
         r.check_stats(count=count, http_status=226)
         self.check_downloads(curl, srcfile, count)
+        r.check_stats_timelines()
 
     def test_30_09_active_up_file(self, env: Env, vsftpd: VsFTPD):
         docname = 'upload-1k'
@@ -202,6 +210,7 @@ class TestVsFTPD:
         ])
         r.check_stats(count=count, http_status=226)
         self.check_upload(env, vsftpd, docname=docname)
+        r.check_stats_timelines()
 
     def test_30_10_active_up_ascii(self, env: Env, vsftpd: VsFTPD):
         docname = 'upload-1k'
@@ -216,6 +225,7 @@ class TestVsFTPD:
         ])
         r.check_stats(count=count, http_status=226)
         self.check_upload(env, vsftpd, docname=docname, binary=False)
+        r.check_stats_timelines()
 
     def test_30_11_download_non_existing(self, env: Env, vsftpd: VsFTPD):
         curl = CurlClient(env=env)
@@ -223,6 +233,7 @@ class TestVsFTPD:
         r = curl.ftp_get(urls=[url], with_stats=True)
         r.check_exit_code(78)
         r.check_stats(count=1, exitcode=78)
+        r.check_stats_timelines()
 
     def check_downloads(self, client, srcfile: str, count: int,
                         complete: bool = True):
index f7e32cb77b7748cf893b9cab6ffa7ab895b12710..93fef708ccdd63cef5117a7d7bd279d42762c8af 100644 (file)
@@ -85,6 +85,7 @@ class TestVsFTPD:
         r.check_stats(count=1, http_status=226)
         lines = open(os.path.join(curl.run_dir, 'download_#1.data')).readlines()
         assert len(lines) == 4, f'list: {lines}'
+        r.check_stats_timelines()
 
     # download 1 file, no SSL
     @pytest.mark.parametrize("docname", [
@@ -98,6 +99,7 @@ class TestVsFTPD:
         r = curl.ftp_ssl_get(urls=[url], with_stats=True)
         r.check_stats(count=count, http_status=226)
         self.check_downloads(curl, srcfile, count)
+        r.check_stats_timelines()
 
     @pytest.mark.parametrize("docname", [
         'data-1k', 'data-1m', 'data-10m'
@@ -111,6 +113,7 @@ class TestVsFTPD:
         r.check_stats(count=count, http_status=226)
         self.check_downloads(curl, srcfile, count)
         assert r.total_connects == count + 1, 'should reuse the control conn'
+        r.check_stats_timelines()
 
     @pytest.mark.parametrize("docname", [
         'data-1k', 'data-1m', 'data-10m'
@@ -126,6 +129,7 @@ class TestVsFTPD:
         r.check_stats(count=count, http_status=226)
         self.check_downloads(curl, srcfile, count)
         assert r.total_connects > count + 1, 'should have used several control conns'
+        r.check_stats_timelines()
 
     @pytest.mark.parametrize("docname", [
         'upload-1k', 'upload-100k', 'upload-1m'
@@ -140,6 +144,7 @@ class TestVsFTPD:
         r = curl.ftp_ssl_upload(urls=[url], fupload=f'{srcfile}', with_stats=True)
         r.check_stats(count=count, http_status=226)
         self.check_upload(env, vsftpds, docname=docname)
+        r.check_stats_timelines()
 
     def _rmf(self, path):
         if os.path.exists(path):
index 46690a84355eba9ea50df7fac10f73e3f23edf6e..7a849d3740440f149116863284c49fa282898c5a 100644 (file)
@@ -85,6 +85,7 @@ class TestFtpsVsFTPD:
         r.check_stats(count=1, http_status=226)
         lines = open(os.path.join(curl.run_dir, 'download_#1.data')).readlines()
         assert len(lines) == 4, f'list: {lines}'
+        r.check_stats_timelines()
 
     # download 1 file, no SSL
     @pytest.mark.parametrize("docname", [
@@ -98,6 +99,7 @@ class TestFtpsVsFTPD:
         r = curl.ftp_get(urls=[url], with_stats=True)
         r.check_stats(count=count, http_status=226)
         self.check_downloads(curl, srcfile, count)
+        r.check_stats_timelines()
 
     @pytest.mark.parametrize("docname", [
         'data-1k', 'data-1m', 'data-10m'
@@ -111,6 +113,7 @@ class TestFtpsVsFTPD:
         r.check_stats(count=count, http_status=226)
         self.check_downloads(curl, srcfile, count)
         assert r.total_connects == count + 1, 'should reuse the control conn'
+        r.check_stats_timelines()
 
     # 2 serial transfers, first with 'ftps://' and second with 'ftp://'
     # we want connection reuse in this case
@@ -123,6 +126,7 @@ class TestFtpsVsFTPD:
         r = curl.ftp_get(urls=[url1, url2], with_stats=True)
         r.check_stats(count=count, http_status=226)
         assert r.total_connects == count + 1, 'should reuse the control conn'
+        r.check_stats_timelines()
 
     @pytest.mark.parametrize("docname", [
         'data-1k', 'data-1m', 'data-10m'
@@ -138,6 +142,7 @@ class TestFtpsVsFTPD:
         r.check_stats(count=count, http_status=226)
         self.check_downloads(curl, srcfile, count)
         assert r.total_connects > count + 1, 'should have used several control conns'
+        r.check_stats_timelines()
 
     @pytest.mark.parametrize("docname", [
         'upload-1k', 'upload-100k', 'upload-1m'
@@ -152,6 +157,7 @@ class TestFtpsVsFTPD:
         r = curl.ftp_upload(urls=[url], fupload=f'{srcfile}', with_stats=True)
         r.check_stats(count=count, http_status=226)
         self.check_upload(env, vsftpds, docname=docname)
+        r.check_stats_timelines()
 
     def _rmf(self, path):
         if os.path.exists(path):
index a92e4f681f345ad6eb5eabbf1c0966fb41ccff54..e486715fa79b446032bec2888277ca7549e19639 100644 (file)
@@ -493,6 +493,80 @@ class ExecResult:
                         f'status #{idx} remote_ip: expected {remote_ip}, '\
                         f'got {x["remote_ip"]}\n{self.dump_stat(x)}'
 
+    def check_stat_positive(self, s, idx, key):
+        assert key in s, f'stat #{idx} "{key}" missing: {s}'
+        assert s[key] > 0, f'stat #{idx} "{key}" not positive: {s}'
+
+    def check_stat_positive_or_0(self, s, idx, key):
+        assert key in s, f'stat #{idx} "{key}" missing: {s}'
+        assert s[key] >= 0, f'stat #{idx} "{key}" not positive: {s}'
+
+    def check_stat_zero(self, s, key):
+        assert key in s, f'stat "{key}" missing: {s}'
+        assert s[key] == 0, f'stat "{key}" not zero: {s}'
+
+    def check_stats_timelines(self):
+        for i in range(len(self._stats)):
+            self.check_stats_timeline(i)
+
+    def check_stats_timeline(self, idx):
+        # check timings reported on a transfer for consistency
+        s = self._stats[idx]
+
+        url = s['url_effective']
+        # connect time is sometimes reported as 0 by openssl-quic (sigh)
+        self.check_stat_positive_or_0(s, idx, 'time_connect')
+        # all stat keys which reporting timings
+        all_keys = {
+            'time_queue', 'time_namelookup',
+            'time_connect', 'time_appconnect',
+            'time_pretransfer', 'time_posttransfer',
+            'time_starttransfer', 'time_total',
+        }
+        # stat keys where we expect a positive value
+        ref_tl = []
+        exact_match = True
+        # redirects mess up the queue time, only count without
+        if s['time_redirect'] == 0:
+            ref_tl += ['time_queue']
+        else:
+            exact_match = False
+        # connect events?
+        if url.startswith('ftp'):
+            # ftp is messy with connect events for its DATA connection
+            exact_match = False
+        elif s['num_connects'] > 0:
+            ref_tl += ['time_namelookup', 'time_connect']
+            if url.startswith('https:'):
+                ref_tl += ['time_appconnect']
+        # what kind of transfer was it?
+        if s['size_upload'] == 0 and s['size_download'] > 0:
+            # this is a download
+            dl_tl = ['time_pretransfer', 'time_starttransfer']
+            if s['size_request'] > 0:
+                dl_tl = ['time_posttransfer'] + dl_tl
+            ref_tl += dl_tl
+        elif s['size_upload'] > 0 and s['size_download'] == 0:
+            # this is an upload
+            ul_tl = ['time_pretransfer', 'time_posttransfer']
+            ref_tl += ul_tl
+        else:
+            # could be a 0-length upload or 0-length download, not sure
+            exact_match = False
+        # always there at the end
+        ref_tl += ['time_total']
+
+        # assert all events in reference timeline are > 0
+        for key in ref_tl:
+            self.check_stat_positive(s, idx, key)
+        if exact_match:
+            # assert all events not in reference timeline are 0
+            for key in [key for key in all_keys if key not in ref_tl]:
+                self.check_stat_zero(s, key)
+        # calculate the timeline that did happen
+        seen_tl = sorted(ref_tl, key=lambda ts: s[ts])
+        assert seen_tl == ref_tl, f'{[f"{ts}: {s[ts]}" for ts in seen_tl]}'
+
     def dump_logs(self):
         lines = ['>>--stdout ----------------------------------------------\n']
         lines.extend(self._stdout)
index ffb974f4f0cb09f0d366221a4d6cff296966ae47..ff886a05ca602889b1a1efc8ca91164dfca4d393 100644 (file)
@@ -128,7 +128,10 @@ static CURLcode test_lib500(const char *URL)
                         (time_pretransfer / 1000000),
                         (long)(time_pretransfer % 1000000));
         }
-        if(time_pretransfer > time_posttransfer) {
+        if(time_posttransfer > time_pretransfer) {
+          /* counter-intuitive: on a GET request, all bytes are sent *before*
+           * PRETRANSFER happens. Thus POSTTRANSFER has to be smaller.
+           * The reverse would be true for a POST/PUT. */
           curl_mfprintf(moo, "pretransfer vs posttransfer: %"
                         CURL_FORMAT_CURL_OFF_T
                         ".%06ld %" CURL_FORMAT_CURL_OFF_T ".%06ld\n",