]> git.ipfire.org Git - thirdparty/tornado.git/commitdiff
httpclient: Improve HTTPResponse time info 2416/head
author Ben Darnell <ben@bendarnell.com>
Sun, 10 Jun 2018 21:08:15 +0000 (17:08 -0400)
committer Ben Darnell <ben@bendarnell.com>
Sun, 10 Jun 2018 21:08:15 +0000 (17:08 -0400)
The HTTPResponse.request_time attribute is now consistent between curl
and simple HTTP clients (it excludes queue time in both cases). Added
a start_time attribute which can be used to see when the network
processing started.

Replaces #1032

tornado/curl_httpclient.py
tornado/httpclient.py
tornado/simple_httpclient.py
tornado/test/httpclient_test.py

index 2edbca367d554ca07e88a33aa3c355e46f96fe18..7f5cb1051faab3d4ef841dcc3e61ef243c0cf365 100644 (file)
@@ -80,7 +80,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
         self._multi = None
 
     def fetch_impl(self, request, callback):
-        self._requests.append((request, callback))
+        self._requests.append((request, callback, self.io_loop.time()))
         self._process_queue()
         self._set_timeout(0)
 
@@ -205,13 +205,15 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
                 while self._free_list and self._requests:
                     started += 1
                     curl = self._free_list.pop()
-                    (request, callback) = self._requests.popleft()
+                    (request, callback, queue_start_time) = self._requests.popleft()
                     curl.info = {
                         "headers": httputil.HTTPHeaders(),
                         "buffer": BytesIO(),
                         "request": request,
                         "callback": callback,
+                        "queue_start_time": queue_start_time,
                         "curl_start_time": time.time(),
+                        "curl_start_ioloop_time": self.io_loop.current().time(),
                     }
                     try:
                         self._curl_setup_request(
@@ -257,7 +259,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
         # the various curl timings are documented at
         # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
         time_info = dict(
-            queue=info["curl_start_time"] - info["request"].start_time,
+            queue=info["curl_start_ioloop_time"] - info["queue_start_time"],
             namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
             connect=curl.getinfo(pycurl.CONNECT_TIME),
             appconnect=curl.getinfo(pycurl.APPCONNECT_TIME),
@@ -271,7 +273,8 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
                 request=info["request"], code=code, headers=info["headers"],
                 buffer=buffer, effective_url=effective_url, error=error,
                 reason=info['headers'].get("X-Http-Reason", None),
-                request_time=time.time() - info["curl_start_time"],
+                request_time=self.io_loop.time() - info["curl_start_ioloop_time"],
+                start_time=info["curl_start_time"],
                 time_info=time_info))
         except Exception:
             self.handle_callback_exception(info["callback"])
index 461994a454d3fb622d97f2f91069881458599e83..5ed2ee677785bca3443463ac1b0f1e903296387c 100644 (file)
@@ -578,17 +578,35 @@ class HTTPResponse(object):
 
     * error: Exception object, if any
 
-    * request_time: seconds from request start to finish
+    * request_time: seconds from request start to finish. Includes all network
+      operations from DNS resolution to receiving the last byte of data.
+      Does not include time spent in the queue (due to the ``max_clients`` option).
+      If redirects were followed, only includes the final request.
+
+    * start_time: Time at which the HTTP operation started, based on `time.time`
+      (not the monotonic clock used by `.IOLoop.time`). May be ``None`` if the request
+      timed out while in the queue.
 
     * time_info: dictionary of diagnostic timing information from the request.
       Available data are subject to change, but currently uses timings
       available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
       plus ``queue``, which is the delay (if any) introduced by waiting for
       a slot under `AsyncHTTPClient`'s ``max_clients`` setting.
+
+    .. versionadded:: 5.1
+
+       Added the ``start_time`` attribute.
+
+    .. versionchanged:: 5.1
+
+       The ``request_time`` attribute previously included time spent in the queue
+       for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time
+       is excluded in both implementations. ``request_time`` is now more accurate for
+       ``curl_httpclient`` because it uses a monotonic clock when available.
     """
     def __init__(self, request, code, headers=None, buffer=None,
                  effective_url=None, error=None, request_time=None,
-                 time_info=None, reason=None):
+                 time_info=None, reason=None, start_time=None):
         if isinstance(request, _RequestProxy):
             self.request = request.request
         else:
@@ -615,6 +633,7 @@ class HTTPResponse(object):
                 self.error = None
         else:
             self.error = error
+        self.start_time = start_time
         self.request_time = request_time
         self.time_info = time_info or {}
 
index 35c719362de37df9a9ccb458701ef47d72498066..60b7956fe320166cfda79abb4dc7bb0f013c9bc2 100644 (file)
@@ -20,6 +20,7 @@ import functools
 import re
 import socket
 import sys
+import time
 from io import BytesIO
 
 
@@ -215,6 +216,7 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
                  max_header_size, max_body_size):
         self.io_loop = IOLoop.current()
         self.start_time = self.io_loop.time()
+        self.start_wall_time = time.time()
         self.client = client
         self.request = request
         self.release_callback = release_callback
@@ -447,6 +449,7 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
                     value = value.real_error
             self._run_callback(HTTPResponse(self.request, 599, error=value,
                                             request_time=self.io_loop.time() - self.start_time,
+                                            start_time=self.start_wall_time,
                                             ))
 
             if hasattr(self, "stream"):
@@ -539,6 +542,7 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
                                 self.code, reason=getattr(self, 'reason', None),
                                 headers=self.headers,
                                 request_time=self.io_loop.time() - self.start_time,
+                                start_time=self.start_wall_time,
                                 buffer=buffer,
                                 effective_url=self.request.url)
         self._run_callback(response)
index fb8b12d57f8e5bc22dc9eba9071f04faa332fe7d..35426a7df276b4f9acc7d5fa69ec8f2a80a6af33 100644 (file)
@@ -9,6 +9,7 @@ import sys
 import threading
 import datetime
 from io import BytesIO
+import time
 import unicodedata
 
 from tornado.escape import utf8, native_str
@@ -546,6 +547,22 @@ X-XSS-Protection: 1;
         response.rethrow()
         self.assertEqual(response.headers["Foo"], native_str(u"\u00e9"))
 
+    def test_response_times(self):
+        # A few simple sanity checks of the response time fields to
+        # make sure they're using the right basis (between the
+        # wall-time and monotonic clocks).
+        start_time = time.time()
+        response = self.fetch("/hello")
+        response.rethrow()
+        self.assertGreaterEqual(response.request_time, 0)
+        self.assertLess(response.request_time, 1.0)
+        # A very crude check to make sure that start_time is based on
+        # wall time and not the monotonic clock.
+        self.assertLess(abs(response.start_time - start_time), 1.0)
+
+        for k, v in response.time_info.items():
+            self.assertTrue(0 <= v < 1.0, "time_info[%s] out of bounds: %s" % (k, v))
+
 
 class RequestProxyTest(unittest.TestCase):
     def test_request_set(self):