self._multi = None
def fetch_impl(self, request, callback):
- self._requests.append((request, callback))
+ self._requests.append((request, callback, self.io_loop.time()))
self._process_queue()
self._set_timeout(0)
while self._free_list and self._requests:
started += 1
curl = self._free_list.pop()
- (request, callback) = self._requests.popleft()
+ (request, callback, queue_start_time) = self._requests.popleft()
curl.info = {
"headers": httputil.HTTPHeaders(),
"buffer": BytesIO(),
"request": request,
"callback": callback,
+ "queue_start_time": queue_start_time,
"curl_start_time": time.time(),
+ "curl_start_ioloop_time": self.io_loop.current().time(),
}
try:
self._curl_setup_request(
# the various curl timings are documented at
# http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
time_info = dict(
- queue=info["curl_start_time"] - info["request"].start_time,
+ queue=info["curl_start_ioloop_time"] - info["queue_start_time"],
namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
connect=curl.getinfo(pycurl.CONNECT_TIME),
appconnect=curl.getinfo(pycurl.APPCONNECT_TIME),
request=info["request"], code=code, headers=info["headers"],
buffer=buffer, effective_url=effective_url, error=error,
reason=info['headers'].get("X-Http-Reason", None),
- request_time=time.time() - info["curl_start_time"],
+ request_time=self.io_loop.time() - info["curl_start_ioloop_time"],
+ start_time=info["curl_start_time"],
time_info=time_info))
except Exception:
self.handle_callback_exception(info["callback"])
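
For reference, here is a condensed sketch of the timing bookkeeping introduced above; the variable names are illustrative and not part of the patch. Wall-clock time from `time.time()` is recorded only so it can be reported back as `start_time`, while the durations (the queue delay and `request_time`) are measured on the IOLoop's monotonic clock.

    # Illustrative sketch only; names are hypothetical.
    import time
    from tornado.ioloop import IOLoop

    loop = IOLoop.current()
    queue_start = loop.time()       # monotonic: recorded when the request is queued
    # ... the request waits here for a free curl handle under max_clients ...
    curl_start_wall = time.time()   # wall clock: reported as HTTPResponse.start_time
    curl_start_mono = loop.time()   # monotonic: basis for all measured durations

    queue_delay = curl_start_mono - queue_start      # becomes time_info["queue"]
    # ... curl performs the request ...
    request_time = loop.time() - curl_start_mono     # excludes time spent queued
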
* error: Exception object, if any
- * request_time: seconds from request start to finish
+ * request_time: seconds from request start to finish. Includes all network
+ operations from DNS resolution to receiving the last byte of data.
+ Does not include time spent in the queue (due to the ``max_clients`` option).
+ If redirects were followed, only includes the final request.
+
+ * start_time: Time at which the HTTP operation started, based on `time.time`
+ (not the monotonic clock used by `.IOLoop.time`). May be ``None`` if the request
+ timed out while in the queue.
* time_info: dictionary of diagnostic timing information from the request.
Available data are subject to change, but currently uses timings
available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
plus ``queue``, which is the delay (if any) introduced by waiting for
a slot under `AsyncHTTPClient`'s ``max_clients`` setting.
+
+ .. versionadded:: 5.1
+
+ Added the ``start_time`` attribute.
+
+ .. versionchanged:: 5.1
+
+ The ``request_time`` attribute previously included time spent in the queue
+ for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time
+ is excluded in both implementations. ``request_time`` is now more accurate for
+ ``curl_httpclient`` because it uses a monotonic clock when available.
"""
def __init__(self, request, code, headers=None, buffer=None,
effective_url=None, error=None, request_time=None,
- time_info=None, reason=None):
+ time_info=None, reason=None, start_time=None):
if isinstance(request, _RequestProxy):
self.request = request.request
else:
self.error = None
else:
self.error = error
+ self.start_time = start_time
self.request_time = request_time
self.time_info = time_info or {}
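
To make the attributes documented above concrete, a minimal usage sketch (not part of the patch; the URL is a placeholder, and `time_info` may be empty for implementations other than `curl_httpclient`, so `.get()` is used defensively):

    # Minimal usage sketch; "http://example.com/" is a placeholder URL.
    import datetime
    from tornado.httpclient import AsyncHTTPClient
    from tornado.ioloop import IOLoop

    async def main():
        response = await AsyncHTTPClient().fetch("http://example.com/")
        # start_time is wall-clock (time.time) based and may be None if the
        # request timed out while still waiting in the queue.
        if response.start_time is not None:
            print("started:", datetime.datetime.fromtimestamp(response.start_time))
        print("request_time: %.3fs (queue time excluded)" % response.request_time)
        print("queued for: %.3fs" % response.time_info.get("queue", 0.0))

    IOLoop.current().run_sync(main)

Callers that still want the total elapsed time including queueing can measure around the `fetch` call themselves.
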
import re
import socket
import sys
+import time
from io import BytesIO
max_header_size, max_body_size):
self.io_loop = IOLoop.current()
self.start_time = self.io_loop.time()
+ self.start_wall_time = time.time()
self.client = client
self.request = request
self.release_callback = release_callback
value = value.real_error
self._run_callback(HTTPResponse(self.request, 599, error=value,
request_time=self.io_loop.time() - self.start_time,
+ start_time=self.start_wall_time,
))
if hasattr(self, "stream"):
self.code, reason=getattr(self, 'reason', None),
headers=self.headers,
request_time=self.io_loop.time() - self.start_time,
+ start_time=self.start_wall_time,
buffer=buffer,
effective_url=self.request.url)
self._run_callback(response)
import threading
import datetime
from io import BytesIO
+import time
import unicodedata
from tornado.escape import utf8, native_str
response.rethrow()
self.assertEqual(response.headers["Foo"], native_str(u"\u00e9"))
+ def test_response_times(self):
+ # A few simple sanity checks of the response time fields to
+ # make sure each uses the right basis (wall-clock time vs. the
+ # monotonic IOLoop clock).
+ start_time = time.time()
+ response = self.fetch("/hello")
+ response.rethrow()
+ self.assertGreaterEqual(response.request_time, 0)
+ self.assertLess(response.request_time, 1.0)
+ # A very crude check to make sure that start_time is based on
+ # wall time and not the monotonic clock.
+ self.assertLess(abs(response.start_time - start_time), 1.0)
+
+ for k, v in response.time_info.items():
+ self.assertTrue(0 <= v < 1.0, "time_info[%s] out of bounds: %s" % (k, v))
+
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):