return CURLE_OK;
}
-static void conn_report_connect_stats(struct Curl_easy *data,
- struct connectdata *conn);
+static void conn_report_connect_stats(struct Curl_cfilter *cf,
+ struct Curl_easy *data);
CURLcode Curl_cf_def_adjust_pollset(struct Curl_cfilter *cf,
struct Curl_easy *data,
* persist information at the connection. E.g. cf-socket sets the
* socket and ip related information. */
cf_cntrl_update_info(data, data->conn);
- conn_report_connect_stats(data, data->conn);
+ conn_report_connect_stats(cf, data);
data->conn->keepalive = curlx_now();
#ifndef CURL_DISABLE_VERBOSE_STRINGS
result = cf_verboseconnect(data, cf);
else if(result) {
CURL_TRC_CF(data, cf, "Curl_conn_connect(), filter returned %d",
result);
- conn_report_connect_stats(data, data->conn);
+ conn_report_connect_stats(cf, data);
goto out;
}
/**
* Update connection statistics
*/
-static void conn_report_connect_stats(struct Curl_easy *data,
- struct connectdata *conn)
+static void conn_report_connect_stats(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
{
- struct Curl_cfilter *cf = conn->cfilter[FIRSTSOCKET];
if(cf) {
struct curltime connected;
struct curltime appconnected;
process_pending_handles(data->multi);
if(!result) {
- *nowp = Curl_pgrsTime(data, TIMER_POSTQUEUE);
+ *nowp = curlx_now();
if(async)
/* We are now waiting for an asynchronous name lookup */
multistate(data, MSTATE_RESOLVING);
return result;
buf += nwritten;
blen -= nwritten;
+ if(!blen) {
+ result = req_set_upload_done(data);
+ if(result)
+ return result;
+ }
}
if(blen) {
/* stop receiving and ALL sending as well, including PAUSE and HOLD.
* We might still be paused on receive client writes though, so
* keep those bits around. */
+ CURLcode result = CURLE_OK;
+ if(data->req.keepon & KEEP_SEND)
+ result = Curl_req_abort_sending(data);
data->req.keepon &= ~(KEEP_RECV|KEEP_SEND);
- return Curl_req_abort_sending(data);
+ return result;
}
size_t nwrite, excess_len = 0;
bool is_connect = !!(type & CLIENTWRITE_CONNECT);
- if(!is_connect && !ctx->started_response) {
+ if(!ctx->started_response &&
+ !(type & (CLIENTWRITE_INFO|CLIENTWRITE_CONNECT))) {
Curl_pgrsTime(data, TIMER_STARTTRANSFER);
Curl_rlimit_start(&data->progress.dl.rlimit, curlx_now());
ctx->started_response = TRUE;
/* We only get a 0-length receive at the end of the response */
is_eos = (blen == 0);
- if(!blen && (conn->recv[FIRSTSOCKET] == Curl_cf_recv)) {
- /* if we receive 0 or less here and the protocol handler did not
- replace the connection's `recv` callback, either the data transfer
- is done or the server closed the connection and
- we bail out from this!
- With a `recv` replacement, we assume the protocol handler knows
- what it is doing and a 0-length receive is fine. For example,
- SFTP downloads of an empty file would show this. See #19165. */
- if(is_multiplex)
- DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
- else
- DEBUGF(infof(data, "nread <= 0, server closed connection, bailing"));
+ if(!blen) {
result = Curl_req_stop_send_recv(data);
if(result)
goto out;
if(!result) {
DEBUGASSERT(conn);
+ Curl_pgrsTime(data, TIMER_POSTQUEUE);
if(reused) {
if(CONN_ATTACHED(conn) > 1)
/* multiplexed */
if(err == 1) {
/* connected */
+ if(!ctx->got_first_byte) {
+ /* if not recorded yet, take the timestamp before we called
+ * SSL_do_handshake() as the time we received the first packet. */
+ ctx->got_first_byte = TRUE;
+ ctx->first_byte_at = now;
+ }
+ /* Record the handshake complete with a new time stamp. */
+ now = curlx_now();
ctx->handshake_at = now;
ctx->q.last_io = now;
CURL_TRC_CF(data, cf, "handshake complete after %" FMT_TIMEDIFF_T "ms",
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env, httpd):
indir = httpd.docs_dir
+ env.make_data_file(indir=indir, fname="data-0k", fsize=0)
env.make_data_file(indir=indir, fname="data-10k", fsize=10*1024)
env.make_data_file(indir=indir, fname="data-100k", fsize=100*1024)
env.make_data_file(indir=indir, fname="data-1m", fsize=1024*1024)
# download 1 file
@pytest.mark.parametrize("proto", Env.http_protos())
- def test_02_01_download_1(self, env: Env, httpd, nghttpx, proto):
+ @pytest.mark.parametrize("docname", ['data.json', 'data-0k', 'data-10k', 'data-100k'])
+ def test_02_01_download_1(self, env: Env, httpd, nghttpx, proto, docname):
curl = CurlClient(env=env)
- url = f'https://{env.authority_for(env.domain1, proto)}/data.json'
+ url = f'https://{env.authority_for(env.domain1, proto)}/{docname}'
r = curl.http_download(urls=[url], alpn_proto=proto)
r.check_response(http_status=200)
remote_ip='127.0.0.1')
for idx, s in enumerate(r.stats):
self.check_stat(idx, s, r, dl_size=30, ul_size=0)
+ r.check_stats_timeline(idx)
# download plain file with a 302 redirect
@pytest.mark.parametrize("proto", Env.http_protos())
remote_ip='127.0.0.1')
for idx, s in enumerate(r.stats):
self.check_stat(idx, s, r, dl_size=30, ul_size=0)
+ r.check_stats_timeline(idx)
@pytest.mark.parametrize("proto", Env.http_protos())
def test_16_03_info_upload(self, env: Env, httpd, nghttpx, proto):
remote_ip='127.0.0.1')
for idx, s in enumerate(r.stats):
self.check_stat(idx, s, r, dl_size=fsize, ul_size=fsize)
+ r.check_stats_timeline(idx)
# download plain file via http: ('time_appconnect' is 0)
@pytest.mark.parametrize("proto", ['http/1.1'])
remote_port=env.http_port, remote_ip='127.0.0.1')
for idx, s in enumerate(r.stats):
self.check_stat(idx, s, r, dl_size=30, ul_size=0)
+ r.check_stats_timeline(idx)
def check_stat(self, idx, s, r, dl_size=None, ul_size=None):
- self.check_stat_times(s, idx)
# we always send something
self.check_stat_positive(s, idx, 'size_request')
# we always receive response headers
def check_stat_positive(self, s, idx, key):
assert key in s, f'stat #{idx} "{key}" missing: {s}'
assert s[key] > 0, f'stat #{idx} "{key}" not positive: {s}'
-
- def check_stat_positive_or_0(self, s, idx, key):
- assert key in s, f'stat #{idx} "{key}" missing: {s}'
- assert s[key] >= 0, f'stat #{idx} "{key}" not positive: {s}'
-
- def check_stat_zero(self, s, key):
- assert key in s, f'stat "{key}" missing: {s}'
- assert s[key] == 0, f'stat "{key}" not zero: {s}'
-
- def check_stat_times(self, s, idx):
- # check timings reported on a transfer for consistency
- url = s['url_effective']
- # connect time is sometimes reported as 0 by openssl-quic (sigh)
- self.check_stat_positive_or_0(s, idx, 'time_connect')
- # all stat keys which reporting timings
- all_keys = {
- 'time_appconnect', 'time_redirect',
- 'time_pretransfer', 'time_starttransfer', 'time_total'
- }
- # stat keys where we expect a positive value
- pos_keys = {'time_pretransfer', 'time_starttransfer', 'time_total', 'time_queue'}
- if s['num_connects'] > 0:
- if url.startswith('https:'):
- pos_keys.add('time_appconnect')
- if s['num_redirects'] > 0:
- pos_keys.add('time_redirect')
- zero_keys = all_keys - pos_keys
- # assert all zeros are zeros and the others are positive
- for key in zero_keys:
- self.check_stat_zero(s, key)
- for key in pos_keys:
- self.check_stat_positive(s, idx, key)
- # assert that all timers before "time_pretransfer" are less or equal
- for key in ['time_appconnect', 'time_connect', 'time_namelookup']:
- assert s[key] < s['time_pretransfer'], f'time "{key}" larger than' \
- f'"time_pretransfer": {s}'
- # assert transfer total is after pretransfer.
- # (in MOST situations, pretransfer is before starttransfer, BUT
- # in protocols like HTTP we might get a server response already before
- # we transition to multi state DID.)
- assert s['time_pretransfer'] <= s['time_total'], f'"time_pretransfer" '\
- f'greater than "time_total", {s}'
- # assert that transfer start is before total
- assert s['time_starttransfer'] <= s['time_total'], f'"time_starttransfer" '\
- f'greater than "time_total", {s}'
- if s['num_redirects'] > 0:
- assert s['time_queue'] < s['time_starttransfer'], f'"time_queue" '\
- f'greater/equal than "time_starttransfer", {s}'
- else:
- assert s['time_queue'] <= s['time_starttransfer'], f'"time_queue" '\
- f'greater than "time_starttransfer", {s}'
shutil.rmtree(vsftpd.docs_dir)
if not os.path.exists(vsftpd.docs_dir):
os.makedirs(vsftpd.docs_dir)
+ self._make_docs_file(docs_dir=vsftpd.docs_dir, fname='data-0k', fsize=0)
self._make_docs_file(docs_dir=vsftpd.docs_dir, fname='data-1k', fsize=1024)
self._make_docs_file(docs_dir=vsftpd.docs_dir, fname='data-10k', fsize=10*1024)
self._make_docs_file(docs_dir=vsftpd.docs_dir, fname='data-1m', fsize=1024*1024)
self._make_docs_file(docs_dir=vsftpd.docs_dir, fname='data-10m', fsize=10*1024*1024)
+ env.make_data_file(indir=env.gen_dir, fname="upload-0k", fsize=0)
env.make_data_file(indir=env.gen_dir, fname="upload-1k", fsize=1024)
env.make_data_file(indir=env.gen_dir, fname="upload-100k", fsize=100*1024)
env.make_data_file(indir=env.gen_dir, fname="upload-1m", fsize=1024*1024)
r = curl.ftp_get(urls=[url], with_stats=True)
r.check_stats(count=1, http_status=226)
lines = open(os.path.join(curl.run_dir, 'download_#1.data')).readlines()
- assert len(lines) == 4, f'list: {lines}'
+ assert len(lines) == 5, f'list: {lines}'
+ r.check_stats_timelines()
# download 1 file, no SSL
@pytest.mark.parametrize("docname", [
- 'data-1k', 'data-1m', 'data-10m'
+ 'data-0k', 'data-1k', 'data-1m', 'data-10m'
])
def test_30_02_download_1(self, env: Env, vsftpd: VsFTPD, docname):
curl = CurlClient(env=env)
r = curl.ftp_get(urls=[url], with_stats=True)
r.check_stats(count=count, http_status=226)
self.check_downloads(curl, srcfile, count)
+ r.check_stats_timelines()
@pytest.mark.parametrize("docname", [
- 'data-1k', 'data-1m', 'data-10m'
+ 'data-0k', 'data-1k', 'data-1m', 'data-10m'
])
def test_30_03_download_10_serial(self, env: Env, vsftpd: VsFTPD, docname):
curl = CurlClient(env=env)
r.check_stats(count=count, http_status=226)
self.check_downloads(curl, srcfile, count)
assert r.total_connects == count + 1, 'should reuse the control conn'
+ r.check_stats_timelines()
@pytest.mark.parametrize("docname", [
- 'data-1k', 'data-1m', 'data-10m'
+ 'data-0k', 'data-1k', 'data-1m', 'data-10m'
])
def test_30_04_download_10_parallel(self, env: Env, vsftpd: VsFTPD, docname):
curl = CurlClient(env=env)
r.check_stats(count=count, http_status=226)
self.check_downloads(curl, srcfile, count)
assert r.total_connects > count + 1, 'should have used several control conns'
+ r.check_stats_timelines()
@pytest.mark.parametrize("docname", [
- 'upload-1k', 'upload-100k', 'upload-1m'
+ 'upload-0k', 'upload-1k', 'upload-100k', 'upload-1m'
])
def test_30_05_upload_1(self, env: Env, vsftpd: VsFTPD, docname):
curl = CurlClient(env=env)
r = curl.ftp_upload(urls=[url], fupload=f'{srcfile}', with_stats=True)
r.check_stats(count=count, http_status=226)
self.check_upload(env, vsftpd, docname=docname)
+ r.check_stats_timelines()
def _rmf(self, path):
if os.path.exists(path):
])
r.check_stats(count=count, http_status=226)
self.check_downloads(curl, srcfile, count)
+ r.check_stats_timelines()
def test_30_09_active_up_file(self, env: Env, vsftpd: VsFTPD):
docname = 'upload-1k'
])
r.check_stats(count=count, http_status=226)
self.check_upload(env, vsftpd, docname=docname)
+ r.check_stats_timelines()
def test_30_10_active_up_ascii(self, env: Env, vsftpd: VsFTPD):
docname = 'upload-1k'
])
r.check_stats(count=count, http_status=226)
self.check_upload(env, vsftpd, docname=docname, binary=False)
+ r.check_stats_timelines()
def test_30_11_download_non_existing(self, env: Env, vsftpd: VsFTPD):
curl = CurlClient(env=env)
r = curl.ftp_get(urls=[url], with_stats=True)
r.check_exit_code(78)
r.check_stats(count=1, exitcode=78)
+ r.check_stats_timelines()
def check_downloads(self, client, srcfile: str, count: int,
complete: bool = True):
r.check_stats(count=1, http_status=226)
lines = open(os.path.join(curl.run_dir, 'download_#1.data')).readlines()
assert len(lines) == 4, f'list: {lines}'
+ r.check_stats_timelines()
# download 1 file, no SSL
@pytest.mark.parametrize("docname", [
r = curl.ftp_ssl_get(urls=[url], with_stats=True)
r.check_stats(count=count, http_status=226)
self.check_downloads(curl, srcfile, count)
+ r.check_stats_timelines()
@pytest.mark.parametrize("docname", [
'data-1k', 'data-1m', 'data-10m'
r.check_stats(count=count, http_status=226)
self.check_downloads(curl, srcfile, count)
assert r.total_connects == count + 1, 'should reuse the control conn'
+ r.check_stats_timelines()
@pytest.mark.parametrize("docname", [
'data-1k', 'data-1m', 'data-10m'
r.check_stats(count=count, http_status=226)
self.check_downloads(curl, srcfile, count)
assert r.total_connects > count + 1, 'should have used several control conns'
+ r.check_stats_timelines()
@pytest.mark.parametrize("docname", [
'upload-1k', 'upload-100k', 'upload-1m'
r = curl.ftp_ssl_upload(urls=[url], fupload=f'{srcfile}', with_stats=True)
r.check_stats(count=count, http_status=226)
self.check_upload(env, vsftpds, docname=docname)
+ r.check_stats_timelines()
def _rmf(self, path):
if os.path.exists(path):
r.check_stats(count=1, http_status=226)
lines = open(os.path.join(curl.run_dir, 'download_#1.data')).readlines()
assert len(lines) == 4, f'list: {lines}'
+ r.check_stats_timelines()
# download 1 file, no SSL
@pytest.mark.parametrize("docname", [
r = curl.ftp_get(urls=[url], with_stats=True)
r.check_stats(count=count, http_status=226)
self.check_downloads(curl, srcfile, count)
+ r.check_stats_timelines()
@pytest.mark.parametrize("docname", [
'data-1k', 'data-1m', 'data-10m'
r.check_stats(count=count, http_status=226)
self.check_downloads(curl, srcfile, count)
assert r.total_connects == count + 1, 'should reuse the control conn'
+ r.check_stats_timelines()
# 2 serial transfers, first with 'ftps://' and second with 'ftp://'
# we want connection reuse in this case
r = curl.ftp_get(urls=[url1, url2], with_stats=True)
r.check_stats(count=count, http_status=226)
assert r.total_connects == count + 1, 'should reuse the control conn'
+ r.check_stats_timelines()
@pytest.mark.parametrize("docname", [
'data-1k', 'data-1m', 'data-10m'
r.check_stats(count=count, http_status=226)
self.check_downloads(curl, srcfile, count)
assert r.total_connects > count + 1, 'should have used several control conns'
+ r.check_stats_timelines()
@pytest.mark.parametrize("docname", [
'upload-1k', 'upload-100k', 'upload-1m'
r = curl.ftp_upload(urls=[url], fupload=f'{srcfile}', with_stats=True)
r.check_stats(count=count, http_status=226)
self.check_upload(env, vsftpds, docname=docname)
+ r.check_stats_timelines()
def _rmf(self, path):
if os.path.exists(path):
f'status #{idx} remote_ip: expected {remote_ip}, '\
f'got {x["remote_ip"]}\n{self.dump_stat(x)}'
+ # assert stat `key` is present in sample `s` and strictly positive
+ def check_stat_positive(self, s, idx, key):
+ assert key in s, f'stat #{idx} "{key}" missing: {s}'
+ assert s[key] > 0, f'stat #{idx} "{key}" not positive: {s}'
+
+ # assert stat `key` is present and non-negative (0 is allowed)
+ def check_stat_positive_or_0(self, s, idx, key):
+ assert key in s, f'stat #{idx} "{key}" missing: {s}'
+ assert s[key] >= 0, f'stat #{idx} "{key}" not positive: {s}'
+
+ # assert stat `key` is present and exactly 0
+ def check_stat_zero(self, s, key):
+ assert key in s, f'stat "{key}" missing: {s}'
+ assert s[key] == 0, f'stat "{key}" not zero: {s}'
+
+ # run the timeline consistency check on every collected stat sample
+ def check_stats_timelines(self):
+ for i in range(len(self._stats)):
+ self.check_stats_timeline(i)
+
+ # check timings reported on transfer `idx` for consistency:
+ # build the reference list of timing events that should have happened,
+ # assert each is positive and that their reported order matches; when
+ # `exact_match` holds, also assert all other timing stats are 0.
+ def check_stats_timeline(self, idx):
+ s = self._stats[idx]
+
+ url = s['url_effective']
+ # connect time is sometimes reported as 0 by openssl-quic (sigh)
+ self.check_stat_positive_or_0(s, idx, 'time_connect')
+ # all stat keys that report timings
+ all_keys = {
+ 'time_queue', 'time_namelookup',
+ 'time_connect', 'time_appconnect',
+ 'time_pretransfer', 'time_posttransfer',
+ 'time_starttransfer', 'time_total',
+ }
+ # reference timeline: stat keys where we expect a positive value,
+ # in the order the events should have occurred
+ ref_tl = []
+ exact_match = True
+ # redirects mess up the queue time, only count it when no redirect happened
+ if s['time_redirect'] == 0:
+ ref_tl += ['time_queue']
+ else:
+ exact_match = False
+ # connect events?
+ if url.startswith('ftp'):
+ # ftp is messy with connect events for its DATA connection
+ exact_match = False
+ elif s['num_connects'] > 0:
+ ref_tl += ['time_namelookup', 'time_connect']
+ if url.startswith('https:'):
+ ref_tl += ['time_appconnect']
+ # what kind of transfer was it?
+ if s['size_upload'] == 0 and s['size_download'] > 0:
+ # this is a download; if a request was sent, its sending finishes
+ # (posttransfer) before pretransfer/starttransfer are recorded
+ dl_tl = ['time_pretransfer', 'time_starttransfer']
+ if s['size_request'] > 0:
+ dl_tl = ['time_posttransfer'] + dl_tl
+ ref_tl += dl_tl
+ elif s['size_upload'] > 0 and s['size_download'] == 0:
+ # this is an upload
+ ul_tl = ['time_pretransfer', 'time_posttransfer']
+ ref_tl += ul_tl
+ else:
+ # could be a 0-length upload or 0-length download, not sure
+ exact_match = False
+ # always there at the end
+ ref_tl += ['time_total']
+
+ # assert all events in reference timeline are > 0
+ for key in ref_tl:
+ self.check_stat_positive(s, idx, key)
+ if exact_match:
+ # assert all events not in reference timeline are 0
+ for key in [key for key in all_keys if key not in ref_tl]:
+ self.check_stat_zero(s, key)
+ # sort the expected events by their reported timestamps; the order
+ # that actually happened must equal the expected reference order
+ seen_tl = sorted(ref_tl, key=lambda ts: s[ts])
+ assert seen_tl == ref_tl, f'{[f"{ts}: {s[ts]}" for ts in seen_tl]}'
+
def dump_logs(self):
lines = ['>>--stdout ----------------------------------------------\n']
lines.extend(self._stdout)
(time_pretransfer / 1000000),
(long)(time_pretransfer % 1000000));
}
- if(time_pretransfer > time_posttransfer) {
+ if(time_posttransfer > time_pretransfer) {
+ /* counter-intuitive: on a GET request, all bytes are sent *before*
+ * PRETRANSFER happens. Thus POSTTRANSFER has to be smaller.
+ * The reverse would be true for a POST/PUT. */
curl_mfprintf(moo, "pretransfer vs posttransfer: %"
CURL_FORMAT_CURL_OFF_T
".%06ld %" CURL_FORMAT_CURL_OFF_T ".%06ld\n",