cf->next = ctx->ballers.winner->cf;
ctx->ballers.winner->cf = NULL;
cf_ip_happy_ctx_clear(cf, data);
+ Curl_expire_done(data, EXPIRE_HAPPY_EYEBALLS);
if(cf->conn->handler->protocol & PROTO_FAMILY_SSH)
Curl_pgrsTime(data, TIMER_APPCONNECT); /* we are connected already */
CURL_LOG_LVL_NONE,
};
+/* Human-readable names for expire timer ids, used in trace output.
+ * NOTE(review): indexed by the numeric timer id, so the order must
+ * stay in sync with the expire_id enum -- confirm when ids change. */
+static const char * const Curl_trc_timer_names[]={
+  "100_TIMEOUT",
+  "ASYNC_NAME",
+  "CONNECTTIMEOUT",
+  "DNS_PER_NAME",
+  "DNS_PER_NAME2",
+  "HAPPY_EYEBALLS_DNS",
+  "HAPPY_EYEBALLS",
+  "MULTI_PENDING",
+  "SPEEDCHECK",
+  "TIMEOUT",
+  "TOOFAST",
+  "QUIC",
+  "FTP_ACCEPT",
+  "ALPN_EYEBALLS",
+  "SHUTDOWN",
+};
+
+/* Return the trace name for a timer id; "UNKNOWN?" for ids outside
+ * the known range (negative or beyond the name table). */
+const char *Curl_trc_timer_name(int tid)
+{
+  if((tid >= 0) && ((size_t)tid < CURL_ARRAYSIZE(Curl_trc_timer_names)))
+    return Curl_trc_timer_names[(size_t)tid];
+  return "UNKNOWN?";
+}
+
+/* Trace all pending expire timers of a transfer together with their
+ * remaining time. The outer `if` avoids calling curlx_now() when the
+ * timeout list is empty. */
+void Curl_trc_multi_timeouts(struct Curl_easy *data)
+{
+  struct Curl_llist_node *e = Curl_llist_head(&data->state.timeoutlist);
+  if(e) {
+    struct curltime now = curlx_now();
+    while(e) {
+      struct time_node *n = Curl_node_elem(e);
+      e = Curl_node_next(e);
+      /* curlx_timediff_us() yields microseconds, label accordingly
+       * (was mislabeled "ns") */
+      CURL_TRC_M(data, "[TIMEOUT] %s expires in %" FMT_TIMEDIFF_T "us",
+                 CURL_TIMER_NAME(n->eid),
+                 curlx_timediff_us(n->time, now));
+    }
+  }
+}
static const char * const Curl_trc_mstate_names[]={
"INIT",
void Curl_trc_multi(struct Curl_easy *data,
const char *fmt, ...) CURL_PRINTF(2, 3);
const char *Curl_trc_mstate_name(int state);
+const char *Curl_trc_timer_name(int tid);
+void Curl_trc_multi_timeouts(struct Curl_easy *data);
+
void Curl_trc_write(struct Curl_easy *data,
const char *fmt, ...) CURL_PRINTF(2, 3);
void Curl_trc_read(struct Curl_easy *data,
const char *fmt, ...) CURL_PRINTF(2, 3);
#endif
+#define CURL_TRC_M_is_verbose(data) \
+ Curl_trc_ft_is_verbose(data, &Curl_trc_feat_multi)
+
#if defined(CURL_HAVE_C99) && !defined(CURL_DISABLE_VERBOSE_STRINGS)
#define infof(data, ...) \
do { if(Curl_trc_is_verbose(data)) \
Curl_infof(data, __VA_ARGS__); } while(0)
#define CURL_TRC_M(data, ...) \
- do { if(Curl_trc_ft_is_verbose(data, &Curl_trc_feat_multi)) \
+ do { if(CURL_TRC_M_is_verbose(data)) \
Curl_trc_multi(data, __VA_ARGS__); } while(0)
#define CURL_TRC_CF(data, cf, ...) \
do { if(Curl_trc_cf_is_verbose(cf, data)) \
(Curl_trc_is_verbose(data) && \
(ft)->log_level >= CURL_LOG_LVL_INFO)
#define CURL_MSTATE_NAME(s) Curl_trc_mstate_name((int)(s))
+#define CURL_TIMER_NAME(t) Curl_trc_timer_name((int)(t))
+#define CURL_TRC_M_TIMEOUTS(data) \
+ do { if(CURL_TRC_M_is_verbose(data)) \
+ Curl_trc_multi_timeouts(data); } while(0)
#else /* CURL_DISABLE_VERBOSE_STRINGS */
/* All informational messages are not compiled in for size savings */
#define Curl_trc_cf_is_verbose(x,y) (FALSE)
#define Curl_trc_ft_is_verbose(x,y) (FALSE)
#define CURL_MSTATE_NAME(x) ((void)(x), "-")
+#define CURL_TIMER_NAME(x) ((void)(x), "-")
+#define CURL_TRC_M_TIMEOUTS(x) Curl_nop_stmt
#endif /* !CURL_DISABLE_VERBOSE_STRINGS */
/* Unblocked and waiting to receive with buffered input.
* Make transfer run again at next opportunity. */
- if(!Curl_xfer_is_blocked(data) &&
+ if(!Curl_xfer_is_blocked(data) && !Curl_xfer_is_too_fast(data) &&
((Curl_pollset_want_read(data, ps, data->conn->sock[FIRSTSOCKET]) &&
Curl_conn_data_pending(data, FIRSTSOCKET)) ||
(Curl_pollset_want_read(data, ps, data->conn->sock[SECONDARYSOCKET]) &&
Curl_multi_mark_dirty(data);
}
- switch(ps->n) {
- case 0:
- CURL_TRC_M(data, "%s pollset[], timeouts=%zu, paused %d/%d (r/w)",
- caller, Curl_llist_count(&data->state.timeoutlist),
- Curl_xfer_send_is_paused(data),
- Curl_xfer_recv_is_paused(data));
- break;
- case 1:
- CURL_TRC_M(data, "%s pollset[fd=%" FMT_SOCKET_T " %s%s], timeouts=%zu",
- caller, ps->sockets[0],
- (ps->actions[0] & CURL_POLL_IN) ? "IN" : "",
- (ps->actions[0] & CURL_POLL_OUT) ? "OUT" : "",
- Curl_llist_count(&data->state.timeoutlist));
- break;
- case 2:
- CURL_TRC_M(data, "%s pollset[fd=%" FMT_SOCKET_T " %s%s, "
- "fd=%" FMT_SOCKET_T " %s%s], timeouts=%zu",
- caller, ps->sockets[0],
- (ps->actions[0] & CURL_POLL_IN) ? "IN" : "",
- (ps->actions[0] & CURL_POLL_OUT) ? "OUT" : "",
- ps->sockets[1],
- (ps->actions[1] & CURL_POLL_IN) ? "IN" : "",
- (ps->actions[1] & CURL_POLL_OUT) ? "OUT" : "",
- Curl_llist_count(&data->state.timeoutlist));
- break;
- default:
- CURL_TRC_M(data, "%s pollset[fds=%u], timeouts=%zu",
- caller, ps->n, Curl_llist_count(&data->state.timeoutlist));
- break;
+ if(CURL_TRC_M_is_verbose(data)) {
+ size_t timeout_count = Curl_llist_count(&data->state.timeoutlist);
+ switch(ps->n) {
+ case 0:
+ CURL_TRC_M(data, "%s pollset[], timeouts=%zu, paused %d/%d (r/w)",
+ caller, timeout_count,
+ Curl_xfer_send_is_paused(data),
+ Curl_xfer_recv_is_paused(data));
+ break;
+ case 1:
+ CURL_TRC_M(data, "%s pollset[fd=%" FMT_SOCKET_T " %s%s], timeouts=%zu",
+ caller, ps->sockets[0],
+ (ps->actions[0] & CURL_POLL_IN) ? "IN" : "",
+ (ps->actions[0] & CURL_POLL_OUT) ? "OUT" : "",
+ timeout_count);
+ break;
+ case 2:
+ CURL_TRC_M(data, "%s pollset[fd=%" FMT_SOCKET_T " %s%s, "
+ "fd=%" FMT_SOCKET_T " %s%s], timeouts=%zu",
+ caller, ps->sockets[0],
+ (ps->actions[0] & CURL_POLL_IN) ? "IN" : "",
+ (ps->actions[0] & CURL_POLL_OUT) ? "OUT" : "",
+ ps->sockets[1],
+ (ps->actions[1] & CURL_POLL_IN) ? "IN" : "",
+ (ps->actions[1] & CURL_POLL_OUT) ? "OUT" : "",
+ timeout_count);
+ break;
+ default:
+ CURL_TRC_M(data, "%s pollset[fds=%u], timeouts=%zu",
+ caller, ps->n, timeout_count);
+ break;
+ }
+ CURL_TRC_M_TIMEOUTS(data);
}
+
if(expect_sockets && !ps->n && data->multi &&
!Curl_uint_bset_contains(&data->multi->dirty, data->mid) &&
!Curl_llist_count(&data->state.timeoutlist) &&
return CURLE_TOO_MANY_REDIRECTS;
}
+/* Check the transfer against its configured send/recv speed limits.
+ * Returns CURLE_AGAIN when a limit is exceeded: the transfer is moved
+ * to MSTATE_RATELIMITING (if not already there), an EXPIRE_TOOFAST
+ * timer is set to the longer of the two required waits and the
+ * transfer is removed from the dirty set so it is not rerun before
+ * the timer fires.
+ * Returns CURLE_OK when within limits, re-entering MSTATE_PERFORMING
+ * if the transfer had been rate limited. */
+static CURLcode mspeed_check(struct Curl_easy *data,
+                             struct curltime *nowp)
+{
+  timediff_t recv_wait_ms = 0;
+  timediff_t send_wait_ms = 0;
+
+  /* check if over send speed */
+  if(data->set.max_send_speed)
+    send_wait_ms = Curl_pgrsLimitWaitTime(&data->progress.ul,
+                                          data->set.max_send_speed,
+                                          *nowp);
+
+  /* check if over recv speed */
+  if(data->set.max_recv_speed)
+    recv_wait_ms = Curl_pgrsLimitWaitTime(&data->progress.dl,
+                                          data->set.max_recv_speed,
+                                          *nowp);
+
+  if(send_wait_ms || recv_wait_ms) {
+    if(data->mstate != MSTATE_RATELIMITING) {
+      Curl_ratelimit(data, *nowp);
+      multistate(data, MSTATE_RATELIMITING);
+    }
+    /* wait long enough to get below both limits again */
+    Curl_expire(data, CURLMAX(send_wait_ms, recv_wait_ms), EXPIRE_TOOFAST);
+    /* no point in running this transfer before the timer expires */
+    Curl_multi_clear_dirty(data);
+    return CURLE_AGAIN;
+  }
+  else if(data->mstate != MSTATE_PERFORMING) {
+    multistate(data, MSTATE_PERFORMING);
+    Curl_ratelimit(data, *nowp);
+  }
+  return CURLE_OK;
+}
+
static CURLMcode state_performing(struct Curl_easy *data,
struct curltime *nowp,
bool *stream_errorp,
{
char *newurl = NULL;
bool retry = FALSE;
- timediff_t recv_timeout_ms = 0;
- timediff_t send_timeout_ms = 0;
CURLMcode rc = CURLM_OK;
CURLcode result = *resultp = CURLE_OK;
*stream_errorp = FALSE;
- /* check if over send speed */
- if(data->set.max_send_speed)
- send_timeout_ms = Curl_pgrsLimitWaitTime(&data->progress.ul,
- data->set.max_send_speed,
- *nowp);
-
- /* check if over recv speed */
- if(data->set.max_recv_speed)
- recv_timeout_ms = Curl_pgrsLimitWaitTime(&data->progress.dl,
- data->set.max_recv_speed,
- *nowp);
-
- if(send_timeout_ms || recv_timeout_ms) {
- Curl_ratelimit(data, *nowp);
- multistate(data, MSTATE_RATELIMITING);
- if(send_timeout_ms >= recv_timeout_ms)
- Curl_expire(data, send_timeout_ms, EXPIRE_TOOFAST);
- else
- Curl_expire(data, recv_timeout_ms, EXPIRE_TOOFAST);
+ if(mspeed_check(data, nowp) == CURLE_AGAIN)
return CURLM_OK;
- }
/* read/write data if it is ready to do so */
result = Curl_sendrecv(data, nowp);
}
}
}
+ else { /* not errored, not done */
+ mspeed_check(data, nowp);
+ }
free(newurl);
*resultp = result;
return rc;
multi_done(data, result, TRUE);
}
else {
- timediff_t recv_timeout_ms = 0;
- timediff_t send_timeout_ms = 0;
- if(data->set.max_send_speed)
- send_timeout_ms =
- Curl_pgrsLimitWaitTime(&data->progress.ul,
- data->set.max_send_speed,
- *nowp);
-
- if(data->set.max_recv_speed)
- recv_timeout_ms =
- Curl_pgrsLimitWaitTime(&data->progress.dl,
- data->set.max_recv_speed,
- *nowp);
-
- if(!send_timeout_ms && !recv_timeout_ms) {
- multistate(data, MSTATE_PERFORMING);
- Curl_ratelimit(data, *nowp);
- /* start performing again right away */
+ if(!mspeed_check(data, nowp))
rc = CURLM_CALL_MULTI_PERFORM;
- }
- else if(send_timeout_ms >= recv_timeout_ms)
- Curl_expire(data, send_timeout_ms, EXPIRE_TOOFAST);
- else
- Curl_expire(data, recv_timeout_ms, EXPIRE_TOOFAST);
}
*resultp = result;
return rc;
multi->timetree = Curl_splayinsert(*curr_expire, multi->timetree,
&data->state.timenode);
if(data->id >= 0)
- CURL_TRC_M(data, "set expire[%d] in %" FMT_TIMEDIFF_T "ns",
- id, curlx_timediff_us(set, *nowp));
+ CURL_TRC_M(data, "[TIMEOUT] set %s to expire in %" FMT_TIMEDIFF_T "ns",
+ CURL_TIMER_NAME(id), curlx_timediff_us(set, *nowp));
}
/*
{
/* remove the timer, if there */
multi_deltimeout(data, id);
+ if(data->id >= 0)
+ CURL_TRC_M(data, "[TIMEOUT] cleared %s", CURL_TIMER_NAME(id));
}
/*
/* clear the timeout list too */
Curl_llist_destroy(list, NULL);
- CURL_TRC_M(data, "Expire cleared");
+ if(data->id >= 0)
+ CURL_TRC_M(data, "[TIMEOUT] all cleared");
nowp->tv_sec = 0;
nowp->tv_usec = 0;
return TRUE;
Curl_uint_bset_add(&data->multi->dirty, data->mid);
}
+/* Remove the transfer from its multi's dirty set. A no-op when the
+ * transfer is not attached to a multi or carries no valid mid. */
+void Curl_multi_clear_dirty(struct Curl_easy *data)
+{
+  if(data->multi && data->mid != UINT_MAX)
+    Curl_uint_bset_remove(&data->multi->dirty, data->mid);
+}
+
#ifdef DEBUGBUILD
static void multi_xfer_dump(struct Curl_multi *multi, unsigned int mid,
void *entry)
/* Mark a transfer as dirty, e.g. to be rerun at earliest convenience.
* A cheap operation, can be done many times repeatedly. */
void Curl_multi_mark_dirty(struct Curl_easy *data);
+/* Clear transfer from the dirty set. */
+void Curl_multi_clear_dirty(struct Curl_easy *data);
#endif /* HEADER_CURL_MULTIIF_H */
* to wait to get back under the speed limit.
*/
timediff_t Curl_pgrsLimitWaitTime(struct pgrs_dir *d,
-                                  curl_off_t speed_limit,
+                                  curl_off_t bytes_per_sec,
                                  struct curltime now)
{
+  /* returns the milliseconds to wait before transferring more data;
+   * 0 means no waiting is needed */
-  curl_off_t size = d->cur_size - d->limit.start_size;
-  timediff_t minimum;
-  timediff_t actual;
+  curl_off_t bytes = d->cur_size - d->limit.start_size;
+  timediff_t should_ms;
+  timediff_t took_ms;
-  if(!speed_limit || !size)
+  /* no limit or we did not get to any bytes yet */
+  if(!bytes_per_sec || !bytes)
    return 0;
-  /*
-   * 'minimum' is the number of milliseconds 'size' should take to download to
-   * stay below 'limit'.
-   */
-  if(size < CURL_OFF_T_MAX/1000)
-    minimum = (timediff_t) (1000 * size / speed_limit);
+  /* The time it took us to have `bytes` */
+  took_ms = curlx_timediff_ceil(now, d->limit.start);
+
+  /* The time it *should* have taken us to have `bytes`
+   * when obeying the bytes_per_sec speed_limit. */
+  if(bytes < CURL_OFF_T_MAX/1000) {
+    /* (1000 * bytes / (bytes / sec)) = 1000 * sec = ms */
+    should_ms = (timediff_t) (1000 * bytes / bytes_per_sec);
+  }
  else {
-    minimum = (timediff_t) (size / speed_limit);
-    if(minimum < TIMEDIFF_T_MAX/1000)
-      minimum *= 1000;
+    /* very large `bytes`, first calc the seconds it should have taken.
+     * if that is small enough, convert to milliseconds. */
+    should_ms = (timediff_t) (bytes / bytes_per_sec);
+    if(should_ms < TIMEDIFF_T_MAX/1000)
+      should_ms *= 1000;
    else
-      minimum = TIMEDIFF_T_MAX;
+      should_ms = TIMEDIFF_T_MAX;
  }
-  /*
-   * 'actual' is the time in milliseconds it took to actually download the
-   * last 'size' bytes.
-   */
-  actual = curlx_timediff_ceil(now, d->limit.start);
-  if(actual < minimum) {
-    /* if it downloaded the data faster than the limit, make it wait the
-       difference */
-    return minimum - actual;
+  if(took_ms < should_ms) {
+    /* when gotten to `bytes` too fast, wait the difference */
+    return should_ms - took_ms;
  }
-
  return 0;
}
if(bytestoread && data->set.max_recv_speed > 0) {
/* In case of speed limit on receiving: if this loop already got
- * data, break out. If not, limit the amount of bytes to receive.
- * The overall, timed, speed limiting is done in multi.c */
- if(total_received)
+ * a quarter of the quota, break out. We want to stutter a bit
+ * to keep in the limit, but too small receives will just cost
+ * cpu unnecessarily. */
+ if(total_received >= (data->set.max_recv_speed / 4))
break;
if(data->set.max_recv_speed < (curl_off_t)bytestoread)
bytestoread = (size_t)data->set.max_recv_speed;
Curl_conn_ev_data_pause(data, enable);
return result;
}
+
+/* TRUE when the transfer currently has an EXPIRE_TOOFAST timer set,
+ * i.e. it is waiting because of speed limiting. Linear scan of the
+ * (short) per-transfer timeout list. */
+bool Curl_xfer_is_too_fast(struct Curl_easy *data)
+{
+  struct Curl_llist_node *e = Curl_llist_head(&data->state.timeoutlist);
+  while(e) {
+    struct time_node *n = Curl_node_elem(e);
+    e = Curl_node_next(e);
+    if(n->eid == EXPIRE_TOOFAST)
+      return TRUE;
+  }
+  return FALSE;
+}
CURLcode Curl_xfer_pause_send(struct Curl_easy *data, bool enable);
CURLcode Curl_xfer_pause_recv(struct Curl_easy *data, bool enable);
+/* Query if transfer has expire timeout TOOFAST set. */
+bool Curl_xfer_is_too_fast(struct Curl_easy *data);
#endif /* HEADER_CURL_TRANSFER_H */
'curlx_base64_decode' => 'internal api',
'curlx_base64_encode' => 'internal api',
'curlx_base64url_encode' => 'internal api',
+ 'Curl_multi_clear_dirty' => 'internal api',
);
my %api = (
    @classmethod
    def fmt_mbs(cls, val):
+        # Format a bytes-per-second value for display, auto-scaled to
+        # B/s, KB/s or MB/s; '--' for missing or negative values.
+        # NOTE(review): the '0.000f' spec means precision 0 (no decimal
+        # places), not three -- confirm '.3f' was not intended; the
+        # replaced line used the same spec, so output stays consistent.
-        return f'{val/(1024*1024):0.000f} MB/s' if val >= 0 else '--'
+        if val is None or val < 0:
+            return '--'
+        if val >= (1024*1024):
+            return f'{val/(1024*1024):0.000f} MB/s'
+        elif val >= 1024:
+            return f'{val / 1024:0.000f} KB/s'
+        else:
+            return f'{val:0.000f} B/s'
@classmethod
def fmt_reqs(cls, val):
print(f'Version: {score["meta"]["curl_V"]}')
if 'curl_features' in score["meta"]:
print(f'Features: {score["meta"]["curl_features"]}')
+ if 'limit-rate' in score['meta']:
+ print(f'--limit-rate: {score["meta"]["limit-rate"]}')
print(f'Samples Size: {score["meta"]["samples"]}')
if 'handshakes' in score:
print(f'{"Handshakes":<24} {"ipv4":25} {"ipv6":28}')
server_addr: Optional[str] = None,
with_dtrace: bool = False,
with_flame: bool = False,
- socks_args: Optional[List[str]] = None):
+ socks_args: Optional[List[str]] = None,
+ limit_rate: Optional[str] = None):
self.verbose = verbose
self.env = env
self.protocol = protocol
self._with_dtrace = with_dtrace
self._with_flame = with_flame
self._socks_args = socks_args
+ self._limit_rate = limit_rate
def info(self, msg):
if self.verbose > 0:
curl = self.mk_curl_client()
r = curl.http_download(urls=[url], alpn_proto=self.protocol,
no_save=True, with_headers=False,
- with_profile=True)
+ with_profile=True,
+ limit_rate=self._limit_rate)
err = self._check_downloads(r, count)
if err:
errors.append(err)
curl = self.mk_curl_client()
r = curl.http_download(urls=[url], alpn_proto=self.protocol,
no_save=True,
- with_headers=False, with_profile=True)
+ with_headers=False,
+ with_profile=True,
+ limit_rate=self._limit_rate)
err = self._check_downloads(r, count)
if err:
errors.append(err)
no_save=True,
with_headers=False,
with_profile=True,
+ limit_rate=self._limit_rate,
extra_args=[
'--parallel',
'--parallel-max', str(max_parallel)
'date': f'{datetime.datetime.now(tz=datetime.timezone.utc).isoformat()}',
}
}
+ if self._limit_rate:
+ score['meta']['limit-rate'] = self._limit_rate
+
if self.protocol == 'h3':
score['meta']['protocol'] = 'h3'
if not self.env.have_h3_curl():
download_parallel=args.download_parallel,
with_dtrace=args.dtrace,
with_flame=args.flame,
- socks_args=socks_args)
+ socks_args=socks_args,
+ limit_rate=args.limit_rate)
cards.append(card)
if test_httpd:
download_parallel=args.download_parallel,
with_dtrace=args.dtrace,
with_flame=args.flame,
- socks_args=socks_args)
+ socks_args=socks_args,
+ limit_rate=args.limit_rate)
card.setup_resources(server_docs, downloads)
cards.append(card)
verbose=args.verbose, curl_verbose=args.curl_verbose,
download_parallel=args.download_parallel,
with_dtrace=args.dtrace,
- socks_args=socks_args)
+ socks_args=socks_args,
+ limit_rate=args.limit_rate)
card.setup_resources(server_docs, downloads)
cards.append(card)
default = False, help="produce dtrace of curl")
parser.add_argument("--flame", action='store_true',
default = False, help="produce a flame graph on curl, implies --dtrace")
+ parser.add_argument("--limit-rate", action='store', type=str,
+ default=None, help="use curl's --limit-rate")
parser.add_argument("-H", "--handshakes", action='store_true',
default=False, help="evaluate handshakes only")
with_profile: bool = False,
with_tcpdump: bool = False,
no_save: bool = False,
+ limit_rate: Optional[str] = None,
extra_args: Optional[List[str]] = None):
if extra_args is None:
extra_args = []
if no_save:
- extra_args.extend([
- '--out-null',
- ])
+ extra_args.extend(['--out-null'])
else:
- extra_args.extend([
- '-o', 'download_#1.data',
- ])
+ extra_args.extend(['-o', 'download_#1.data'])
+ if limit_rate:
+ extra_args.extend(['--limit-rate', limit_rate])
# remove any existing ones
for i in range(100):
self._rmf(self.download_file(i))