multi: limit-rate revisited
author     Stefan Eissing <stefan@eissing.org>
           Tue, 2 Sep 2025 13:16:21 +0000 (15:16 +0200)
committer  Daniel Stenberg <daniel@haxx.se>
           Wed, 3 Sep 2025 13:53:41 +0000 (15:53 +0200)
Tweaks around handling of --limit-rate:

* tracing: trace outstanding timeouts by name
* multi: do not mark transfers as dirty that have
  an EXPIRE_TOOFAST timeout set
* multi: have one static function to assess speed limits
* multi: when setting EXPIRE_TOOFAST remove the transfers
  from the dirty set
* progress: rename vars and comment on how speed limit
  timeouts are calculated, for clarity
* transfer: when speed limiting, exit the receive loop
  after a quarter of the limit has been received instead
  of after the first chunk.
* cf-ip-happy.c: clear EXPIRE_HAPPY_EYEBALLS on connect
* scorecard: add --limit-rate parameter to test with
  speed limits in effect

Closes #18454
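The progress.c change listed above is about how the speed-limit wait is calculated: Curl_pgrsLimitWaitTime() compares how long the transferred bytes actually took against how long they should have taken at the configured limit, and returns the difference. A minimal standalone sketch of that arithmetic with a worked example (illustrative names only, not curl's actual code):

  /* Standalone sketch of the speed-limit wait computation: given the bytes
   * moved since the limit window started, the configured bytes-per-second
   * limit and the elapsed milliseconds, return how long to pause. */
  #include <stdint.h>
  #include <stdio.h>

  static int64_t limit_wait_ms(int64_t bytes, int64_t bytes_per_sec,
                               int64_t took_ms)
  {
    int64_t should_ms;
    if(!bytes_per_sec || !bytes)
      return 0;
    /* time the bytes *should* have taken at the configured limit */
    if(bytes < INT64_MAX / 1000)
      should_ms = 1000 * bytes / bytes_per_sec;
    else {
      /* very large byte counts: compute seconds first to avoid overflow */
      should_ms = bytes / bytes_per_sec;
      should_ms = (should_ms < INT64_MAX / 1000) ? should_ms * 1000 : INT64_MAX;
    }
    /* faster than allowed: wait out the difference */
    return (took_ms < should_ms) ? (should_ms - took_ms) : 0;
  }

  int main(void)
  {
    /* 256 KiB received in 1 second under a 64 KiB/s limit should have
     * taken 4 seconds, so wait the remaining 3000 ms */
    printf("wait %lld ms\n",
           (long long)limit_wait_ms(256 * 1024, 64 * 1024, 1000));
    return 0;
  }

In the patch, that returned wait is what multi.c turns into the EXPIRE_TOOFAST timeout.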

lib/cf-ip-happy.c
lib/curl_trc.c
lib/curl_trc.h
lib/multi.c
lib/multiif.h
lib/progress.c
lib/transfer.c
lib/transfer.h
scripts/singleuse.pl
tests/http/scorecard.py
tests/http/testenv/curl.py

diff --git a/lib/cf-ip-happy.c b/lib/cf-ip-happy.c
index 59b34ae46ffde4e1b45cc8d6231c6c430a5aa604..5e4c20444e79bd32ab02a89fab072f28154b2e5a 100644 (file)
@@ -767,6 +767,7 @@ static CURLcode cf_ip_happy_connect(struct Curl_cfilter *cf,
         cf->next = ctx->ballers.winner->cf;
         ctx->ballers.winner->cf = NULL;
         cf_ip_happy_ctx_clear(cf, data);
+        Curl_expire_done(data, EXPIRE_HAPPY_EYEBALLS);
 
         if(cf->conn->handler->protocol & PROTO_FAMILY_SSH)
           Curl_pgrsTime(data, TIMER_APPCONNECT); /* we are connected already */
diff --git a/lib/curl_trc.c b/lib/curl_trc.c
index 8c01eff14247b6c3fac4bac20c604782dc250f6a..52671f4eaeb4ec2c98d03ee6137c9b019b92ab0e 100644 (file)
@@ -273,6 +273,45 @@ struct curl_trc_feat Curl_trc_feat_dns = {
   CURL_LOG_LVL_NONE,
 };
 
+static const char * const Curl_trc_timer_names[]={
+  "100_TIMEOUT",
+  "ASYNC_NAME",
+  "CONNECTTIMEOUT",
+  "DNS_PER_NAME",
+  "DNS_PER_NAME2",
+  "HAPPY_EYEBALLS_DNS",
+  "HAPPY_EYEBALLS",
+  "MULTI_PENDING",
+  "SPEEDCHECK",
+  "TIMEOUT",
+  "TOOFAST",
+  "QUIC",
+  "FTP_ACCEPT",
+  "ALPN_EYEBALLS",
+  "SHUTDOWN",
+};
+
+const char *Curl_trc_timer_name(int tid)
+{
+  if((tid >= 0) && ((size_t)tid < CURL_ARRAYSIZE(Curl_trc_timer_names)))
+    return Curl_trc_timer_names[(size_t)tid];
+  return "UNKNOWN?";
+}
+
+void Curl_trc_multi_timeouts(struct Curl_easy *data)
+{
+  struct Curl_llist_node *e = Curl_llist_head(&data->state.timeoutlist);
+  if(e) {
+    struct curltime now = curlx_now();
+    while(e) {
+      struct time_node *n = Curl_node_elem(e);
+      e = Curl_node_next(e);
+      CURL_TRC_M(data, "[TIMEOUT] %s expires in %" FMT_TIMEDIFF_T "ns",
+                 CURL_TIMER_NAME(n->eid),
+                 curlx_timediff_us(n->time, now));
+    }
+  }
+}
 
 static const char * const Curl_trc_mstate_names[]={
   "INIT",
diff --git a/lib/curl_trc.h b/lib/curl_trc.h
index 6db1496c7708286018b11826e76c5a4c9e2eeb62..fa0999250fbe82fedbdbf1b52a2ce0bf98a3d2eb 100644 (file)
@@ -85,6 +85,9 @@ void Curl_trc_cf_infof(struct Curl_easy *data, const struct Curl_cfilter *cf,
 void Curl_trc_multi(struct Curl_easy *data,
                     const char *fmt, ...) CURL_PRINTF(2, 3);
 const char *Curl_trc_mstate_name(int state);
+const char *Curl_trc_timer_name(int tid);
+void Curl_trc_multi_timeouts(struct Curl_easy *data);
+
 void Curl_trc_write(struct Curl_easy *data,
                     const char *fmt, ...) CURL_PRINTF(2, 3);
 void Curl_trc_read(struct Curl_easy *data,
@@ -113,12 +116,15 @@ void Curl_trc_ws(struct Curl_easy *data,
                  const char *fmt, ...) CURL_PRINTF(2, 3);
 #endif
 
+#define CURL_TRC_M_is_verbose(data) \
+  Curl_trc_ft_is_verbose(data, &Curl_trc_feat_multi)
+
 #if defined(CURL_HAVE_C99) && !defined(CURL_DISABLE_VERBOSE_STRINGS)
 #define infof(data, ...) \
   do { if(Curl_trc_is_verbose(data)) \
          Curl_infof(data, __VA_ARGS__); } while(0)
 #define CURL_TRC_M(data, ...) \
-  do { if(Curl_trc_ft_is_verbose(data, &Curl_trc_feat_multi)) \
+  do { if(CURL_TRC_M_is_verbose(data)) \
          Curl_trc_multi(data, __VA_ARGS__); } while(0)
 #define CURL_TRC_CF(data, cf, ...) \
   do { if(Curl_trc_cf_is_verbose(cf, data)) \
@@ -202,6 +208,10 @@ extern struct curl_trc_feat Curl_trc_feat_dns;
             (Curl_trc_is_verbose(data) && \
              (ft)->log_level >= CURL_LOG_LVL_INFO)
 #define CURL_MSTATE_NAME(s)  Curl_trc_mstate_name((int)(s))
+#define CURL_TIMER_NAME(t)   Curl_trc_timer_name((int)(t))
+#define CURL_TRC_M_TIMEOUTS(data) \
+  do { if(CURL_TRC_M_is_verbose(data)) \
+         Curl_trc_multi_timeouts(data); } while(0)
 
 #else /* CURL_DISABLE_VERBOSE_STRINGS */
 /* All informational messages are not compiled in for size savings */
@@ -210,6 +220,8 @@ extern struct curl_trc_feat Curl_trc_feat_dns;
 #define Curl_trc_cf_is_verbose(x,y)   (FALSE)
 #define Curl_trc_ft_is_verbose(x,y)   (FALSE)
 #define CURL_MSTATE_NAME(x)           ((void)(x), "-")
+#define CURL_TIMER_NAME(x)            ((void)(x), "-")
+#define CURL_TRC_M_TIMEOUTS(x)        Curl_nop_stmt
 
 #endif /* !CURL_DISABLE_VERBOSE_STRINGS */
 
diff --git a/lib/multi.c b/lib/multi.c
index 1c09b14b6b15cbf3e2e3003b9d1a308b4fb954a9..99746a4ce11bf30b565a7f2e7fa4b625cda29633 100644 (file)
@@ -1093,7 +1093,7 @@ CURLMcode Curl_multi_pollset(struct Curl_easy *data,
 
   /* Unblocked and waiting to receive with buffered input.
    * Make transfer run again at next opportunity. */
-  if(!Curl_xfer_is_blocked(data) &&
+  if(!Curl_xfer_is_blocked(data) && !Curl_xfer_is_too_fast(data) &&
      ((Curl_pollset_want_read(data, ps, data->conn->sock[FIRSTSOCKET]) &&
        Curl_conn_data_pending(data, FIRSTSOCKET)) ||
       (Curl_pollset_want_read(data, ps, data->conn->sock[SECONDARYSOCKET]) &&
@@ -1103,36 +1103,41 @@ CURLMcode Curl_multi_pollset(struct Curl_easy *data,
     Curl_multi_mark_dirty(data);
   }
 
-  switch(ps->n) {
-    case 0:
-      CURL_TRC_M(data, "%s pollset[], timeouts=%zu, paused %d/%d (r/w)",
-                 caller, Curl_llist_count(&data->state.timeoutlist),
-                 Curl_xfer_send_is_paused(data),
-                 Curl_xfer_recv_is_paused(data));
-      break;
-    case 1:
-      CURL_TRC_M(data, "%s pollset[fd=%" FMT_SOCKET_T " %s%s], timeouts=%zu",
-                 caller, ps->sockets[0],
-                 (ps->actions[0] & CURL_POLL_IN) ? "IN" : "",
-                 (ps->actions[0] & CURL_POLL_OUT) ? "OUT" : "",
-                 Curl_llist_count(&data->state.timeoutlist));
-      break;
-    case 2:
-      CURL_TRC_M(data, "%s pollset[fd=%" FMT_SOCKET_T " %s%s, "
-                 "fd=%" FMT_SOCKET_T " %s%s], timeouts=%zu",
-                 caller, ps->sockets[0],
-                 (ps->actions[0] & CURL_POLL_IN) ? "IN" : "",
-                 (ps->actions[0] & CURL_POLL_OUT) ? "OUT" : "",
-                 ps->sockets[1],
-                 (ps->actions[1] & CURL_POLL_IN) ? "IN" : "",
-                 (ps->actions[1] & CURL_POLL_OUT) ? "OUT" : "",
-                 Curl_llist_count(&data->state.timeoutlist));
-      break;
-    default:
-      CURL_TRC_M(data, "%s pollset[fds=%u], timeouts=%zu",
-                 caller, ps->n, Curl_llist_count(&data->state.timeoutlist));
-      break;
+  if(CURL_TRC_M_is_verbose(data)) {
+    size_t timeout_count = Curl_llist_count(&data->state.timeoutlist);
+    switch(ps->n) {
+      case 0:
+        CURL_TRC_M(data, "%s pollset[], timeouts=%zu, paused %d/%d (r/w)",
+                   caller, timeout_count,
+                   Curl_xfer_send_is_paused(data),
+                   Curl_xfer_recv_is_paused(data));
+        break;
+      case 1:
+        CURL_TRC_M(data, "%s pollset[fd=%" FMT_SOCKET_T " %s%s], timeouts=%zu",
+                   caller, ps->sockets[0],
+                   (ps->actions[0] & CURL_POLL_IN) ? "IN" : "",
+                   (ps->actions[0] & CURL_POLL_OUT) ? "OUT" : "",
+                   timeout_count);
+        break;
+      case 2:
+        CURL_TRC_M(data, "%s pollset[fd=%" FMT_SOCKET_T " %s%s, "
+                   "fd=%" FMT_SOCKET_T " %s%s], timeouts=%zu",
+                   caller, ps->sockets[0],
+                   (ps->actions[0] & CURL_POLL_IN) ? "IN" : "",
+                   (ps->actions[0] & CURL_POLL_OUT) ? "OUT" : "",
+                   ps->sockets[1],
+                   (ps->actions[1] & CURL_POLL_IN) ? "IN" : "",
+                   (ps->actions[1] & CURL_POLL_OUT) ? "OUT" : "",
+                   timeout_count);
+        break;
+      default:
+        CURL_TRC_M(data, "%s pollset[fds=%u], timeouts=%zu",
+                   caller, ps->n, timeout_count);
+        break;
+    }
+    CURL_TRC_M_TIMEOUTS(data);
   }
+
   if(expect_sockets && !ps->n && data->multi &&
      !Curl_uint_bset_contains(&data->multi->dirty, data->mid) &&
      !Curl_llist_count(&data->state.timeoutlist) &&
@@ -1880,6 +1885,40 @@ static CURLcode multi_follow(struct Curl_easy *data,
   return CURLE_TOO_MANY_REDIRECTS;
 }
 
+static CURLcode mspeed_check(struct Curl_easy *data,
+                             struct curltime *nowp)
+{
+  timediff_t recv_wait_ms = 0;
+  timediff_t send_wait_ms = 0;
+
+  /* check if over send speed */
+  if(data->set.max_send_speed)
+    send_wait_ms = Curl_pgrsLimitWaitTime(&data->progress.ul,
+                                          data->set.max_send_speed,
+                                          *nowp);
+
+  /* check if over recv speed */
+  if(data->set.max_recv_speed)
+    recv_wait_ms = Curl_pgrsLimitWaitTime(&data->progress.dl,
+                                          data->set.max_recv_speed,
+                                          *nowp);
+
+  if(send_wait_ms || recv_wait_ms) {
+    if(data->mstate != MSTATE_RATELIMITING) {
+      Curl_ratelimit(data, *nowp);
+      multistate(data, MSTATE_RATELIMITING);
+    }
+    Curl_expire(data, CURLMAX(send_wait_ms, recv_wait_ms), EXPIRE_TOOFAST);
+    Curl_multi_clear_dirty(data);
+    return CURLE_AGAIN;
+  }
+  else if(data->mstate != MSTATE_PERFORMING) {
+    multistate(data, MSTATE_PERFORMING);
+    Curl_ratelimit(data, *nowp);
+  }
+  return CURLE_OK;
+}
+
 static CURLMcode state_performing(struct Curl_easy *data,
                                   struct curltime *nowp,
                                   bool *stream_errorp,
@@ -1887,33 +1926,12 @@ static CURLMcode state_performing(struct Curl_easy *data,
 {
   char *newurl = NULL;
   bool retry = FALSE;
-  timediff_t recv_timeout_ms = 0;
-  timediff_t send_timeout_ms = 0;
   CURLMcode rc = CURLM_OK;
   CURLcode result = *resultp = CURLE_OK;
   *stream_errorp = FALSE;
 
-  /* check if over send speed */
-  if(data->set.max_send_speed)
-    send_timeout_ms = Curl_pgrsLimitWaitTime(&data->progress.ul,
-                                             data->set.max_send_speed,
-                                             *nowp);
-
-  /* check if over recv speed */
-  if(data->set.max_recv_speed)
-    recv_timeout_ms = Curl_pgrsLimitWaitTime(&data->progress.dl,
-                                             data->set.max_recv_speed,
-                                             *nowp);
-
-  if(send_timeout_ms || recv_timeout_ms) {
-    Curl_ratelimit(data, *nowp);
-    multistate(data, MSTATE_RATELIMITING);
-    if(send_timeout_ms >= recv_timeout_ms)
-      Curl_expire(data, send_timeout_ms, EXPIRE_TOOFAST);
-    else
-      Curl_expire(data, recv_timeout_ms, EXPIRE_TOOFAST);
+  if(mspeed_check(data, nowp) == CURLE_AGAIN)
     return CURLM_OK;
-  }
 
   /* read/write data if it is ready to do so */
   result = Curl_sendrecv(data, nowp);
@@ -2028,6 +2046,9 @@ static CURLMcode state_performing(struct Curl_easy *data,
       }
     }
   }
+  else { /* not errored, not done */
+    mspeed_check(data, nowp);
+  }
   free(newurl);
   *resultp = result;
   return rc;
@@ -2195,30 +2216,8 @@ static CURLMcode state_ratelimiting(struct Curl_easy *data,
     multi_done(data, result, TRUE);
   }
   else {
-    timediff_t recv_timeout_ms = 0;
-    timediff_t send_timeout_ms = 0;
-    if(data->set.max_send_speed)
-      send_timeout_ms =
-        Curl_pgrsLimitWaitTime(&data->progress.ul,
-                               data->set.max_send_speed,
-                               *nowp);
-
-    if(data->set.max_recv_speed)
-      recv_timeout_ms =
-        Curl_pgrsLimitWaitTime(&data->progress.dl,
-                               data->set.max_recv_speed,
-                               *nowp);
-
-    if(!send_timeout_ms && !recv_timeout_ms) {
-      multistate(data, MSTATE_PERFORMING);
-      Curl_ratelimit(data, *nowp);
-      /* start performing again right away */
+    if(!mspeed_check(data, nowp))
       rc = CURLM_CALL_MULTI_PERFORM;
-    }
-    else if(send_timeout_ms >= recv_timeout_ms)
-      Curl_expire(data, send_timeout_ms, EXPIRE_TOOFAST);
-    else
-      Curl_expire(data, recv_timeout_ms, EXPIRE_TOOFAST);
   }
   *resultp = result;
   return rc;
@@ -3584,8 +3583,8 @@ void Curl_expire_ex(struct Curl_easy *data,
   multi->timetree = Curl_splayinsert(*curr_expire, multi->timetree,
                                      &data->state.timenode);
   if(data->id >= 0)
-    CURL_TRC_M(data, "set expire[%d] in %" FMT_TIMEDIFF_T "ns",
-               id, curlx_timediff_us(set, *nowp));
+    CURL_TRC_M(data, "[TIMEOUT] set %s to expire in %" FMT_TIMEDIFF_T "ns",
+               CURL_TIMER_NAME(id), curlx_timediff_us(set, *nowp));
 }
 
 /*
@@ -3615,6 +3614,8 @@ void Curl_expire_done(struct Curl_easy *data, expire_id id)
 {
   /* remove the timer, if there */
   multi_deltimeout(data, id);
+  if(data->id >= 0)
+    CURL_TRC_M(data, "[TIMEOUT] cleared %s", CURL_TIMER_NAME(id));
 }
 
 /*
@@ -3646,7 +3647,8 @@ bool Curl_expire_clear(struct Curl_easy *data)
     /* clear the timeout list too */
     Curl_llist_destroy(list, NULL);
 
-    CURL_TRC_M(data, "Expire cleared");
+    if(data->id >= 0)
+      CURL_TRC_M(data, "[TIMEOUT] all cleared");
     nowp->tv_sec = 0;
     nowp->tv_usec = 0;
     return TRUE;
@@ -3974,6 +3976,12 @@ void Curl_multi_mark_dirty(struct Curl_easy *data)
     Curl_uint_bset_add(&data->multi->dirty, data->mid);
 }
 
+void Curl_multi_clear_dirty(struct Curl_easy *data)
+{
+  if(data->multi && data->mid != UINT_MAX)
+    Curl_uint_bset_remove(&data->multi->dirty, data->mid);
+}
+
 #ifdef DEBUGBUILD
 static void multi_xfer_dump(struct Curl_multi *multi, unsigned int mid,
                             void *entry)
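For applications driving libcurl's multi API directly, the EXPIRE_TOOFAST expiry set in multi.c is what shows up in the wait time reported by curl_multi_timeout() while a rate-limited transfer holds back. A rough usage sketch with the public API (URL and limit are placeholders, error checking omitted):

  /* Rough sketch: drive one rate-limited download with the multi API and
   * sleep for as long as libcurl asks between calls. The wait reported by
   * curl_multi_timeout() while the transfer is "too fast" comes from the
   * EXPIRE_TOOFAST expiry. */
  #include <curl/curl.h>

  int main(void)
  {
    CURL *easy;
    CURLM *multi;
    int running = 1;

    curl_global_init(CURL_GLOBAL_DEFAULT);
    easy = curl_easy_init();
    multi = curl_multi_init();

    curl_easy_setopt(easy, CURLOPT_URL, "https://example.org/"); /* placeholder */
    /* libcurl's side of --limit-rate 64k */
    curl_easy_setopt(easy, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)65536);
    curl_multi_add_handle(multi, easy);

    while(running) {
      long wait_ms = 0;
      curl_multi_perform(multi, &running);
      curl_multi_timeout(multi, &wait_ms);
      if(wait_ms < 0)
        wait_ms = 1000; /* no timer pending, pick a default */
      if(running)
        curl_multi_poll(multi, NULL, 0, (int)wait_ms, NULL);
    }

    curl_multi_remove_handle(multi, easy);
    curl_easy_cleanup(easy);
    curl_multi_cleanup(multi);
    curl_global_cleanup();
    return 0;
  }

curl_multi_poll() caps its wait by libcurl's own timers anyway; querying curl_multi_timeout() here just makes the rate-limit wait visible.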
diff --git a/lib/multiif.h b/lib/multiif.h
index 5cf22bc0467d97247470e9e331cf711481871d03..1423d5a03d0ceb746710ec58acefbdd6a2cd56e7 100644 (file)
@@ -162,5 +162,7 @@ unsigned int Curl_multi_xfers_running(struct Curl_multi *multi);
 /* Mark a transfer as dirty, e.g. to be rerun at earliest convenience.
  * A cheap operation, can be done many times repeatedly. */
 void Curl_multi_mark_dirty(struct Curl_easy *data);
+/* Clear transfer from the dirty set. */
+void Curl_multi_clear_dirty(struct Curl_easy *data);
 
 #endif /* HEADER_CURL_MULTIIF_H */
diff --git a/lib/progress.c b/lib/progress.c
index 1a3f4334ca9e8b4420b5cdea1835d0805fe3aa21..fdae3194c55daa08da09e6461360892b859e9805 100644 (file)
@@ -281,41 +281,40 @@ void Curl_pgrsStartNow(struct Curl_easy *data)
  * to wait to get back under the speed limit.
  */
 timediff_t Curl_pgrsLimitWaitTime(struct pgrs_dir *d,
-                                  curl_off_t speed_limit,
+                                  curl_off_t bytes_per_sec,
                                   struct curltime now)
 {
-  curl_off_t size = d->cur_size - d->limit.start_size;
-  timediff_t minimum;
-  timediff_t actual;
+  curl_off_t bytes = d->cur_size - d->limit.start_size;
+  timediff_t should_ms;
+  timediff_t took_ms;
 
-  if(!speed_limit || !size)
+  /* no limit or we did not get to any bytes yet */
+  if(!bytes_per_sec || !bytes)
     return 0;
 
-  /*
-   * 'minimum' is the number of milliseconds 'size' should take to download to
-   * stay below 'limit'.
-   */
-  if(size < CURL_OFF_T_MAX/1000)
-    minimum = (timediff_t) (1000 * size / speed_limit);
+  /* The time it took us to have `bytes` */
+  took_ms = curlx_timediff_ceil(now, d->limit.start);
+
+  /* The time it *should* have taken us to have `bytes`
+   * when obeying the bytes_per_sec speed_limit. */
+  if(bytes < CURL_OFF_T_MAX/1000) {
+    /* (1000 * bytes / (bytes / sec)) = 1000 * sec = ms */
+    should_ms = (timediff_t) (1000 * bytes / bytes_per_sec);
+  }
   else {
-    minimum = (timediff_t) (size / speed_limit);
-    if(minimum < TIMEDIFF_T_MAX/1000)
-      minimum *= 1000;
+    /* very large `bytes`, first calc the seconds it should have taken.
+     * if that is small enough, convert to milliseconds. */
+    should_ms = (timediff_t) (bytes / bytes_per_sec);
+    if(should_ms < TIMEDIFF_T_MAX/1000)
+      should_ms *= 1000;
     else
-      minimum = TIMEDIFF_T_MAX;
+      should_ms = TIMEDIFF_T_MAX;
   }
 
-  /*
-   * 'actual' is the time in milliseconds it took to actually download the
-   * last 'size' bytes.
-   */
-  actual = curlx_timediff_ceil(now, d->limit.start);
-  if(actual < minimum) {
-    /* if it downloaded the data faster than the limit, make it wait the
-       difference */
-    return minimum - actual;
+  if(took_ms < should_ms) {
+    /* when gotten to `bytes` too fast, wait the difference */
+    return should_ms - took_ms;
   }
-
   return 0;
 }
 
diff --git a/lib/transfer.c b/lib/transfer.c
index 50f621056f7ac0ae1d6c61c1ed98677b880f79c9..1297a3e82d26928a6427a74111a8b419f4ee64ac 100644 (file)
@@ -280,9 +280,10 @@ static CURLcode sendrecv_dl(struct Curl_easy *data,
 
     if(bytestoread && data->set.max_recv_speed > 0) {
       /* In case of speed limit on receiving: if this loop already got
-       * data, break out. If not, limit the amount of bytes to receive.
-       * The overall, timed, speed limiting is done in multi.c */
-      if(total_received)
+       * a quarter of the quota, break out. We want to stutter a bit
+       * to keep in the limit, but too small receives will just cost
+       * cpu unnecessarily. */
+      if(total_received >= (data->set.max_recv_speed / 4))
         break;
       if(data->set.max_recv_speed < (curl_off_t)bytestoread)
         bytestoread = (size_t)data->set.max_recv_speed;
@@ -958,3 +959,15 @@ CURLcode Curl_xfer_pause_recv(struct Curl_easy *data, bool enable)
   Curl_conn_ev_data_pause(data, enable);
   return result;
 }
+
+bool Curl_xfer_is_too_fast(struct Curl_easy *data)
+{
+  struct Curl_llist_node *e = Curl_llist_head(&data->state.timeoutlist);
+  while(e) {
+    struct time_node *n = Curl_node_elem(e);
+    e = Curl_node_next(e);
+    if(n->eid == EXPIRE_TOOFAST)
+      return TRUE;
+  }
+  return FALSE;
+}
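A note on the quarter-of-the-limit exit in the first transfer.c hunk above: each wakeup now reads a meaningful slice of the per-second quota instead of stopping at the first chunk, which bounds the work per pass without paying for lots of tiny reads. A standalone sketch of that clamping decision with a worked example (illustrative names, not curl's code):

  /* Illustrative receive-loop pacing under a speed limit: stop reading once
   * a quarter of the per-second quota has arrived in this wakeup, and never
   * ask for more than one second's worth of bytes at a time. */
  #include <stdio.h>
  #include <stddef.h>

  static size_t paced_read_budget(long long max_recv_speed,
                                  long long total_received,
                                  size_t bytestoread)
  {
    if(max_recv_speed > 0) {
      if(total_received >= max_recv_speed / 4)
        return 0; /* enough for this wakeup, leave the loop */
      if((long long)bytestoread > max_recv_speed)
        bytestoread = (size_t)max_recv_speed;
    }
    return bytestoread;
  }

  int main(void)
  {
    /* with --limit-rate 100K the loop keeps reading until ~25600 bytes have
     * arrived in this pass, then yields until EXPIRE_TOOFAST fires */
    printf("%zu\n", paced_read_budget(100 * 1024, 0, 16384));     /* 16384 */
    printf("%zu\n", paced_read_budget(100 * 1024, 26000, 16384)); /* 0 */
    return 0;
  }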
diff --git a/lib/transfer.h b/lib/transfer.h
index 91a81e52e8fb2d62809e53eccea583af92d92b1d..6145efb4a5fd7fe81342200cd1828118f8ef17f9 100644 (file)
@@ -143,5 +143,7 @@ bool Curl_xfer_recv_is_paused(struct Curl_easy *data);
 CURLcode Curl_xfer_pause_send(struct Curl_easy *data, bool enable);
 CURLcode Curl_xfer_pause_recv(struct Curl_easy *data, bool enable);
 
+/* Query if transfer has expire timeout TOOFAST set. */
+bool Curl_xfer_is_too_fast(struct Curl_easy *data);
 
 #endif /* HEADER_CURL_TRANSFER_H */
diff --git a/scripts/singleuse.pl b/scripts/singleuse.pl
index 755c3987fe9c43dd22e0be0ded8bbc0cf759e97a..2e8a8e0afd225374edc69d7253d0c4535ac4d2a7 100755 (executable)
@@ -56,6 +56,7 @@ my %wl = (
     'curlx_base64_decode' => 'internal api',
     'curlx_base64_encode' => 'internal api',
     'curlx_base64url_encode' => 'internal api',
+    'Curl_multi_clear_dirty' => 'internal api',
 );
 
 my %api = (
diff --git a/tests/http/scorecard.py b/tests/http/scorecard.py
index d6ff5dbfdfea2584ef93ed208bb6a67081bc4e9b..9c5417f1bf23c7d4a391c00898c808f4cbfb5847 100644 (file)
@@ -61,7 +61,14 @@ class Card:
 
     @classmethod
     def fmt_mbs(cls, val):
-        return f'{val/(1024*1024):0.000f} MB/s' if val >= 0 else '--'
+        if val is None or val < 0:
+            return '--'
+        if val >= (1024*1024):
+            return f'{val/(1024*1024):0.000f} MB/s'
+        elif val >= 1024:
+            return f'{val / 1024:0.000f} KB/s'
+        else:
+            return f'{val:0.000f} B/s'
 
     @classmethod
     def fmt_reqs(cls, val):
@@ -119,6 +126,8 @@ class Card:
             print(f'Version: {score["meta"]["curl_V"]}')
         if 'curl_features' in score["meta"]:
             print(f'Features: {score["meta"]["curl_features"]}')
+        if 'limit-rate' in score['meta']:
+            print(f'--limit-rate: {score["meta"]["limit-rate"]}')
         print(f'Samples Size: {score["meta"]["samples"]}')
         if 'handshakes' in score:
             print(f'{"Handshakes":<24} {"ipv4":25} {"ipv6":28}')
@@ -188,7 +197,8 @@ class ScoreRunner:
                  server_addr: Optional[str] = None,
                  with_dtrace: bool = False,
                  with_flame: bool = False,
-                 socks_args: Optional[List[str]] = None):
+                 socks_args: Optional[List[str]] = None,
+                 limit_rate: Optional[str] = None):
         self.verbose = verbose
         self.env = env
         self.protocol = protocol
@@ -200,6 +210,7 @@ class ScoreRunner:
         self._with_dtrace = with_dtrace
         self._with_flame = with_flame
         self._socks_args = socks_args
+        self._limit_rate = limit_rate
 
     def info(self, msg):
         if self.verbose > 0:
@@ -289,7 +300,8 @@ class ScoreRunner:
             curl = self.mk_curl_client()
             r = curl.http_download(urls=[url], alpn_proto=self.protocol,
                                    no_save=True, with_headers=False,
-                                   with_profile=True)
+                                   with_profile=True,
+                                   limit_rate=self._limit_rate)
             err = self._check_downloads(r, count)
             if err:
                 errors.append(err)
@@ -309,7 +321,9 @@ class ScoreRunner:
             curl = self.mk_curl_client()
             r = curl.http_download(urls=[url], alpn_proto=self.protocol,
                                    no_save=True,
-                                   with_headers=False, with_profile=True)
+                                   with_headers=False,
+                                   with_profile=True,
+                                   limit_rate=self._limit_rate)
             err = self._check_downloads(r, count)
             if err:
                 errors.append(err)
@@ -332,6 +346,7 @@ class ScoreRunner:
                                    no_save=True,
                                    with_headers=False,
                                    with_profile=True,
+                                   limit_rate=self._limit_rate,
                                    extra_args=[
                                        '--parallel',
                                        '--parallel-max', str(max_parallel)
@@ -582,6 +597,9 @@ class ScoreRunner:
                 'date': f'{datetime.datetime.now(tz=datetime.timezone.utc).isoformat()}',
             }
         }
+        if self._limit_rate:
+            score['meta']['limit-rate'] = self._limit_rate
+
         if self.protocol == 'h3':
             score['meta']['protocol'] = 'h3'
             if not self.env.have_h3_curl():
@@ -710,7 +728,8 @@ def run_score(args, protocol):
                                download_parallel=args.download_parallel,
                                with_dtrace=args.dtrace,
                                with_flame=args.flame,
-                               socks_args=socks_args)
+                               socks_args=socks_args,
+                               limit_rate=args.limit_rate)
             cards.append(card)
 
         if test_httpd:
@@ -737,7 +756,8 @@ def run_score(args, protocol):
                                download_parallel=args.download_parallel,
                                with_dtrace=args.dtrace,
                                with_flame=args.flame,
-                               socks_args=socks_args)
+                               socks_args=socks_args,
+                               limit_rate=args.limit_rate)
             card.setup_resources(server_docs, downloads)
             cards.append(card)
 
@@ -763,7 +783,8 @@ def run_score(args, protocol):
                                verbose=args.verbose, curl_verbose=args.curl_verbose,
                                download_parallel=args.download_parallel,
                                with_dtrace=args.dtrace,
-                               socks_args=socks_args)
+                               socks_args=socks_args,
+                               limit_rate=args.limit_rate)
             card.setup_resources(server_docs, downloads)
             cards.append(card)
 
@@ -847,6 +868,8 @@ def main():
                         default = False, help="produce dtrace of curl")
     parser.add_argument("--flame", action='store_true',
                         default = False, help="produce a flame graph on curl, implies --dtrace")
+    parser.add_argument("--limit-rate", action='store', type=str,
+                        default=None, help="use curl's --limit-rate")
 
     parser.add_argument("-H", "--handshakes", action='store_true',
                         default=False, help="evaluate handshakes only")
diff --git a/tests/http/testenv/curl.py b/tests/http/testenv/curl.py
index 26d2dd716cab929ffd16c81b58394b003043ab98..7c3f6e72d97c25eeb372846febf2af5e4396e366 100644 (file)
@@ -582,17 +582,16 @@ class CurlClient:
                       with_profile: bool = False,
                       with_tcpdump: bool = False,
                       no_save: bool = False,
+                      limit_rate: Optional[str] = None,
                       extra_args: Optional[List[str]] = None):
         if extra_args is None:
             extra_args = []
         if no_save:
-            extra_args.extend([
-                '--out-null',
-            ])
+            extra_args.extend(['--out-null'])
         else:
-            extra_args.extend([
-                '-o', 'download_#1.data',
-            ])
+            extra_args.extend(['-o', 'download_#1.data'])
+        if limit_rate:
+            extra_args.extend(['--limit-rate', limit_rate])
         # remove any existing ones
         for i in range(100):
             self._rmf(self.download_file(i))