git.ipfire.org Git - thirdparty/curl.git/commitdiff
http2: polish things around POST
author     Stefan Eissing <stefan@eissing.org>
           Tue, 29 Aug 2023 11:08:35 +0000 (13:08 +0200)
committer  Daniel Stenberg <daniel@haxx.se>
           Mon, 4 Sep 2023 17:48:49 +0000 (19:48 +0200)
- added test cases for various code paths
- fixed handling of a blocked write when the stream had
  been closed in between attempts
- re-enabled DEBUGASSERT on send with smaller data size

- in debug builds, environment variables can be set to simulate a slow
  network when sending data. cf-socket.c and vquic.c support:
  * CURL_DBG_SOCK_WBLOCK: percentage of send() calls that should be
    answered with an EAGAIN. TCP/UNIX sockets.
    This is chosen randomly (see the sketch after this list).
  * CURL_DBG_SOCK_WPARTIAL: percentage of the data that shall be written
    to the network. TCP/UNIX sockets.
    Example: 80 means a send of 1000 bytes would only send 800.
    This is applied to every send.
  * CURL_DBG_QUIC_WBLOCK: percentage of send() calls that should be
    answered with EAGAIN. QUIC only.
    This is chosen randomly.
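
A minimal sketch of how the two socket variables take effect inside
cf_socket_send() (condensed from the cf-socket.c hunk further down;
ctx->wblock_percent and ctx->wpartial_percent hold the values parsed
from the environment):

  #ifdef DEBUGBUILD
  /* roughly wblock_percent of all send() calls report EAGAIN */
  if(ctx->wblock_percent > 0) {
    unsigned char c;
    Curl_rand(data, &c, 1);     /* one random byte, 0..255 */
    if(c >= ((100 - ctx->wblock_percent) * 256 / 100)) {
      *err = CURLE_AGAIN;       /* pretend the socket would block */
      return -1;
    }
  }
  /* shrink every larger send to wpartial_percent of its length */
  if(ctx->wpartial_percent > 0 && len > 8) {
    len = len * ctx->wpartial_percent / 100;
    if(!len)
      len = 1;                  /* always attempt at least one byte */
  }
  #endif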

Closes #11756

23 files changed:
.github/workflows/ngtcp2-linux.yml
lib/cf-h2-proxy.c
lib/cf-socket.c
lib/http2.c
lib/multi.c
lib/transfer.c
lib/vquic/curl_ngtcp2.c
lib/vquic/curl_quiche.c
lib/vquic/vquic.c
lib/vquic/vquic_int.h
lib/vtls/wolfssl.c
tests/http/test_01_basic.py
tests/http/test_02_download.py
tests/http/test_04_stuttered.py
tests/http/test_07_upload.py
tests/http/test_07_upload.py.orig [new file with mode: 0644]
tests/http/test_08_caddy.py
tests/http/test_10_proxy.py
tests/http/test_13_proxy_auth.py
tests/http/test_14_auth.py
tests/http/testenv/curl.py
tests/http/testenv/env.py
tests/http/testenv/httpd.py

index b9fb3c094874876f9b18dc41754b43ddcfc32777..b5960f1b2128f1392c7557d63e46bd8c5c7eaad3 100644 (file)
@@ -267,3 +267,11 @@ jobs:
       name: 'run pytest'
       env:
         TFLAGS: "${{ matrix.build.tflags }}"
+
+    - run: pytest
+      name: 'run pytest with slowed network'
+      env:
+        # 33% of sends are EAGAINed
+        CURL_DBG_SOCK_WBLOCK: 33
+        # only 80% of data > 10 bytes is sent
+        CURL_DBG_SOCK_WPARTIAL: 80
index 9f9f6059351a6e6e24658fff7de29df6baf344de..a3feefca072037f3920cb6a5d0aca0e6b949a6a5 100644 (file)
@@ -35,6 +35,7 @@
 #include "dynbuf.h"
 #include "dynhds.h"
 #include "http1.h"
+#include "http2.h"
 #include "http_proxy.h"
 #include "multiif.h"
 #include "cf-h2-proxy.h"
@@ -146,29 +147,30 @@ static void h2_tunnel_go_state(struct Curl_cfilter *cf,
   /* entering this one */
   switch(new_state) {
   case H2_TUNNEL_INIT:
-    CURL_TRC_CF(data, cf, "new tunnel state 'init'");
+    CURL_TRC_CF(data, cf, "[%d] new tunnel state 'init'", ts->stream_id);
     tunnel_stream_clear(ts);
     break;
 
   case H2_TUNNEL_CONNECT:
-    CURL_TRC_CF(data, cf, "new tunnel state 'connect'");
+    CURL_TRC_CF(data, cf, "[%d] new tunnel state 'connect'", ts->stream_id);
     ts->state = H2_TUNNEL_CONNECT;
     break;
 
   case H2_TUNNEL_RESPONSE:
-    CURL_TRC_CF(data, cf, "new tunnel state 'response'");
+    CURL_TRC_CF(data, cf, "[%d] new tunnel state 'response'", ts->stream_id);
     ts->state = H2_TUNNEL_RESPONSE;
     break;
 
   case H2_TUNNEL_ESTABLISHED:
-    CURL_TRC_CF(data, cf, "new tunnel state 'established'");
+    CURL_TRC_CF(data, cf, "[%d] new tunnel state 'established'",
+                ts->stream_id);
     infof(data, "CONNECT phase completed");
     data->state.authproxy.done = TRUE;
     data->state.authproxy.multipass = FALSE;
     /* FALLTHROUGH */
   case H2_TUNNEL_FAILED:
     if(new_state == H2_TUNNEL_FAILED)
-      CURL_TRC_CF(data, cf, "new tunnel state 'failed'");
+      CURL_TRC_CF(data, cf, "[%d] new tunnel state 'failed'", ts->stream_id);
     ts->state = new_state;
     /* If a proxy-authorization header was used for the proxy, then we should
        make sure that it isn't accidentally used for the document request
@@ -231,8 +233,8 @@ static void drain_tunnel(struct Curl_cfilter *cf,
   bits = CURL_CSELECT_IN;
   if(!tunnel->closed && !tunnel->reset && tunnel->upload_blocked_len)
     bits |= CURL_CSELECT_OUT;
-  if(data->state.dselect_bits != bits) {
-    CURL_TRC_CF(data, cf, "[h2sid=%d] DRAIN dselect_bits=%x",
+  if(data->state.dselect_bits != bits || 1) {
+    CURL_TRC_CF(data, cf, "[%d] DRAIN dselect_bits=%x",
                 tunnel->stream_id, bits);
     data->state.dselect_bits = bits;
     Curl_expire(data, 0, EXPIRE_RUN_NOW);
@@ -249,7 +251,7 @@ static ssize_t proxy_nw_in_reader(void *reader_ctx,
   if(cf) {
     struct Curl_easy *data = CF_DATA_CURRENT(cf);
     nread = Curl_conn_cf_recv(cf->next, data, (char *)buf, buflen, err);
-    CURL_TRC_CF(data, cf, "nw_in_reader(len=%zu) -> %zd, %d",
+    CURL_TRC_CF(data, cf, "[0] nw_in_reader(len=%zu) -> %zd, %d",
                 buflen, nread, *err);
   }
   else {
@@ -269,7 +271,7 @@ static ssize_t proxy_h2_nw_out_writer(void *writer_ctx,
     struct Curl_easy *data = CF_DATA_CURRENT(cf);
     nwritten = Curl_conn_cf_send(cf->next, data, (const char *)buf, buflen,
                                  err);
-    CURL_TRC_CF(data, cf, "nw_out_writer(len=%zu) -> %zd, %d",
+    CURL_TRC_CF(data, cf, "[0] nw_out_writer(len=%zu) -> %zd, %d",
                 buflen, nwritten, *err);
   }
   else {
@@ -306,6 +308,10 @@ static ssize_t on_session_send(nghttp2_session *h2,
 static int proxy_h2_on_frame_recv(nghttp2_session *session,
                                   const nghttp2_frame *frame,
                                   void *userp);
+#ifndef CURL_DISABLE_VERBOSE_STRINGS
+static int on_frame_send(nghttp2_session *session, const nghttp2_frame *frame,
+                         void *userp);
+#endif
 static int proxy_h2_on_stream_close(nghttp2_session *session,
                                     int32_t stream_id,
                                     uint32_t error_code, void *userp);
@@ -348,6 +354,9 @@ static CURLcode cf_h2_proxy_ctx_init(struct Curl_cfilter *cf,
   nghttp2_session_callbacks_set_send_callback(cbs, on_session_send);
   nghttp2_session_callbacks_set_on_frame_recv_callback(
     cbs, proxy_h2_on_frame_recv);
+#ifndef CURL_DISABLE_VERBOSE_STRINGS
+  nghttp2_session_callbacks_set_on_frame_send_callback(cbs, on_frame_send);
+#endif
   nghttp2_session_callbacks_set_on_data_chunk_recv_callback(
     cbs, tunnel_recv_callback);
   nghttp2_session_callbacks_set_on_stream_close_callback(
@@ -395,7 +404,7 @@ static CURLcode cf_h2_proxy_ctx_init(struct Curl_cfilter *cf,
 out:
   if(cbs)
     nghttp2_session_callbacks_del(cbs);
-  CURL_TRC_CF(data, cf, "init proxy ctx -> %d", result);
+  CURL_TRC_CF(data, cf, "[0] init proxy ctx -> %d", result);
   return result;
 }
 
@@ -420,13 +429,13 @@ static CURLcode proxy_h2_nw_out_flush(struct Curl_cfilter *cf,
                             &result);
   if(nwritten < 0) {
     if(result == CURLE_AGAIN) {
-      CURL_TRC_CF(data, cf, "flush nw send buffer(%zu) -> EAGAIN",
+      CURL_TRC_CF(data, cf, "[0] flush nw send buffer(%zu) -> EAGAIN",
                   Curl_bufq_len(&ctx->outbufq));
       ctx->nw_out_blocked = 1;
     }
     return result;
   }
-  CURL_TRC_CF(data, cf, "nw send buffer flushed");
+  CURL_TRC_CF(data, cf, "[0] nw send buffer flushed");
   return Curl_bufq_is_empty(&ctx->outbufq)? CURLE_OK: CURLE_AGAIN;
 }
 
@@ -447,7 +456,7 @@ static int proxy_h2_process_pending_input(struct Curl_cfilter *cf,
   while(Curl_bufq_peek(&ctx->inbufq, &buf, &blen)) {
 
     rv = nghttp2_session_mem_recv(ctx->h2, (const uint8_t *)buf, blen);
-    CURL_TRC_CF(data, cf, "fed %zu bytes from nw to nghttp2 -> %zd", blen, rv);
+    CURL_TRC_CF(data, cf, "[0] %zu bytes to nghttp2 -> %zd", blen, rv);
     if(rv < 0) {
       failf(data,
             "process_pending_input: nghttp2_session_mem_recv() returned "
@@ -457,11 +466,11 @@ static int proxy_h2_process_pending_input(struct Curl_cfilter *cf,
     }
     Curl_bufq_skip(&ctx->inbufq, (size_t)rv);
     if(Curl_bufq_is_empty(&ctx->inbufq)) {
-      CURL_TRC_CF(data, cf, "all data in connection buffer processed");
+      CURL_TRC_CF(data, cf, "[0] all data in connection buffer processed");
       break;
     }
     else {
-      CURL_TRC_CF(data, cf, "process_pending_input: %zu bytes left "
+      CURL_TRC_CF(data, cf, "[0] process_pending_input: %zu bytes left "
                   "in connection buffer", Curl_bufq_len(&ctx->inbufq));
     }
   }
@@ -478,7 +487,7 @@ static CURLcode proxy_h2_progress_ingress(struct Curl_cfilter *cf,
 
   /* Process network input buffer fist */
   if(!Curl_bufq_is_empty(&ctx->inbufq)) {
-    CURL_TRC_CF(data, cf, "Process %zu bytes in connection buffer",
+    CURL_TRC_CF(data, cf, "[0] process %zu bytes in connection buffer",
                 Curl_bufq_len(&ctx->inbufq));
     if(proxy_h2_process_pending_input(cf, data, &result) < 0)
       return result;
@@ -492,7 +501,7 @@ static CURLcode proxy_h2_progress_ingress(struct Curl_cfilter *cf,
         !Curl_bufq_is_full(&ctx->tunnel.recvbuf)) {
 
     nread = Curl_bufq_slurp(&ctx->inbufq, proxy_nw_in_reader, cf, &result);
-    CURL_TRC_CF(data, cf, "read %zu bytes nw data -> %zd, %d",
+    CURL_TRC_CF(data, cf, "[0] read %zu bytes nw data -> %zd, %d",
                 Curl_bufq_len(&ctx->inbufq), nread, result);
     if(nread < 0) {
       if(result != CURLE_AGAIN) {
@@ -528,7 +537,7 @@ static CURLcode proxy_h2_progress_egress(struct Curl_cfilter *cf,
     rv = nghttp2_session_send(ctx->h2);
 
   if(nghttp2_is_fatal(rv)) {
-    CURL_TRC_CF(data, cf, "nghttp2_session_send error (%s)%d",
+    CURL_TRC_CF(data, cf, "[0] nghttp2_session_send error (%s)%d",
                 nghttp2_strerror(rv), rv);
     return CURLE_SEND_ERROR;
   }
@@ -565,6 +574,97 @@ static ssize_t on_session_send(nghttp2_session *h2,
   return nwritten;
 }
 
+#ifndef CURL_DISABLE_VERBOSE_STRINGS
+static int fr_print(const nghttp2_frame *frame, char *buffer, size_t blen)
+{
+  switch(frame->hd.type) {
+    case NGHTTP2_DATA: {
+      return msnprintf(buffer, blen,
+                       "FRAME[DATA, len=%d, eos=%d, padlen=%d]",
+                       (int)frame->hd.length,
+                       !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM),
+                       (int)frame->data.padlen);
+    }
+    case NGHTTP2_HEADERS: {
+      return msnprintf(buffer, blen,
+                       "FRAME[HEADERS, len=%d, hend=%d, eos=%d]",
+                       (int)frame->hd.length,
+                       !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+                       !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM));
+    }
+    case NGHTTP2_PRIORITY: {
+      return msnprintf(buffer, blen,
+                       "FRAME[PRIORITY, len=%d, flags=%d]",
+                       (int)frame->hd.length, frame->hd.flags);
+    }
+    case NGHTTP2_RST_STREAM: {
+      return msnprintf(buffer, blen,
+                       "FRAME[RST_STREAM, len=%d, flags=%d, error=%u]",
+                       (int)frame->hd.length, frame->hd.flags,
+                       frame->rst_stream.error_code);
+    }
+    case NGHTTP2_SETTINGS: {
+      if(frame->hd.flags & NGHTTP2_FLAG_ACK) {
+        return msnprintf(buffer, blen, "FRAME[SETTINGS, ack=1]");
+      }
+      return msnprintf(buffer, blen,
+                       "FRAME[SETTINGS, len=%d]", (int)frame->hd.length);
+    }
+    case NGHTTP2_PUSH_PROMISE: {
+      return msnprintf(buffer, blen,
+                       "FRAME[PUSH_PROMISE, len=%d, hend=%d]",
+                       (int)frame->hd.length,
+                       !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS));
+    }
+    case NGHTTP2_PING: {
+      return msnprintf(buffer, blen,
+                       "FRAME[PING, len=%d, ack=%d]",
+                       (int)frame->hd.length,
+                       frame->hd.flags&NGHTTP2_FLAG_ACK);
+    }
+    case NGHTTP2_GOAWAY: {
+      char scratch[128];
+      size_t s_len = sizeof(scratch)/sizeof(scratch[0]);
+      size_t len = (frame->goaway.opaque_data_len < s_len)?
+                    frame->goaway.opaque_data_len : s_len-1;
+      if(len)
+        memcpy(scratch, frame->goaway.opaque_data, len);
+      scratch[len] = '\0';
+      return msnprintf(buffer, blen, "FRAME[GOAWAY, error=%d, reason='%s', "
+                       "last_stream=%d]", frame->goaway.error_code,
+                       scratch, frame->goaway.last_stream_id);
+    }
+    case NGHTTP2_WINDOW_UPDATE: {
+      return msnprintf(buffer, blen,
+                       "FRAME[WINDOW_UPDATE, incr=%d]",
+                       frame->window_update.window_size_increment);
+    }
+    default:
+      return msnprintf(buffer, blen, "FRAME[%d, len=%d, flags=%d]",
+                       frame->hd.type, (int)frame->hd.length,
+                       frame->hd.flags);
+  }
+}
+
+static int on_frame_send(nghttp2_session *session, const nghttp2_frame *frame,
+                         void *userp)
+{
+  struct Curl_cfilter *cf = userp;
+  struct Curl_easy *data = CF_DATA_CURRENT(cf);
+
+  (void)session;
+  DEBUGASSERT(data);
+  if(data && Curl_trc_cf_is_verbose(cf, data)) {
+    char buffer[256];
+    int len;
+    len = fr_print(frame, buffer, sizeof(buffer)-1);
+    buffer[len] = 0;
+    CURL_TRC_CF(data, cf, "[%d] -> %s", frame->hd.stream_id, buffer);
+  }
+  return 0;
+}
+#endif /* !CURL_DISABLE_VERBOSE_STRINGS */
+
 static int proxy_h2_on_frame_recv(nghttp2_session *session,
                                   const nghttp2_frame *frame,
                                   void *userp)
@@ -576,47 +676,57 @@ static int proxy_h2_on_frame_recv(nghttp2_session *session,
 
   (void)session;
   DEBUGASSERT(data);
+#ifndef CURL_DISABLE_VERBOSE_STRINGS
+  if(Curl_trc_cf_is_verbose(cf, data)) {
+    char buffer[256];
+    int len;
+    len = fr_print(frame, buffer, sizeof(buffer)-1);
+    buffer[len] = 0;
+    CURL_TRC_CF(data, cf, "[%d] <- %s",frame->hd.stream_id, buffer);
+  }
+#endif /* !CURL_DISABLE_VERBOSE_STRINGS */
+
   if(!stream_id) {
     /* stream ID zero is for connection-oriented stuff */
     DEBUGASSERT(data);
     switch(frame->hd.type) {
     case NGHTTP2_SETTINGS:
-      /* we do not do anything with this for now */
+      /* Since the initial stream window is 64K, a request might be on HOLD,
+       * due to exhaustion. The (initial) SETTINGS may announce a much larger
+       * window and *assume* that we treat this like a WINDOW_UPDATE. Some
+       * servers send an explicit WINDOW_UPDATE, but not all seem to do that.
+       * To be safe, we UNHOLD a stream in order not to stall. */
+      if((data->req.keepon & KEEP_SEND_HOLD) &&
+         (data->req.keepon & KEEP_SEND)) {
+        data->req.keepon &= ~KEEP_SEND_HOLD;
+        drain_tunnel(cf, data, &ctx->tunnel);
+        CURL_TRC_CF(data, cf, "[%d] un-holding after SETTINGS",
+                    stream_id);
+      }
       break;
     case NGHTTP2_GOAWAY:
-      infof(data, "recveived GOAWAY, error=%d, last_stream=%u",
-                  frame->goaway.error_code, frame->goaway.last_stream_id);
       ctx->goaway = TRUE;
       break;
-    case NGHTTP2_WINDOW_UPDATE:
-      CURL_TRC_CF(data, cf, "recv frame WINDOW_UPDATE");
-      break;
     default:
-      CURL_TRC_CF(data, cf, "recv frame %x on 0", frame->hd.type);
+      break;
     }
     return 0;
   }
 
   if(stream_id != ctx->tunnel.stream_id) {
-    CURL_TRC_CF(data, cf, "[h2sid=%u] rcvd FRAME not for tunnel", stream_id);
+    CURL_TRC_CF(data, cf, "[%d] rcvd FRAME not for tunnel", stream_id);
     return NGHTTP2_ERR_CALLBACK_FAILURE;
   }
 
   switch(frame->hd.type) {
-  case NGHTTP2_DATA:
-    /* If body started on this stream, then receiving DATA is illegal. */
-    CURL_TRC_CF(data, cf, "[h2sid=%u] recv frame DATA", stream_id);
-    break;
   case NGHTTP2_HEADERS:
-    CURL_TRC_CF(data, cf, "[h2sid=%u] recv frame HEADERS", stream_id);
-
     /* nghttp2 guarantees that :status is received, and we store it to
        stream->status_code. Fuzzing has proven this can still be reached
        without status code having been set. */
     if(!ctx->tunnel.resp)
       return NGHTTP2_ERR_CALLBACK_FAILURE;
     /* Only final status code signals the end of header */
-    CURL_TRC_CF(data, cf, "[h2sid=%u] got http status: %d",
+    CURL_TRC_CF(data, cf, "[%d] got http status: %d",
                 stream_id, ctx->tunnel.resp->status);
     if(!ctx->tunnel.has_final_response) {
       if(ctx->tunnel.resp->status / 100 != 1) {
@@ -624,26 +734,16 @@ static int proxy_h2_on_frame_recv(nghttp2_session *session,
       }
     }
     break;
-  case NGHTTP2_PUSH_PROMISE:
-    CURL_TRC_CF(data, cf, "[h2sid=%u] recv PUSH_PROMISE", stream_id);
-    return NGHTTP2_ERR_CALLBACK_FAILURE;
-  case NGHTTP2_RST_STREAM:
-    CURL_TRC_CF(data, cf, "[h2sid=%u] recv RST", stream_id);
-    ctx->tunnel.reset = TRUE;
-    break;
   case NGHTTP2_WINDOW_UPDATE:
-    CURL_TRC_CF(data, cf, "[h2sid=%u] recv WINDOW_UPDATE", stream_id);
     if((data->req.keepon & KEEP_SEND_HOLD) &&
        (data->req.keepon & KEEP_SEND)) {
       data->req.keepon &= ~KEEP_SEND_HOLD;
       Curl_expire(data, 0, EXPIRE_RUN_NOW);
-      CURL_TRC_CF(data, cf, "[h2sid=%u] unpausing after win update",
+      CURL_TRC_CF(data, cf, "[%d] unpausing after win update",
                   stream_id);
     }
     break;
   default:
-    CURL_TRC_CF(data, cf, "[h2sid=%u] recv frame %x",
-                stream_id, frame->hd.type);
     break;
   }
   return 0;
@@ -667,7 +767,7 @@ static int proxy_h2_on_header(nghttp2_session *session,
   (void)session;
   DEBUGASSERT(stream_id); /* should never be a zero stream ID here */
   if(stream_id != ctx->tunnel.stream_id) {
-    CURL_TRC_CF(data, cf, "[h2sid=%u] header for non-tunnel stream: "
+    CURL_TRC_CF(data, cf, "[%d] header for non-tunnel stream: "
                 "%.*s: %.*s", stream_id,
                 (int)namelen, name, (int)valuelen, value);
     return NGHTTP2_ERR_CALLBACK_FAILURE;
@@ -697,7 +797,7 @@ static int proxy_h2_on_header(nghttp2_session *session,
       return NGHTTP2_ERR_CALLBACK_FAILURE;
     resp->prev = ctx->tunnel.resp;
     ctx->tunnel.resp = resp;
-    CURL_TRC_CF(data, cf, "[h2sid=%u] status: HTTP/2 %03d",
+    CURL_TRC_CF(data, cf, "[%d] status: HTTP/2 %03d",
                 stream_id, ctx->tunnel.resp->status);
     return 0;
   }
@@ -711,7 +811,7 @@ static int proxy_h2_on_header(nghttp2_session *session,
   if(result)
     return NGHTTP2_ERR_CALLBACK_FAILURE;
 
-  CURL_TRC_CF(data, cf, "[h2sid=%u] header: %.*s: %.*s",
+  CURL_TRC_CF(data, cf, "[%d] header: %.*s: %.*s",
               stream_id, (int)namelen, name, (int)valuelen, value);
 
   return 0; /* 0 is successful */
@@ -752,7 +852,7 @@ static ssize_t tunnel_send_callback(nghttp2_session *session,
   if(ts->closed && Curl_bufq_is_empty(&ts->sendbuf))
     *data_flags = NGHTTP2_DATA_FLAG_EOF;
 
-  CURL_TRC_CF(data, cf, "[h2sid=%u] tunnel_send_callback -> %zd",
+  CURL_TRC_CF(data, cf, "[%d] tunnel_send_callback -> %zd",
               ts->stream_id, nread);
   return nread;
 }
@@ -797,7 +897,7 @@ static int proxy_h2_on_stream_close(nghttp2_session *session,
   if(stream_id != ctx->tunnel.stream_id)
     return 0;
 
-  CURL_TRC_CF(data, cf, "[h2sid=%u] proxy_h2_on_stream_close, %s (err %d)",
+  CURL_TRC_CF(data, cf, "[%d] proxy_h2_on_stream_close, %s (err %d)",
               stream_id, nghttp2_http2_strerror(error_code), error_code);
   ctx->tunnel.closed = TRUE;
   ctx->tunnel.error = error_code;
@@ -916,8 +1016,8 @@ static CURLcode submit_CONNECT(struct Curl_cfilter *cf,
   result = proxy_h2_submit(&ts->stream_id, cf, data, ctx->h2, req,
                            NULL, ts, tunnel_send_callback, cf);
   if(result) {
-    CURL_TRC_CF(data, cf, "send: nghttp2_submit_request error (%s)%u",
-                nghttp2_strerror(ts->stream_id), ts->stream_id);
+    CURL_TRC_CF(data, cf, "[%d] send, nghttp2_submit_request error: %s",
+                ts->stream_id, nghttp2_strerror(ts->stream_id));
   }
 
 out:
@@ -951,7 +1051,7 @@ static CURLcode inspect_response(struct Curl_cfilter *cf,
   }
 
   if(auth_reply) {
-    CURL_TRC_CF(data, cf, "CONNECT: fwd auth header '%s'",
+    CURL_TRC_CF(data, cf, "[0] CONNECT: fwd auth header '%s'",
                 auth_reply->value);
     result = Curl_http_input_auth(data, ts->resp->status == 407,
                                   auth_reply->value);
@@ -982,7 +1082,7 @@ static CURLcode H2_CONNECT(struct Curl_cfilter *cf,
     switch(ts->state) {
     case H2_TUNNEL_INIT:
       /* Prepare the CONNECT request and make a first attempt to send. */
-      CURL_TRC_CF(data, cf, "CONNECT start for %s", ts->authority);
+      CURL_TRC_CF(data, cf, "[0] CONNECT start for %s", ts->authority);
       result = submit_CONNECT(cf, data, ts);
       if(result)
         goto out;
@@ -1149,7 +1249,7 @@ static ssize_t h2_handle_tunnel_close(struct Curl_cfilter *cf,
   ssize_t rv = 0;
 
   if(ctx->tunnel.error == NGHTTP2_REFUSED_STREAM) {
-    CURL_TRC_CF(data, cf, "[h2sid=%u] REFUSED_STREAM, try again on a new "
+    CURL_TRC_CF(data, cf, "[%d] REFUSED_STREAM, try again on a new "
                 "connection", ctx->tunnel.stream_id);
     connclose(cf->conn, "REFUSED_STREAM"); /* don't use this anymore */
     *err = CURLE_RECV_ERROR; /* trigger Curl_retry_request() later */
@@ -1170,7 +1270,8 @@ static ssize_t h2_handle_tunnel_close(struct Curl_cfilter *cf,
 
   *err = CURLE_OK;
   rv = 0;
-  CURL_TRC_CF(data, cf, "handle_tunnel_close -> %zd, %d", rv, *err);
+  CURL_TRC_CF(data, cf, "[%d] handle_tunnel_close -> %zd, %d",
+              ctx->tunnel.stream_id, rv, *err);
   return rv;
 }
 
@@ -1206,8 +1307,8 @@ static ssize_t tunnel_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
   }
 
 out:
-  CURL_TRC_CF(data, cf, "tunnel_recv(len=%zu) -> %zd, %d",
-              len, nread, *err);
+  CURL_TRC_CF(data, cf, "[%d] tunnel_recv(len=%zu) -> %zd, %d",
+              ctx->tunnel.stream_id, len, nread, *err);
   return nread;
 }
 
@@ -1235,13 +1336,22 @@ static ssize_t cf_h2_proxy_recv(struct Curl_cfilter *cf,
   nread = tunnel_recv(cf, data, buf, len, err);
 
   if(nread > 0) {
-    CURL_TRC_CF(data, cf, "[h2sid=%u] increase window by %zd",
+    CURL_TRC_CF(data, cf, "[%d] increase window by %zd",
                 ctx->tunnel.stream_id, nread);
     nghttp2_session_consume(ctx->h2, ctx->tunnel.stream_id, (size_t)nread);
   }
 
   result = proxy_h2_progress_egress(cf, data);
-  if(result && result != CURLE_AGAIN) {
+  if(result == CURLE_AGAIN) {
+    /* pending data to send, need to be called again. Ideally, we'd
+     * monitor the socket for POLLOUT, but we might not be in SENDING
+     * transfer state any longer and are unable to make this happen.
+     */
+    CURL_TRC_CF(data, cf, "[%d] egress blocked, DRAIN",
+                ctx->tunnel.stream_id);
+    drain_tunnel(cf, data, &ctx->tunnel);
+  }
+  else if(result) {
     *err = result;
     nread = -1;
   }
@@ -1253,7 +1363,7 @@ out:
      * draining to avoid stalling when no socket events happen. */
     drain_tunnel(cf, data, &ctx->tunnel);
   }
-  CURL_TRC_CF(data, cf, "[h2sid=%u] cf_recv(len=%zu) -> %zd %d",
+  CURL_TRC_CF(data, cf, "[%d] cf_recv(len=%zu) -> %zd %d",
               ctx->tunnel.stream_id, len, nread, *err);
   CF_DATA_RESTORE(cf, save);
   return nread;
@@ -1295,6 +1405,7 @@ static ssize_t cf_h2_proxy_send(struct Curl_cfilter *cf,
     }
     nwritten = (ssize_t)ctx->tunnel.upload_blocked_len;
     ctx->tunnel.upload_blocked_len = 0;
+    *err = CURLE_OK;
   }
   else {
     nwritten = Curl_bufq_write(&ctx->tunnel.sendbuf, buf, len, err);
@@ -1352,7 +1463,7 @@ static ssize_t cf_h2_proxy_send(struct Curl_cfilter *cf,
        * proxy connection AND to UNHOLD all of them again when the
        * window increases.
        * We *could* iterate over all data on this conn maybe? */
-      CURL_TRC_CF(data, cf, "[h2sid=%d] remote flow "
+      CURL_TRC_CF(data, cf, "[%d] remote flow "
                   "window is exhausted", ctx->tunnel.stream_id);
     }
 
@@ -1360,11 +1471,12 @@ static ssize_t cf_h2_proxy_send(struct Curl_cfilter *cf,
      * We have unwritten state that needs us being invoked again and EAGAIN
      * is the only way to ensure that. */
     ctx->tunnel.upload_blocked_len = nwritten;
-    CURL_TRC_CF(data, cf, "[h2sid=%d] cf_send(len=%zu) BLOCK: win %u/%zu "
+    CURL_TRC_CF(data, cf, "[%d] cf_send(len=%zu) BLOCK: win %u/%zu "
                 "blocked_len=%zu",
                 ctx->tunnel.stream_id, len,
                 nghttp2_session_get_remote_window_size(ctx->h2), rwin,
                 nwritten);
+    drain_tunnel(cf, data, &ctx->tunnel);
     *err = CURLE_AGAIN;
     nwritten = -1;
     goto out;
@@ -1377,14 +1489,20 @@ static ssize_t cf_h2_proxy_send(struct Curl_cfilter *cf,
       nwritten = -1;
     }
     else {
-      CURL_TRC_CF(data, cf, "send: nothing to do in this session");
+      CURL_TRC_CF(data, cf, "[0] send: nothing to do in this session");
       *err = CURLE_HTTP2;
       nwritten = -1;
     }
   }
 
 out:
-  CURL_TRC_CF(data, cf, "[h2sid=%d] cf_send(len=%zu) -> %zd, %d, "
+  if(!Curl_bufq_is_empty(&ctx->tunnel.recvbuf) &&
+     (nwritten >= 0 || *err == CURLE_AGAIN)) {
+    /* data pending and no fatal error to report. Need to trigger
+     * draining to avoid stalling when no socket events happen. */
+    drain_tunnel(cf, data, &ctx->tunnel);
+  }
+  CURL_TRC_CF(data, cf, "[%d] cf_send(len=%zu) -> %zd, %d, "
               "h2 windows %d-%d (stream-conn), buffers %zu-%zu (stream-conn)",
               ctx->tunnel.stream_id, len, nwritten, *err,
               nghttp2_session_get_stream_remote_window_size(
@@ -1443,7 +1561,7 @@ static bool cf_h2_proxy_is_alive(struct Curl_cfilter *cf,
 
   CF_DATA_SAVE(save, cf, data);
   result = (ctx && ctx->h2 && proxy_h2_connisalive(cf, data, input_pending));
-  CURL_TRC_CF(data, cf, "conn alive -> %d, input_pending=%d",
+  CURL_TRC_CF(data, cf, "[0] conn alive -> %d, input_pending=%d",
               result, *input_pending);
   CF_DATA_RESTORE(cf, save);
   return result;
index 2d6189cb7fadb6ae7bcec7e4774a8f0fdf7d26f0..effe6e649d1390124dd8af5e618a93f39e7510a5 100644 (file)
@@ -71,6 +71,7 @@
 #include "warnless.h"
 #include "conncache.h"
 #include "multihandle.h"
+#include "rand.h"
 #include "share.h"
 #include "version_win32.h"
 
@@ -777,6 +778,10 @@ struct cf_socket_ctx {
   struct curltime connected_at;      /* when socket connected/got first byte */
   struct curltime first_byte_at;     /* when first byte was recvd */
   int error;                         /* errno of last failure or 0 */
+#ifdef DEBUGBUILD
+  int wblock_percent;                /* percent of writes doing EAGAIN */
+  int wpartial_percent;              /* percent of bytes written in send */
+#endif
   BIT(got_first_byte);               /* if first byte was received */
   BIT(accepted);                     /* socket was accepted, not connected */
   BIT(active);
@@ -792,6 +797,22 @@ static void cf_socket_ctx_init(struct cf_socket_ctx *ctx,
   ctx->transport = transport;
   Curl_sock_assign_addr(&ctx->addr, ai, transport);
   Curl_bufq_init(&ctx->recvbuf, NW_RECV_CHUNK_SIZE, NW_RECV_CHUNKS);
+#ifdef DEBUGBUILD
+  {
+    char *p = getenv("CURL_DBG_SOCK_WBLOCK");
+    if(p) {
+      long l = strtol(p, NULL, 10);
+      if(l >= 0 && l <= 100)
+        ctx->wblock_percent = (int)l;
+    }
+    p = getenv("CURL_DBG_SOCK_WPARTIAL");
+    if(p) {
+      long l = strtol(p, NULL, 10);
+      if(l >= 0 && l <= 100)
+        ctx->wpartial_percent = (int)l;
+    }
+  }
+#endif
 }
 
 struct reader_ctx {
@@ -1253,11 +1274,34 @@ static ssize_t cf_socket_send(struct Curl_cfilter *cf, struct Curl_easy *data,
   struct cf_socket_ctx *ctx = cf->ctx;
   curl_socket_t fdsave;
   ssize_t nwritten;
+  size_t orig_len = len;
 
   *err = CURLE_OK;
   fdsave = cf->conn->sock[cf->sockindex];
   cf->conn->sock[cf->sockindex] = ctx->sock;
 
+#ifdef DEBUGBUILD
+  /* simulate network blocking/partial writes */
+  if(ctx->wblock_percent > 0) {
+    unsigned char c;
+    Curl_rand(data, &c, 1);
+    if(c >= ((100-ctx->wblock_percent)*256/100)) {
+      CURL_TRC_CF(data, cf, "send(len=%zu) SIMULATE EWOULDBLOCK", orig_len);
+      *err = CURLE_AGAIN;
+      nwritten = -1;
+      cf->conn->sock[cf->sockindex] = fdsave;
+      return nwritten;
+    }
+  }
+  if(cf->cft != &Curl_cft_udp && ctx->wpartial_percent > 0 && len > 8) {
+    len = len * ctx->wpartial_percent / 100;
+    if(!len)
+      len = 1;
+    CURL_TRC_CF(data, cf, "send(len=%zu) SIMULATE partial write of %zu bytes",
+                orig_len, len);
+  }
+#endif
+
 #if defined(MSG_FASTOPEN) && !defined(TCP_FASTOPEN_CONNECT) /* Linux */
   if(cf->conn->bits.tcp_fastopen) {
     nwritten = sendto(ctx->sock, buf, len, MSG_FASTOPEN,
@@ -1297,7 +1341,7 @@ static ssize_t cf_socket_send(struct Curl_cfilter *cf, struct Curl_easy *data,
   }
 
   CURL_TRC_CF(data, cf, "send(len=%zu) -> %d, err=%d",
-              len, (int)nwritten, *err);
+              orig_len, (int)nwritten, *err);
   cf->conn->sock[cf->sockindex] = fdsave;
   return nwritten;
 }
index 2c5d6b4d74830f713c70ba36485fabff297dbcfc..903eac4c4cb3b129202f8c8f18f9e8b7ee409c04 100644 (file)
@@ -187,6 +187,7 @@ struct stream_ctx {
   int status_code; /* HTTP response status code */
   uint32_t error; /* stream error code */
   uint32_t local_window_size; /* the local recv window size */
+  bool resp_hds_complete; /* we have a complete, final response */
   bool closed; /* TRUE on stream close */
   bool reset;  /* TRUE on stream reset */
   bool close_handled; /* TRUE if stream closure is handled by libcurl */
@@ -1044,6 +1045,9 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf,
     if(result)
       return result;
 
+    if(stream->status_code / 100 != 1) {
+      stream->resp_hds_complete = TRUE;
+    }
     drain_stream(cf, data, stream);
     break;
   case NGHTTP2_PUSH_PROMISE:
@@ -1064,7 +1068,9 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf,
     break;
   case NGHTTP2_RST_STREAM:
     stream->closed = TRUE;
-    stream->reset = TRUE;
+    if(frame->rst_stream.error_code) {
+      stream->reset = TRUE;
+    }
     stream->send_closed = TRUE;
     data->req.keepon &= ~KEEP_SEND_HOLD;
     drain_stream(cf, data, stream);
@@ -1109,8 +1115,9 @@ static int fr_print(const nghttp2_frame *frame, char *buffer, size_t blen)
     }
     case NGHTTP2_RST_STREAM: {
       return msnprintf(buffer, blen,
-                       "FRAME[RST_STREAM, len=%d, flags=%d]",
-                       (int)frame->hd.length, frame->hd.flags);
+                       "FRAME[RST_STREAM, len=%d, flags=%d, error=%u]",
+                       (int)frame->hd.length, frame->hd.flags,
+                       frame->rst_stream.error_code);
     }
     case NGHTTP2_SETTINGS: {
       if(frame->hd.flags & NGHTTP2_FLAG_ACK) {
@@ -1166,7 +1173,7 @@ static int on_frame_send(nghttp2_session *session, const nghttp2_frame *frame,
   if(data && Curl_trc_cf_is_verbose(cf, data)) {
     char buffer[256];
     int len;
-    len = fr_print(frame, buffer, (sizeof(buffer)/sizeof(buffer[0]))-1);
+    len = fr_print(frame, buffer, sizeof(buffer)-1);
     buffer[len] = 0;
     CURL_TRC_CF(data, cf, "[%d] -> %s", frame->hd.stream_id, buffer);
   }
@@ -1187,7 +1194,7 @@ static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame,
   if(Curl_trc_cf_is_verbose(cf, data)) {
     char buffer[256];
     int len;
-    len = fr_print(frame, buffer, (sizeof(buffer)/sizeof(buffer[0]))-1);
+    len = fr_print(frame, buffer, sizeof(buffer)-1);
     buffer[len] = 0;
     CURL_TRC_CF(data, cf, "[%d] <- %s",frame->hd.stream_id, buffer);
   }
@@ -1975,7 +1982,14 @@ static ssize_t cf_h2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
 
 out:
   result = h2_progress_egress(cf, data);
-  if(result && result != CURLE_AGAIN) {
+  if(result == CURLE_AGAIN) {
+    /* pending data to send, need to be called again. Ideally, we'd
+     * monitor the socket for POLLOUT, but we might not be in SENDING
+     * transfer state any longer and are unable to make this happen.
+     */
+    drain_stream(cf, data, stream);
+  }
+  else if(result) {
     *err = result;
     nread = -1;
   }
@@ -2151,27 +2165,17 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
   int rv;
   ssize_t nwritten;
   CURLcode result;
-  int blocked = 0;
+  int blocked = 0, was_blocked = 0;
 
   CF_DATA_SAVE(save, cf, data);
 
   if(stream && stream->id != -1) {
-    if(stream->close_handled) {
-      infof(data, "stream %u closed", stream->id);
-      *err = CURLE_HTTP2_STREAM;
-      nwritten = -1;
-      goto out;
-    }
-    else if(stream->closed) {
-      nwritten = http2_handle_stream_close(cf, data, stream, err);
-      goto out;
-    }
-    else if(stream->upload_blocked_len) {
+    if(stream->upload_blocked_len) {
       /* the data in `buf` has already been submitted or added to the
        * buffers, but have been EAGAINed on the last invocation. */
       /* TODO: this assertion triggers in OSSFuzz runs and it is not
-       * clear why. Disable for now to let OSSFuzz continue its tests.
-      DEBUGASSERT(len >= stream->upload_blocked_len); */
+       * clear why. Disable for now to let OSSFuzz continue its tests. */
+      DEBUGASSERT(len >= stream->upload_blocked_len);
       if(len < stream->upload_blocked_len) {
         /* Did we get called again with a smaller `len`? This should not
          * happen. We are not prepared to handle that. */
@@ -2182,6 +2186,25 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
       }
       nwritten = (ssize_t)stream->upload_blocked_len;
       stream->upload_blocked_len = 0;
+      was_blocked = 1;
+    }
+    else if(stream->closed) {
+      if(stream->resp_hds_complete) {
+        /* Server decided to close the stream after having sent us a final
+         * response. This is valid if it is not interested in the request
+         * body. This happens on 30x or 40x responses.
+         * We silently discard the data sent, since this is not a transport
+         * error situation. */
+        CURL_TRC_CF(data, cf, "[%d] discarding data "
+                    "on closed stream with response", stream->id);
+        *err = CURLE_OK;
+        nwritten = (ssize_t)len;
+        goto out;
+      }
+      infof(data, "stream %u closed", stream->id);
+      *err = CURLE_SEND_ERROR;
+      nwritten = -1;
+      goto out;
     }
     else {
       /* If stream_id != -1, we have dispatched request HEADERS and
@@ -2218,8 +2241,10 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
   result = h2_progress_egress(cf, data);
   /* if the stream has been closed in egress handling (nghttp2 does that
    * when it does not like the headers, for example */
-  if(stream && stream->closed) {
-    nwritten = http2_handle_stream_close(cf, data, stream, err);
+  if(stream && stream->closed && !was_blocked) {
+    infof(data, "stream %u closed", stream->id);
+    *err = CURLE_SEND_ERROR;
+    nwritten = -1;
     goto out;
   }
   else if(result == CURLE_AGAIN) {
@@ -2367,8 +2392,12 @@ static CURLcode cf_h2_connect(struct Curl_cfilter *cf,
   if(result)
     goto out;
 
+  /* Send out our SETTINGS and ACKs and such. If that blocks, we
+   * have it buffered and can count this filter as being connected */
   result = h2_progress_egress(cf, data);
-  if(result)
+  if(result == CURLE_AGAIN)
+    result = CURLE_OK;
+  else if(result)
     goto out;
 
   *done = TRUE;
@@ -2376,6 +2405,7 @@ static CURLcode cf_h2_connect(struct Curl_cfilter *cf,
   result = CURLE_OK;
 
 out:
+  CURL_TRC_CF(data, cf, "cf_connect() -> %d, %d", result, *done);
   CF_DATA_RESTORE(cf, save);
   return result;
 }
index b17a3b03c380b387f32a9727c84606fa2eef6a0b..dab207a6f32bf16a509398deb772442359f2f0be 100644 (file)
@@ -667,8 +667,14 @@ static CURLcode multi_done(struct Curl_easy *data,
   struct connectdata *conn = data->conn;
   unsigned int i;
 
+#if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS)
+  DEBUGF(infof(data, "multi_done[%s]: status: %d prem: %d done: %d",
+               multi_statename[data->mstate],
+               (int)status, (int)premature, data->state.done));
+#else
   DEBUGF(infof(data, "multi_done: status: %d prem: %d done: %d",
                (int)status, (int)premature, data->state.done));
+#endif
 
   if(data->state.done)
     /* Stop if multi_done() has already been called */
index 0f49631f7fa9cc166e0855ef287b2e28addbf127..fb0a6a45d916c6792d7100f8a570d4f82e796eec 100644 (file)
@@ -431,6 +431,7 @@ static CURLcode readwrite_data(struct Curl_easy *data,
   curl_off_t max_recv = data->set.max_recv_speed?
                         data->set.max_recv_speed : CURL_OFF_T_MAX;
   char *buf = data->state.buffer;
+  bool data_eof_handled = FALSE;
   DEBUGASSERT(buf);
 
   *done = FALSE;
@@ -448,8 +449,7 @@ static CURLcode readwrite_data(struct Curl_easy *data,
        to ensure that http2_handle_stream_close is called when we read all
        incoming bytes for a particular stream. */
     bool is_http3 = Curl_conn_is_http3(data, conn, FIRSTSOCKET);
-    bool data_eof_handled = is_http3
-                            || Curl_conn_is_http2(data, conn, FIRSTSOCKET);
+    data_eof_handled = is_http3 || Curl_conn_is_http2(data, conn, FIRSTSOCKET);
 
     if(!data_eof_handled && k->size != -1 && !k->header) {
       /* make sure we don't read too much */
@@ -761,7 +761,7 @@ static CURLcode readwrite_data(struct Curl_easy *data,
   }
 
   if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
-     conn->bits.close) {
+     (conn->bits.close || data_eof_handled)) {
     /* When we've read the entire thing and the close bit is set, the server
        may now close the connection. If there's now any kind of sending going
        on from our side, we need to stop that immediately. */
index ebc391bf0b2ea92db5d9a72eede75c4a9ea9858b..7e71be98757e39c45586e3c0de446e113860f850 100644 (file)
@@ -1106,6 +1106,7 @@ static int cb_h3_stream_close(nghttp3_conn *conn, int64_t stream_id,
   else {
     CURL_TRC_CF(data, cf, "[%" PRId64 "] CLOSED", stream->id);
   }
+  data->req.keepon &= ~KEEP_SEND_HOLD;
   h3_drain_stream(cf, data);
   return 0;
 }
@@ -1785,6 +1786,18 @@ static ssize_t cf_ngtcp2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
     stream->upload_blocked_len = 0;
   }
   else if(stream->closed) {
+    if(stream->resp_hds_complete) {
+      /* Server decided to close the stream after having sent us a final
+       * response. This is valid if it is not interested in the request
+       * body. This happens on 30x or 40x responses.
+       * We silently discard the data sent, since this is not a transport
+       * error situation. */
+      CURL_TRC_CF(data, cf, "[%" PRId64 "] discarding data "
+                  "on closed stream with response", stream->id);
+      *err = CURLE_OK;
+      sent = (ssize_t)len;
+      goto out;
+    }
     *err = CURLE_HTTP3;
     sent = -1;
     goto out;
@@ -2245,9 +2258,16 @@ static CURLcode cf_ngtcp2_data_event(struct Curl_cfilter *cf,
     }
     break;
   }
-  case CF_CTRL_DATA_IDLE:
-    result = check_and_set_expiry(cf, data, NULL);
+  case CF_CTRL_DATA_IDLE: {
+    struct h3_stream_ctx *stream = H3_STREAM_CTX(data);
+    CURL_TRC_CF(data, cf, "data idle");
+    if(stream && !stream->closed) {
+      result = check_and_set_expiry(cf, data, NULL);
+      if(result)
+        CURL_TRC_CF(data, cf, "data idle, check_and_set_expiry -> %d", result);
+    }
     break;
+  }
   default:
     break;
   }
index bbb069096ed21382db0dfb20a5e7cc854c6202e3..849a948230b9277c52040b0ba25d8310d5a95348 100644 (file)
@@ -565,6 +565,7 @@ static CURLcode h3_process_event(struct Curl_cfilter *cf,
     }
     stream->closed = TRUE;
     streamclose(cf->conn, "End of stream");
+    data->req.keepon &= ~KEEP_SEND_HOLD;
     break;
 
   case QUICHE_H3_EVENT_GOAWAY:
@@ -1082,6 +1083,22 @@ static ssize_t cf_quiche_send(struct Curl_cfilter *cf, struct Curl_easy *data,
       nwritten = -1;
       goto out;
     }
+    else if(nwritten == QUICHE_H3_TRANSPORT_ERR_INVALID_STREAM_STATE &&
+            stream->closed && stream->resp_hds_complete) {
+      /* sending request body on a stream that has been closed by the
+       * server. If the server has sent us a final response, we should
+       * silently discard the data to send.
+       * This happens for example on redirects where the server, instead
+       * of reading the full request body, just closed the stream after
+       * sending the 30x response.
+       * This is sort of a race: had the transfer loop called recv first,
+       * it would see the response and stop/discard sending on its own. */
+      CURL_TRC_CF(data, cf, "[%" PRId64 "] discarding data "
+                  "on closed stream with response", stream->id);
+      *err = CURLE_OK;
+      nwritten = (ssize_t)len;
+      goto out;
+    }
     else if(nwritten == QUICHE_H3_TRANSPORT_ERR_FINAL_SIZE) {
       CURL_TRC_CF(data, cf, "[%" PRId64 "] send_body(len=%zu) "
                   "-> exceeds size", stream->id, len);
@@ -1213,11 +1230,15 @@ static CURLcode cf_quiche_data_event(struct Curl_cfilter *cf,
     }
     break;
   }
-  case CF_CTRL_DATA_IDLE:
-    result = cf_flush_egress(cf, data);
-    if(result)
-      CURL_TRC_CF(data, cf, "data idle, flush egress -> %d", result);
+  case CF_CTRL_DATA_IDLE: {
+    struct stream_ctx *stream = H3_STREAM_CTX(data);
+    if(stream && !stream->closed) {
+      result = cf_flush_egress(cf, data);
+      if(result)
+        CURL_TRC_CF(data, cf, "data idle, flush egress -> %d", result);
+    }
     break;
+  }
   default:
     break;
   }
index e6b50d327afe0f25b65fafaf0b8c6ceaca556c4e..9a1a1bbb3c39e926f8440f5aa93584bf7ea0f60e 100644 (file)
@@ -47,6 +47,7 @@
 #include "curl_msh3.h"
 #include "curl_ngtcp2.h"
 #include "curl_quiche.h"
+#include "rand.h"
 #include "vquic.h"
 #include "vquic_int.h"
 #include "strerror.h"
@@ -89,6 +90,16 @@ CURLcode vquic_ctx_init(struct cf_quic_ctx *qctx)
 #else
   qctx->no_gso = TRUE;
 #endif
+#ifdef DEBUGBUILD
+  {
+    char *p = getenv("CURL_DBG_QUIC_WBLOCK");
+    if(p) {
+      long l = strtol(p, NULL, 10);
+      if(l >= 0 && l <= 100)
+        qctx->wblock_percent = (int)l;
+    }
+  }
+#endif
 
   return CURLE_OK;
 }
@@ -231,6 +242,17 @@ static CURLcode vquic_send_packets(struct Curl_cfilter *cf,
                                    const uint8_t *pkt, size_t pktlen,
                                    size_t gsolen, size_t *psent)
 {
+#ifdef DEBUGBUILD
+  /* simulate network blocking/partial writes */
+  if(qctx->wblock_percent > 0) {
+    unsigned char c;
+    Curl_rand(data, &c, 1);
+    if(c >= ((100-qctx->wblock_percent)*256/100)) {
+      CURL_TRC_CF(data, cf, "vquic_flush() simulate EWOULDBLOCK");
+      return CURLE_AGAIN;
+    }
+  }
+#endif
   if(qctx->no_gso && pktlen > gsolen) {
     return send_packet_no_gso(cf, data, qctx, pkt, pktlen, gsolen, psent);
   }
index 8e08784e7d948185e536fd22823b818c72760e3c..dbcd009d72121d211582e9b183f7f89c266dc735 100644 (file)
@@ -41,6 +41,9 @@ struct cf_quic_ctx {
   size_t gsolen; /* length of individual packets in send buf */
   size_t split_len; /* if != 0, buffer length after which GSO differs */
   size_t split_gsolen; /* length of individual packets after split_len */
+#ifdef DEBUGBUILD
+  int wblock_percent; /* percent of writes doing EAGAIN */
+#endif
   bool no_gso; /* do not use gso on sending */
 };
 
index 3485246c0d9d543cba950ea0c07bddabdf68d755..5f157207428f6c1a443603511333787e27f8a836 100644 (file)
@@ -295,7 +295,7 @@ static int bio_cf_out_write(WOLFSSL_BIO *bio, const char *buf, int blen)
               blen, nwritten, result);
   wolfSSL_BIO_clear_retry_flags(bio);
   if(nwritten < 0 && CURLE_AGAIN == result)
-    BIO_set_retry_read(bio);
+    BIO_set_retry_write(bio);
   return (int)nwritten;
 }
 
index 30b87007b606d1f0754618eaa94dd8bf3556d6b4..7c1792d3747add03ef647b10f82f87f6f21bf88f 100644 (file)
@@ -81,6 +81,6 @@ class TestBasic:
     def test_01_05_h3_get(self, env: Env, httpd, nghttpx):
         curl = CurlClient(env=env)
         url = f'https://{env.domain1}:{env.h3_port}/data.json'
-        r = curl.http_get(url=url, extra_args=['--http3'])
+        r = curl.http_get(url=url, extra_args=['--http3-only'])
         r.check_response(http_status=200, protocol='HTTP/3')
         assert r.json['server'] == env.domain1
index 5d4d22230c25810b067c6358e5dcee22a6753461..38db3372bb21488f255e42065e82f8f14c57e446 100644 (file)
@@ -200,6 +200,7 @@ class TestDownload:
         ])
         r.check_response(count=count, http_status=200)
 
+    @pytest.mark.skipif(condition=Env.slow_network, reason="not suitable for slow network tests")
     @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
     def test_02_10_10MB_serial(self, env: Env,
                               httpd, nghttpx, repeat, proto):
@@ -211,6 +212,7 @@ class TestDownload:
         r = curl.http_download(urls=[urln], alpn_proto=proto)
         r.check_response(count=count, http_status=200)
 
+    @pytest.mark.skipif(condition=Env.slow_network, reason="not suitable for slow network tests")
     @pytest.mark.parametrize("proto", ['h2', 'h3'])
     def test_02_11_10MB_parallel(self, env: Env,
                               httpd, nghttpx, repeat, proto):
@@ -252,6 +254,7 @@ class TestDownload:
         ])
         r.check_response(count=count, http_status=200)
 
+    @pytest.mark.skipif(condition=Env.slow_network, reason="not suitable for slow network tests")
     def test_02_20_h2_small_frames(self, env: Env, httpd, repeat):
         # Test case to reproduce content corruption as observed in
         # https://github.com/curl/curl/issues/10525
index 2a5f1e2666035be19b54db1be56c1fdc2822b6c2..41dc55992a24adb8c61377cdb2047317cbb341e3 100644 (file)
@@ -25,6 +25,7 @@
 ###########################################################################
 #
 import logging
+import os
 from typing import Tuple, List, Dict
 import pytest
 
@@ -34,6 +35,7 @@ from testenv import Env, CurlClient
 log = logging.getLogger(__name__)
 
 
+@pytest.mark.skipif(condition=Env.slow_network, reason="not suitable for slow network tests")
 class TestStuttered:
 
     @pytest.fixture(autouse=True, scope='class')
index eb2ff55e0a07a20facbf99dd95f29b9e0a7c7aea..2738ed3409461f3d014831fe435419b4bf92a5c4 100644 (file)
@@ -42,6 +42,8 @@ class TestUpload:
     def _class_scope(self, env, httpd, nghttpx):
         if env.have_h3():
             nghttpx.start_if_needed()
+        env.make_data_file(indir=env.gen_dir, fname="data-63k", fsize=63*1024)
+        env.make_data_file(indir=env.gen_dir, fname="data-64k", fsize=64*1024)
         env.make_data_file(indir=env.gen_dir, fname="data-100k", fsize=100*1024)
         env.make_data_file(indir=env.gen_dir, fname="data-10m", fsize=10*1024*1024)
         httpd.clear_extra_configs()
@@ -58,7 +60,7 @@ class TestUpload:
         curl = CurlClient(env=env)
         url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
         r = curl.http_upload(urls=[url], data=data, alpn_proto=proto)
-        r.check_response(count=1, http_status=200)
+        r.check_stats(count=1, http_status=200, exitcode=0)
         respdata = open(curl.response_file(0)).readlines()
         assert respdata == [data]
 
@@ -73,7 +75,7 @@ class TestUpload:
         curl = CurlClient(env=env)
         url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
         r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto)
-        r.check_response(count=1, http_status=200)
+        r.check_stats(count=1, http_status=200, exitcode=0)
         indata = open(fdata).readlines()
         respdata = open(curl.response_file(0)).readlines()
         assert respdata == indata
@@ -90,7 +92,7 @@ class TestUpload:
         curl = CurlClient(env=env)
         url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
         r = curl.http_upload(urls=[url], data=data, alpn_proto=proto)
-        r.check_response(count=count, http_status=200)
+        r.check_stats(count=count, http_status=200, exitcode=0)
         for i in range(count):
             respdata = open(curl.response_file(i)).readlines()
             assert respdata == [data]
@@ -109,14 +111,14 @@ class TestUpload:
         url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
         r = curl.http_upload(urls=[url], data=data, alpn_proto=proto,
                              extra_args=['--parallel'])
-        r.check_response(count=count, http_status=200)
+        r.check_stats(count=count, http_status=200, exitcode=0)
         for i in range(count):
             respdata = open(curl.response_file(i)).readlines()
             assert respdata == [data]
 
     # upload large data sequentially, check that this is what was echoed
     @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
-    def test_07_20_upload_seq_large(self, env: Env, httpd, nghttpx, repeat, proto):
+    def test_07_12_upload_seq_large(self, env: Env, httpd, nghttpx, repeat, proto):
         if proto == 'h3' and not env.have_h3():
             pytest.skip("h3 not supported")
         if proto == 'h3' and env.curl_uses_lib('msh3'):
@@ -128,14 +130,14 @@ class TestUpload:
         r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto)
         r.check_response(count=count, http_status=200)
         indata = open(fdata).readlines()
-        r.check_response(count=count, http_status=200)
+        r.check_stats(count=count, http_status=200, exitcode=0)
         for i in range(count):
             respdata = open(curl.response_file(i)).readlines()
             assert respdata == indata
 
     # upload very large data sequentially, check that this is what was echoed
     @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
-    def test_07_12_upload_seq_large(self, env: Env, httpd, nghttpx, repeat, proto):
+    def test_07_13_upload_seq_large(self, env: Env, httpd, nghttpx, repeat, proto):
         if proto == 'h3' and not env.have_h3():
             pytest.skip("h3 not supported")
         if proto == 'h3' and env.curl_uses_lib('msh3'):
@@ -145,7 +147,7 @@ class TestUpload:
         curl = CurlClient(env=env)
         url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
         r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto)
-        r.check_response(count=count, http_status=200)
+        r.check_stats(count=count, http_status=200, exitcode=0)
         indata = open(fdata).readlines()
         for i in range(count):
             respdata = open(curl.response_file(i)).readlines()
@@ -165,7 +167,7 @@ class TestUpload:
         url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
         r = curl.http_upload(urls=[url], data=data, alpn_proto=proto,
                              extra_args=['--parallel'])
-        r.check_response(count=count, http_status=200)
+        r.check_stats(count=count, http_status=200, exitcode=0)
         for i in range(count):
             respdata = open(curl.response_file(i)).readlines()
             assert respdata == [data]
@@ -200,7 +202,7 @@ class TestUpload:
         url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-{count-1}]'
         r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto,
                              extra_args=['--parallel'])
-        r.check_response(count=count, http_status=200)
+        r.check_stats(count=count, http_status=200, exitcode=0)
         exp_data = [f'{os.path.getsize(fdata)}']
         r.check_response(count=count, http_status=200)
         for i in range(count):
@@ -220,7 +222,7 @@ class TestUpload:
         url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-{count-1}]&chunk_delay=10ms'
         r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto,
                              extra_args=['--parallel'])
-        r.check_response(count=count, http_status=200)
+        r.check_stats(count=count, http_status=200, exitcode=0)
         exp_data = [f'{os.path.getsize(fdata)}']
         r.check_response(count=count, http_status=200)
         for i in range(count):
@@ -239,7 +241,7 @@ class TestUpload:
         curl = CurlClient(env=env)
         url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-{count-1}]'
         r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto)
-        r.check_response(count=count, http_status=200)
+        r.check_stats(count=count, http_status=200, exitcode=0)
 
     # issue #11157, upload that is 404'ed by server, needs to terminate
     # correctly and not time out on sending
@@ -272,6 +274,7 @@ class TestUpload:
         url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?chunk_delay=2ms'
         curl = CurlClient(env=env)
         r = curl.run_direct(with_stats=True, args=[
+            '--verbose', '--trace-config', 'ids,time',
             '--resolve', f'{env.authority_for(env.domain1, proto)}:127.0.0.1',
             '--cacert', env.ca.cert_file,
             '--request', 'PUT',
@@ -293,7 +296,7 @@ class TestUpload:
         url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put'
         curl = CurlClient(env=env)
         r = curl.run_direct(with_stats=True, args=[
-            '--verbose',
+            '--verbose', '--trace-config', 'ids,time',
             '--resolve', f'{env.authority_for(env.domain1, proto)}:127.0.0.1',
             '--cacert', env.ca.cert_file,
             '--request', 'PUT',
@@ -319,6 +322,91 @@ class TestUpload:
         respdata = open(curl.response_file(0)).readlines()
         assert respdata == indata
 
+    # upload to a 301,302,303 response
+    @pytest.mark.parametrize("redir", ['301', '302', '303'])
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_36_upload_30x(self, env: Env, httpd, nghttpx, repeat, redir, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        data = '0123456789' * 10
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo{redir}?id=[0-0]'
+        r = curl.http_upload(urls=[url], data=data, alpn_proto=proto, extra_args=[
+            '-L', '--trace-config', 'http/2,http/3'
+        ])
+        r.check_response(count=1, http_status=200)
+        respdata = open(curl.response_file(0)).readlines()
+        assert respdata == []  # was transformed to a GET
+
+    # upload to a 307 response
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_37_upload_307(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        data = '0123456789' * 10
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo307?id=[0-0]'
+        r = curl.http_upload(urls=[url], data=data, alpn_proto=proto, extra_args=[
+            '-L', '--trace-config', 'http/2,http/3'
+        ])
+        r.check_response(count=1, http_status=200)
+        respdata = open(curl.response_file(0)).readlines()
+        assert respdata == [data]  # was POST again
+
+    # POST form data, yet another code path in transfer
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_38_form_small(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
+        r = curl.http_form(urls=[url], alpn_proto=proto, form={
+            'name1': 'value1',
+        })
+        r.check_stats(count=1, http_status=200, exitcode=0)
+
+    # POST data urlencoded, small enough to be sent with request headers
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_39_post_urlenc_small(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        fdata = os.path.join(env.gen_dir, 'data-63k')
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
+        r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto, extra_args=[
+            '--trace-config', 'http/2,http/3'
+        ])
+        r.check_stats(count=1, http_status=200, exitcode=0)
+        indata = open(fdata).readlines()
+        respdata = open(curl.response_file(0)).readlines()
+        assert respdata == indata
+
+    # POST data urlencoded, large enough to be sent separately from the request headers
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_40_post_urlenc_large(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        fdata = os.path.join(env.gen_dir, 'data-64k')
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
+        r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto, extra_args=[
+            '--trace-config', 'http/2,http/3'
+        ])
+        r.check_stats(count=1, http_status=200, exitcode=0)
+        indata = open(fdata).readlines()
+        respdata = open(curl.response_file(0)).readlines()
+        assert respdata == indata
+
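The 63k/64k pair brackets the size at which the body is no longer sent together with the request headers. As an aside, fixture files of an exact size can be produced like this (a stand-in sketch; env.make_data_file in the suite may generate different content):

    # generate predictable text files of an exact size, 100 bytes per line
    import os

    def make_data_file(path: str, fsize: int) -> None:
        line = '0123456789' * 9 + 'abcdefghi\n'   # exactly 100 bytes
        with open(path, 'w') as f:
            written = 0
            while written + len(line) <= fsize:
                f.write(line)
                written += len(line)
            f.write('x' * (fsize - written))       # pad to the requested size

    for name, size in [('data-63k', 63 * 1024), ('data-64k', 64 * 1024)]:
        make_data_file(f'/tmp/{name}', size)
        print(name, os.path.getsize(f'/tmp/{name}'))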
     def check_download(self, count, srcfile, curl):
         for i in range(count):
             dfile = curl.download_file(i)
diff --git a/tests/http/test_07_upload.py.orig b/tests/http/test_07_upload.py.orig
new file mode 100644 (file)
index 0000000..5d80ece
--- /dev/null
@@ -0,0 +1,366 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#***************************************************************************
+#                                  _   _ ____  _
+#  Project                     ___| | | |  _ \| |
+#                             / __| | | | |_) | |
+#                            | (__| |_| |  _ <| |___
+#                             \___|\___/|_| \_\_____|
+#
+# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at https://curl.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+#
+# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+# KIND, either express or implied.
+#
+# SPDX-License-Identifier: curl
+#
+###########################################################################
+#
+import difflib
+import filecmp
+import logging
+import os
+import pytest
+
+from testenv import Env, CurlClient
+
+
+log = logging.getLogger(__name__)
+
+
+class TestUpload:
+
+    @pytest.fixture(autouse=True, scope='class')
+    def _class_scope(self, env, httpd, nghttpx):
+        if env.have_h3():
+            nghttpx.start_if_needed()
+        env.make_data_file(indir=env.gen_dir, fname="data-63k", fsize=63*1024)
+        env.make_data_file(indir=env.gen_dir, fname="data-64k", fsize=64*1024)
+        env.make_data_file(indir=env.gen_dir, fname="data-100k", fsize=100*1024)
+        env.make_data_file(indir=env.gen_dir, fname="data-10m", fsize=10*1024*1024)
+        httpd.clear_extra_configs()
+        httpd.reload()
+
+    # upload small data, check that this is what was echoed
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_01_upload_1_small(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        data = '0123456789'
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
+        r = curl.http_upload(urls=[url], data=data, alpn_proto=proto)
+        r.check_stats(count=1, http_status=200, exitcode=0)
+        respdata = open(curl.response_file(0)).readlines()
+        assert respdata == [data]
+
+    # upload large data, check that this is what was echoed
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_02_upload_1_large(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        fdata = os.path.join(env.gen_dir, 'data-100k')
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
+        r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto)
+        r.check_stats(count=1, http_status=200, exitcode=0)
+        indata = open(fdata).readlines()
+        respdata = open(curl.response_file(0)).readlines()
+        assert respdata == indata
+
+    # upload data sequentially, check that they were echoed
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_10_upload_sequential(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 stalls here")
+        count = 50
+        data = '0123456789'
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
+        r = curl.http_upload(urls=[url], data=data, alpn_proto=proto)
+        r.check_stats(count=count, http_status=200, exitcode=0)
+        for i in range(count):
+            respdata = open(curl.response_file(i)).readlines()
+            assert respdata == [data]
+
+    # upload data parallel, check that they were echoed
+    @pytest.mark.parametrize("proto", ['h2', 'h3'])
+    def test_07_11_upload_parallel(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 stalls here")
+        # limit since we use a separate connection in h1
+        count = 50
+        data = '0123456789'
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
+        r = curl.http_upload(urls=[url], data=data, alpn_proto=proto,
+                             extra_args=['--parallel'])
+        r.check_stats(count=count, http_status=200, exitcode=0)
+        for i in range(count):
+            respdata = open(curl.response_file(i)).readlines()
+            assert respdata == [data]
+
+    # upload large data sequentially, check that this is what was echoed
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_20_upload_seq_large(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 stalls here")
+        fdata = os.path.join(env.gen_dir, 'data-100k')
+        count = 50
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
+        r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto)
+        r.check_response(count=count, http_status=200)
+        indata = open(fdata).readlines()
+        r.check_stats(count=count, http_status=200, exitcode=0)
+        for i in range(count):
+            respdata = open(curl.response_file(i)).readlines()
+            assert respdata == indata
+
+    # upload very large data sequentially, check that this is what was echoed
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_12_upload_seq_large(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 stalls here")
+        fdata = os.path.join(env.gen_dir, 'data-10m')
+        count = 2
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
+        r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto)
+        r.check_stats(count=count, http_status=200, exitcode=0)
+        indata = open(fdata).readlines()
+        for i in range(count):
+            respdata = open(curl.response_file(i)).readlines()
+            assert respdata == indata
+
+    # upload data parallel, check that they were echoed
+    @pytest.mark.parametrize("proto", ['h2', 'h3'])
+    def test_07_20_upload_parallel(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 stalls here")
+        # limit since we use a separate connection in h1
+        count = 50
+        data = '0123456789'
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
+        r = curl.http_upload(urls=[url], data=data, alpn_proto=proto,
+                             extra_args=['--parallel'])
+        r.check_stats(count=count, http_status=200, exitcode=0)
+        for i in range(count):
+            respdata = open(curl.response_file(i)).readlines()
+            assert respdata == [data]
+
+    # upload large data parallel, check that this is what was echoed
+    @pytest.mark.parametrize("proto", ['h2', 'h3'])
+    def test_07_21_upload_parallel_large(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 stalls here")
+        fdata = os.path.join(env.gen_dir, 'data-100k')
+        # limit since we use a separate connection in h1
+        count = 50
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
+        r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto,
+                             extra_args=['--parallel'])
+        r.check_response(count=count, http_status=200)
+        self.check_download(count, fdata, curl)
+
+    # PUT 100k
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_30_put_100k(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        fdata = os.path.join(env.gen_dir, 'data-100k')
+        count = 1
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-{count-1}]'
+        r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto,
+                             extra_args=['--parallel'])
+        r.check_stats(count=count, http_status=200, exitcode=0)
+        exp_data = [f'{os.path.getsize(fdata)}']
+        r.check_response(count=count, http_status=200)
+        for i in range(count):
+            respdata = open(curl.response_file(i)).readlines()
+            assert respdata == exp_data
+
+    # PUT 10m
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_31_put_10m(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        fdata = os.path.join(env.gen_dir, 'data-10m')
+        count = 1
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-{count-1}]&chunk_delay=10ms'
+        r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto,
+                             extra_args=['--parallel'])
+        r.check_stats(count=count, http_status=200, exitcode=0)
+        exp_data = [f'{os.path.getsize(fdata)}']
+        r.check_response(count=count, http_status=200)
+        for i in range(count):
+            respdata = open(curl.response_file(i)).readlines()
+            assert respdata == exp_data
+
+    # issue #10591
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_32_issue_10591(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        fdata = os.path.join(env.gen_dir, 'data-10m')
+        count = 1
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-{count-1}]'
+        r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto)
+        r.check_stats(count=count, http_status=200, exitcode=0)
+
+    # issue #11157, upload that is 404'ed by server, needs to terminate
+    # correctly and not time out on sending
+    def test_07_33_issue_11157a(self, env: Env, httpd, nghttpx, repeat):
+        proto = 'h2'
+        fdata = os.path.join(env.gen_dir, 'data-10m')
+        # send a POST to our PUT handler, which immediately sends a 404 back
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put'
+        curl = CurlClient(env=env)
+        r = curl.run_direct(with_stats=True, args=[
+            '--resolve', f'{env.authority_for(env.domain1, proto)}:127.0.0.1',
+            '--cacert', env.ca.cert_file,
+            '--request', 'POST',
+            '--max-time', '5', '-v',
+            '--url', url,
+            '--form', 'idList=12345678',
+            '--form', 'pos=top',
+            '--form', 'name=mr_test',
+            '--form', f'fileSource=@{fdata};type=application/pdf',
+        ])
+        assert r.exit_code == 0, f'{r}'
+        r.check_stats(1, 404)
+
+    # issue #11157, send upload that is slowly read in
+    def test_07_33_issue_11157b(self, env: Env, httpd, nghttpx, repeat):
+        proto = 'h2'
+        fdata = os.path.join(env.gen_dir, 'data-10m')
+        # tell our test PUT handler to read the upload more slowly, so
+        # that the send buffering and transfer loop needs to wait
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?chunk_delay=2ms'
+        curl = CurlClient(env=env)
+        r = curl.run_direct(with_stats=True, args=[
+            '--resolve', f'{env.authority_for(env.domain1, proto)}:127.0.0.1',
+            '--cacert', env.ca.cert_file,
+            '--request', 'PUT',
+            '--max-time', '10', '-v',
+            '--url', url,
+            '--form', 'idList=12345678',
+            '--form', 'pos=top',
+            '--form', 'name=mr_test',
+            '--form', f'fileSource=@{fdata};type=application/pdf',
+        ])
+        assert r.exit_code == 0, r.dump_logs()
+        r.check_stats(1, 200)
+
+    def test_07_34_issue_11194(self, env: Env, httpd, nghttpx, repeat):
+        proto = 'h2'
+        # digest-authenticated PUT of a 100k file (issue #11194)
+        fdata = os.path.join(env.gen_dir, 'data-100k')
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put'
+        curl = CurlClient(env=env)
+        r = curl.run_direct(with_stats=True, args=[
+            '--verbose',
+            '--resolve', f'{env.authority_for(env.domain1, proto)}:127.0.0.1',
+            '--cacert', env.ca.cert_file,
+            '--request', 'PUT',
+            '--digest', '--user', 'test:test',
+            '--data-binary', f'@{fdata}',
+            '--url', url,
+        ])
+        assert r.exit_code == 0, r.dump_logs()
+        r.check_stats(1, 200)
+
+    # upload large data on a h1 to h2 upgrade
+    def test_07_35_h1_h2_upgrade_upload(self, env: Env, httpd, nghttpx, repeat):
+        fdata = os.path.join(env.gen_dir, 'data-100k')
+        curl = CurlClient(env=env)
+        url = f'http://{env.domain1}:{env.http_port}/curltest/echo?id=[0-0]'
+        r = curl.http_upload(urls=[url], data=f'@{fdata}', extra_args=[
+            '--http2'
+        ])
+        r.check_response(count=1, http_status=200)
+        # apache does not Upgrade on requests with a body
+        assert r.stats[0]['http_version'] == '1.1', f'{r}'
+        indata = open(fdata).readlines()
+        respdata = open(curl.response_file(0)).readlines()
+        assert respdata == indata
+
+    # POST form data, yet another code path in transfer
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_40_form_small(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
+        r = curl.http_form(urls=[url], alpn_proto=proto, form={
+            'name1': 'value1',
+        })
+        r.check_stats(count=1, http_status=200, exitcode=0)
+
+    # POST data urlencoded, just small enough to still be written together
+    # with the request headers
+    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+    def test_07_41_upload_small(self, env: Env, httpd, nghttpx, repeat, proto):
+        if proto == 'h3' and not env.have_h3():
+            pytest.skip("h3 not supported")
+        if proto == 'h3' and env.curl_uses_lib('msh3'):
+            pytest.skip("msh3 fails here")
+        fdata = os.path.join(env.gen_dir, 'data-63k')
+        curl = CurlClient(env=env)
+        url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
+        r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto)
+        r.check_stats(count=1, http_status=200, exitcode=0)
+        indata = open(fdata).readlines()
+        respdata = open(curl.response_file(0)).readlines()
+        assert respdata == indata
+
+    def check_download(self, count, srcfile, curl):
+        for i in range(count):
+            dfile = curl.download_file(i)
+            assert os.path.exists(dfile)
+            if not filecmp.cmp(srcfile, dfile, shallow=False):
+                diff = "".join(difflib.unified_diff(a=open(srcfile).readlines(),
+                                                    b=open(dfile).readlines(),
+                                                    fromfile=srcfile,
+                                                    tofile=dfile,
+                                                    n=1))
+                assert False, f'download {dfile} differs:\n{diff}'
index 13adaf4798000eceb229038d4b7ca72888527f26..0faba9b4ef0b020a3ae09c7a0cd10323a8a2f822 100644 (file)
@@ -110,6 +110,7 @@ class TestCaddy:
             assert r.total_connects == 1, r.dump_logs()
 
     # download 5MB files sequentially
+    @pytest.mark.skipif(condition=Env.slow_network(), reason="not suitable for slow network tests")
     @pytest.mark.parametrize("proto", ['h2', 'h3'])
     def test_08_04a_download_10mb_sequential(self, env: Env, caddy: Caddy,
                                            repeat, proto):
@@ -124,6 +125,7 @@ class TestCaddy:
         r.check_response(count=count, http_status=200, connect_count=1)
 
     # download 10MB files sequentially
+    @pytest.mark.skipif(condition=Env.slow_network(), reason="not suitable for slow network tests")
     @pytest.mark.parametrize("proto", ['h2', 'h3'])
     def test_08_04b_download_10mb_sequential(self, env: Env, caddy: Caddy,
                                            repeat, proto):
@@ -138,6 +140,7 @@ class TestCaddy:
         r.check_response(count=count, http_status=200, connect_count=1)
 
     # download 10MB files parallel
+    @pytest.mark.skipif(condition=Env.slow_network(), reason="not suitable for slow network tests")
     @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
     def test_08_05_download_1mb_parallel(self, env: Env, caddy: Caddy,
                                          repeat, proto):
index 7f365a925d877adf8f6e7427effb1098bf488ba7..df234013c15d0c44a1d11b3e84d137491e739052 100644 (file)
@@ -139,7 +139,6 @@ class TestProxy:
         url = f'https://localhost:{env.https_port}/data.json'
         xargs = curl.get_proxy_args(proxys=False, tunnel=True)
         r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
-                               with_headers=True,
                                extra_args=xargs)
         r.check_response(count=1, http_status=200,
                          protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
@@ -157,7 +156,7 @@ class TestProxy:
         url = f'https://localhost:{env.https_port}/data.json?[0-0]'
         xargs = curl.get_proxy_args(tunnel=True, proto=tunnel)
         r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
-                               with_headers=True, extra_args=xargs)
+                               extra_args=xargs)
         r.check_response(count=1, http_status=200,
                          protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
         assert self.get_tunnel_proto_used(r) == 'HTTP/2' \
@@ -185,7 +184,7 @@ class TestProxy:
         url = f'https://localhost:{env.https_port}/{fname}?[0-{count-1}]'
         xargs = curl.get_proxy_args(tunnel=True, proto=tunnel)
         r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
-                               with_headers=True, extra_args=xargs)
+                               extra_args=xargs)
         r.check_response(count=count, http_status=200,
                          protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
         assert self.get_tunnel_proto_used(r) == 'HTTP/2' \
@@ -237,7 +236,7 @@ class TestProxy:
         url2 = f'http://localhost:{env.http_port}/data.json'
         xargs = curl.get_proxy_args(tunnel=True, proto=tunnel)
         r = curl.http_download(urls=[url1, url2], alpn_proto='http/1.1', with_stats=True,
-                               with_headers=True, extra_args=xargs)
+                               extra_args=xargs)
         r.check_response(count=2, http_status=200)
         assert self.get_tunnel_proto_used(r) == 'HTTP/2' \
             if tunnel == 'h2' else 'HTTP/1.1'
index 554006fdea3f0d41cc71f709cca5524e0cc27c98..abeae0100e0a1f38e950b30f4f6c4f537761a723 100644 (file)
@@ -131,7 +131,6 @@ class TestProxyAuth:
         url = f'https://localhost:{env.https_port}/data.json'
         xargs = curl.get_proxy_args(proxys=True, tunnel=True, proto=tunnel)
         r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
-                               with_headers=True, with_trace=True,
                                extra_args=xargs)
         # expect "COULD_NOT_CONNECT"
         r.check_response(exitcode=56, http_status=None)
@@ -151,7 +150,6 @@ class TestProxyAuth:
         xargs = curl.get_proxy_args(proxys=True, tunnel=True, proto=tunnel)
         xargs.extend(['--proxy-user', 'proxy:proxy'])
         r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
-                               with_headers=True, with_trace=True,
                                extra_args=xargs)
         r.check_response(count=1, http_status=200,
                          protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
index a2fb48ba63ad94ae47b42e8842d767ad475b15ab..0cc0f925f249ec214f1e896109a383751d960f11 100644 (file)
@@ -91,7 +91,8 @@ class TestAuth:
         curl = CurlClient(env=env)
         url = f'https://{env.authority_for(env.domain1, proto)}/restricted/digest/data.json'
         r = curl.http_upload(urls=[url], data=data, alpn_proto=proto, extra_args=[
-            '--digest', '--user', f'test:{password}'
+            '--digest', '--user', f'test:{password}',
+            '--trace-config', 'http/2,http/3'
         ])
         # digest does not submit the password, but a hash of it, so all
         # works and, since the pw is not correct, we get a 401
@@ -111,7 +112,8 @@ class TestAuth:
         curl = CurlClient(env=env)
         url = f'https://{env.authority_for(env.domain1, proto)}/restricted/digest/data.json'
         r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto, extra_args=[
-            '--basic', '--user', f'test:{password}'
+            '--basic', '--user', f'test:{password}',
+            '--trace-config', 'http/2,http/3'
         ])
         # but apache denies on length limit
         r.check_response(http_status=431)
index aee0779f72ea25218e90cb09b6e0d3522fe1576b..809087ae7fa0a3b18277b8cca29660e1556f1021 100644 (file)
@@ -45,7 +45,6 @@ class ExecResult:
 
     def __init__(self, args: List[str], exit_code: int,
                  stdout: List[str], stderr: List[str],
-                 trace: Optional[List[str]] = None,
                  duration: Optional[timedelta] = None,
                  with_stats: bool = False,
                  exception: Optional[str] = None):
@@ -54,7 +53,6 @@ class ExecResult:
         self._exception = exception
         self._stdout = stdout
         self._stderr = stderr
-        self._trace = trace
         self._duration = duration if duration is not None else timedelta()
         self._response = None
         self._responses = []
@@ -113,7 +111,7 @@ class ExecResult:
 
     @property
     def trace_lines(self) -> List[str]:
-        return self._trace if self._trace else self._stderr
+        return self._stderr
 
     @property
     def duration(self) -> timedelta:
@@ -258,12 +256,8 @@ class ExecResult:
         lines = []
         lines.append('>>--stdout ----------------------------------------------\n')
         lines.extend(self._stdout)
-        if self._trace:
-            lines.append('>>--trace ----------------------------------------------\n')
-            lines.extend(self._trace)
-        else:
-            lines.append('>>--stderr ----------------------------------------------\n')
-            lines.extend(self._stderr)
+        lines.append('>>--stderr ----------------------------------------------\n')
+        lines.extend(self._stderr)
         lines.append('<<-------------------------------------------------------\n')
         return ''.join(lines)
 
@@ -288,7 +282,6 @@ class CurlClient:
         self._stdoutfile = f'{self._run_dir}/curl.stdout'
         self._stderrfile = f'{self._run_dir}/curl.stderr'
         self._headerfile = f'{self._run_dir}/curl.headers'
-        self._tracefile = f'{self._run_dir}/curl.trace'
         self._log_path = f'{self._run_dir}/curl.log'
         self._silent = silent
         self._rmrf(self._run_dir)
@@ -301,10 +294,6 @@ class CurlClient:
     def download_file(self, i: int) -> str:
         return os.path.join(self.run_dir, f'download_{i}.data')
 
-    @property
-    def trace_file(self) -> str:
-        return self._tracefile
-
     def _rmf(self, path):
         if os.path.exists(path):
             return os.remove(path)
@@ -347,7 +336,6 @@ class CurlClient:
                       with_stats: bool = True,
                       with_headers: bool = False,
                       no_save: bool = False,
-                      with_trace: bool = False,
                       extra_args: List[str] = None):
         if extra_args is None:
             extra_args = []
@@ -368,14 +356,12 @@ class CurlClient:
             ])
         return self._raw(urls, alpn_proto=alpn_proto, options=extra_args,
                          with_stats=with_stats,
-                         with_headers=with_headers,
-                         with_trace=with_trace)
+                         with_headers=with_headers)
 
     def http_upload(self, urls: List[str], data: str,
                     alpn_proto: Optional[str] = None,
                     with_stats: bool = True,
                     with_headers: bool = False,
-                    with_trace: bool = False,
                     extra_args: Optional[List[str]] = None):
         if extra_args is None:
             extra_args = []
@@ -388,14 +374,12 @@ class CurlClient:
             ])
         return self._raw(urls, alpn_proto=alpn_proto, options=extra_args,
                          with_stats=with_stats,
-                         with_headers=with_headers,
-                         with_trace=with_trace)
+                         with_headers=with_headers)
 
     def http_put(self, urls: List[str], data=None, fdata=None,
                  alpn_proto: Optional[str] = None,
                  with_stats: bool = True,
                  with_headers: bool = False,
-                 with_trace: bool = False,
                  extra_args: Optional[List[str]] = None):
         if extra_args is None:
             extra_args = []
@@ -413,8 +397,27 @@ class CurlClient:
         return self._raw(urls, intext=data,
                          alpn_proto=alpn_proto, options=extra_args,
                          with_stats=with_stats,
-                         with_headers=with_headers,
-                         with_trace=with_trace)
+                         with_headers=with_headers)
+
+    def http_form(self, urls: List[str], form: Dict[str, str],
+                  alpn_proto: Optional[str] = None,
+                  with_stats: bool = True,
+                  with_headers: bool = False,
+                  extra_args: Optional[List[str]] = None):
+        if extra_args is None:
+            extra_args = []
+        for key, val in form.items():
+            extra_args.extend(['-F', f'{key}={val}'])
+        extra_args.extend([
+            '-o', 'download_#1.data',
+        ])
+        if with_stats:
+            extra_args.extend([
+                '-w', '%{json}\\n'
+            ])
+        return self._raw(urls, alpn_proto=alpn_proto, options=extra_args,
+                         with_stats=with_stats,
+                         with_headers=with_headers)
 
     def response_file(self, idx: int):
         return os.path.join(self._run_dir, f'download_{idx}.data')
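For completeness, this is how the new http_form() helper is meant to be driven from a test; the fixtures (env, httpd, nghttpx) and the /curltest/echo handler are provided by the suite, as in test_07_38 above:

    from testenv import Env, CurlClient

    class TestFormExample:

        # minimal usage sketch of CurlClient.http_form(); mirrors test_07_38
        def test_form_post(self, env: Env, httpd, nghttpx, repeat):
            curl = CurlClient(env=env)
            url = f'https://{env.authority_for(env.domain1, "h2")}/curltest/echo?id=[0-0]'
            r = curl.http_form(urls=[url], alpn_proto='h2', form={'name1': 'value1'})
            r.check_stats(count=1, http_status=200, exitcode=0)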
@@ -435,7 +438,6 @@ class CurlClient:
         self._rmf(self._stdoutfile)
         self._rmf(self._stderrfile)
         self._rmf(self._headerfile)
-        self._rmf(self._tracefile)
         start = datetime.now()
         exception = None
         try:
@@ -452,11 +454,8 @@ class CurlClient:
             exception = 'TimeoutExpired'
         coutput = open(self._stdoutfile).readlines()
         cerrput = open(self._stderrfile).readlines()
-        ctrace = None
-        if os.path.exists(self._tracefile):
-            ctrace = open(self._tracefile).readlines()
         return ExecResult(args=args, exit_code=exitcode, exception=exception,
-                          stdout=coutput, stderr=cerrput, trace=ctrace,
+                          stdout=coutput, stderr=cerrput,
                           duration=datetime.now() - start,
                           with_stats=with_stats)
 
@@ -465,13 +464,11 @@ class CurlClient:
              force_resolve=True,
              with_stats=False,
              with_headers=True,
-             with_trace=False,
              def_tracing=True):
         args = self._complete_args(
             urls=urls, timeout=timeout, options=options, insecure=insecure,
             alpn_proto=alpn_proto, force_resolve=force_resolve,
-            with_headers=with_headers, with_trace=with_trace,
-            def_tracing=def_tracing)
+            with_headers=with_headers, def_tracing=def_tracing)
         r = self._run(args, intext=intext, with_stats=with_stats)
         if r.exit_code == 0 and with_headers:
             self._parse_headerfile(self._headerfile, r=r)
@@ -483,24 +480,18 @@ class CurlClient:
                        insecure=False, force_resolve=True,
                        alpn_proto: Optional[str] = None,
                        with_headers: bool = True,
-                       with_trace: bool = False,
                        def_tracing: bool = True):
         if not isinstance(urls, list):
             urls = [urls]
 
         args = [self._curl, "-s", "--path-as-is"]
-        if def_tracing:
-            args.extend(['--trace-time', '--trace-ids'])
         if with_headers:
             args.extend(["-D", self._headerfile])
-        if with_trace or self.env.verbose > 2:
-            args.extend(['--trace', self._tracefile])
-        elif def_tracing is False:
-            pass
-        elif self.env.verbose > 1:
-            args.extend(['--trace-ascii', self._tracefile])
-        elif not self._silent:
-            args.extend(['-v'])
+        if def_tracing is not False:
+            args.extend(['-v', '--trace-config', 'ids,time'])
+            if self.env.verbose > 1:
+                args.extend(['--trace-config', 'http/2,http/3,h2-proxy,h1-proxy'])
 
         for url in urls:
             u = urlparse(urls[0])
index a366ea53f674a708b532823c10fd5eb39a87b108..440f574aad7e7a5b843a68c319a599ae44ba1946 100644 (file)
@@ -439,6 +439,11 @@ class Env:
     def nghttpx(self) -> Optional[str]:
         return self.CONFIG.nghttpx
 
+    @classmethod
+    def slow_network(cls) -> bool:
+        return "CURL_DBG_SOCK_WBLOCK" in os.environ or \
+               "CURL_DBG_SOCK_WPARTIAL" in os.environ
+
     def authority_for(self, domain: str, alpn_proto: Optional[str] = None):
         if alpn_proto is None or \
                 alpn_proto in ['h2', 'http/1.1', 'http/1.0', 'http/0.9']:
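The new guard is only a presence check on the two debug variables; standalone it behaves like this (slow_network below is a local re-implementation of the same check, for illustration only):

    import os

    def slow_network() -> bool:
        return "CURL_DBG_SOCK_WBLOCK" in os.environ or \
               "CURL_DBG_SOCK_WPARTIAL" in os.environ

    print(slow_network())                      # False in a normal environment
    os.environ["CURL_DBG_SOCK_WBLOCK"] = "33"  # only the presence matters here
    print(slow_network())                      # True: marked tests get skipped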
index 38a8a8b6a4a373b995dfaa772f8d9afb2ce33510..7e7688d065ad739c862c6c0826ec742ddfeff4ef 100644 (file)
@@ -47,7 +47,7 @@ class Httpd:
         'authn_core', 'authn_file',
         'authz_user', 'authz_core', 'authz_host',
         'auth_basic', 'auth_digest',
-        'env', 'filter', 'headers', 'mime',
+        'alias', 'env', 'filter', 'headers', 'mime',
         'socache_shmcb',
         'rewrite', 'http2', 'ssl', 'proxy', 'proxy_http', 'proxy_connect',
         'mpm_event',
@@ -372,6 +372,10 @@ class Httpd:
         lines = []
         if Httpd.MOD_CURLTEST is not None:
             lines.extend([
+                f'    Redirect 301 /curltest/echo301 /curltest/echo',
+                f'    Redirect 302 /curltest/echo302 /curltest/echo',
+                f'    Redirect 303 /curltest/echo303 /curltest/echo',
+                f'    Redirect 307 /curltest/echo307 /curltest/echo',
                 f'    <Location /curltest/echo>',
                 f'      SetHandler curltest-echo',
                 f'    </Location>',