]> git.ipfire.org Git - thirdparty/apache/httpd.git/commitdiff
backport of https://github.com/apache/httpd/pull/364
authorStefan Eissing <icing@apache.org>
Fri, 30 Jun 2023 07:22:40 +0000 (07:22 +0000)
committerStefan Eissing <icing@apache.org>
Fri, 30 Jun 2023 07:22:40 +0000 (07:22 +0000)
Merge 1907696,1907697 from trunk:

  *) mod_http2: new directive 'H2MaxDataFrameLen n' to limit the maximum
     amount of response body bytes put into a single HTTP/2 DATA frame.
     Setting this to 0 places no limit (but the max size allowed by the
     protocol is observed).
     The module, by default, tries to use the maximum size possible, which is
     somewhat around 16KB. This sets the maximum. When less response data is
     available, smaller frames will be sent.

Merge r1909769 from trunk (plus sync mod_http2.xml)

  *) mod_http2: v2.0.15 with the following fixes and improvements
     - New directive 'H2EarlyHint name value' to add headers to a response,
       picked up already when a "103 Early Hints" response is sent. 'name' and
       'value' must comply to the HTTP field restrictions.
       This directive can be repeated several times and header fields of the
       same names add. Sending a 'Link' header with 'preload' relation will
       also cause a HTTP/2 PUSH if enabled and supported by the client.
     - Fixed an issue where requests were not logged and accounted in a timely
       fashion when the connection returns to "keepalive" handling, e.g. when
       the request served was the last outstanding one.
       This led to late appearance in access logs with wrong duration times
       reported.
     - Accurately report the bytes sent for a request in the '%O' Log format.
       This addresses #203, a long outstanding issue where mod_h2 has reported
       numbers over-eagerly from internal buffering and not what has actually
       been placed on the connection.
       The numbers are now the same with and without H2CopyFiles enabled.

Merge r1910157 from trunk

 * mod_proxy_http2: fixed using the wrong "bucket_alloc" from the backend
   connection when sending data on the frontend one. This caused crashes
   or infinite loops in rare situations.
 * mod_proxy_http2: fixed a bug in retry/response handling that could lead
   to wrong status codes or HTTP messages send at the end of response bodies
   exceeding the announced content-length.
 * mod_proxy_http2: fix retry handling to not leak temporary errors.
   On detecting that an existing connection was shut down by the other
   side, a 503 response leaked even though the request was retried on a
   fresh connection.
 * mod_http2: fixed a bug that did cleanup of consumed and pending buckets in
   the wrong order when a bucket_beam was destroyed.

Merge of 1910331,1910386,1910441 from trunk

  *) mod_http2: fixed a bug in flushing pending data on an already closed
     connection that could lead to a busy loop, preventing the HTTP/2 session
     to close down successfully. Fixed PR 66624.
     Fixed a bug that could lead to a crash in main connection
     output handling. This occurred only when the last request on a HTTP/2
     connection had been processed and the session decided to shut down.
     This could lead to an attempt to send a final GOAWAY while the previous
     write was still in progress. See PR 66646.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1910699 13f79535-47bb-0310-9956-ffa450edef68

33 files changed:
STATUS
changes-entries/h2_early_hints_and_fixes.txt [new file with mode: 0644]
changes-entries/h2_flush_fix.txt [new file with mode: 0644]
changes-entries/h2_max_data_frame_len.txt [new file with mode: 0644]
changes-entries/h2_pr66646.txt [new file with mode: 0644]
changes-entries/h2_v2.0.18.txt [new file with mode: 0644]
docs/manual/mod/mod_http2.xml
modules/http2/h2_bucket_beam.c
modules/http2/h2_bucket_beam.h
modules/http2/h2_c1_io.c
modules/http2/h2_c1_io.h
modules/http2/h2_c2.c
modules/http2/h2_c2_filter.c
modules/http2/h2_config.c
modules/http2/h2_config.h
modules/http2/h2_conn_ctx.h
modules/http2/h2_headers.c
modules/http2/h2_mplx.c
modules/http2/h2_mplx.h
modules/http2/h2_proxy_session.c
modules/http2/h2_proxy_session.h
modules/http2/h2_request.c
modules/http2/h2_session.c
modules/http2/h2_session.h
modules/http2/h2_stream.c
modules/http2/h2_stream.h
modules/http2/h2_switch.c
modules/http2/h2_version.h
modules/http2/mod_proxy_http2.c
test/modules/http2/test_008_ranges.py
test/modules/http2/test_009_timing.py
test/modules/http2/test_107_frame_lengths.py
test/modules/http2/test_500_proxy.py

diff --git a/STATUS b/STATUS
index 634c0d999e11e7604e8f62aec5663685ba471697..914b6581bf338d75f6c171a3fc836dc8895fb389 100644 (file)
--- a/STATUS
+++ b/STATUS
@@ -193,24 +193,6 @@ PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
      +1: jfclere, rpluem, ylavic
      rpluem says: Does anyone know why we don't merge the server config?
 
-  *) mod_http2:
-     Cumulative patch of fixes and new features in github PR #364:
-     - Merge 1907696,1907697 from trunk:
-       * new directive 'H2MaxDataFrameLen n'
-       * deny protocol upgrade on request with chunked encoding
-     - Merge 1909769 from trunk:
-       * New directive 'H2EarlyHint name value'
-       * Fixed PR 66537 where requests were logged late and with wrong duration
-       * Fixed reporting of bytes actually sent (%O)
-     - Merge r1910157 from trunk
-       * Fixed crash in mod_proxy_http2 caused by use for wrong bucket_alloc
-     - Merge of 1910331,1910386,1910441 from trunk
-       * Fix for PR 66624, busy loop on connection shutdown
-       * Fix for PR 66646, recursive call into output filters
-     Github PR at <https://github.com/apache/httpd/pull/364>,
-     all CI checks passed.
-     +1: icing, gbechis, ylavic
-
    *) mod_ssl: Fix for BZ 66626.
       Trunk version of patch:
          https://svn.apache.org/r1875355
@@ -235,7 +217,7 @@ PATCHES PROPOSED TO BACKPORT FROM TRUNK:
     jailletc36: r1745033 seems related and should be included
 
   *) core: Add formats %{z} and %{strftime-format} to ErrorLogFormat, and make
-     sure that if the format is configured early enough it applies to every log
+     sure that if the format is configured early enough it applies to every log
      line.  PR 62161.
      trunk patch: https://svn.apache.org/r1908380
                   https://svn.apache.org/r1908383
diff --git a/changes-entries/h2_early_hints_and_fixes.txt b/changes-entries/h2_early_hints_and_fixes.txt
new file mode 100644 (file)
index 0000000..74e5731
--- /dev/null
@@ -0,0 +1,18 @@
+  *) mod_http2: v2.0.15 with the following fixes and improvements
+     - New directive 'H2EarlyHint name value' to add headers to a response,
+       picked up already when a "103 Early Hints" response is sent. 'name' and
+       'value' must comply to the HTTP field restrictions.
+       This directive can be repeated several times and header fields of the
+       same names add. Sending a 'Link' header with 'preload' relation will
+       also cause a HTTP/2 PUSH if enabled and supported by the client.
+     - Fixed an issue where requests were not logged and accounted in a timely
+       fashion when the connection returns to "keepalive" handling, e.g. when
+       the request served was the last outstanding one.
+       This led to late appearance in access logs with wrong duration times
+       reported.
+     - Accurately report the bytes sent for a request in the '%O' Log format.
+       This addresses #203, a long outstanding issue where mod_h2 has reported
+       numbers over-eagerly from internal buffering and not what has actually
+       been placed on the connection.
+       The numbers are now the same with and without H2CopyFiles enabled.
+     [Stefan Eissing]
diff --git a/changes-entries/h2_flush_fix.txt b/changes-entries/h2_flush_fix.txt
new file mode 100644 (file)
index 0000000..b44a331
--- /dev/null
@@ -0,0 +1,4 @@
+  *) mod_http2: fixed a bug in flushing pending data on an already closed
+     connection that could lead to a busy loop, preventing the HTTP/2 session
+     to close down successfully. Fixed PR 66624.
+     [Stefan Eissing]
diff --git a/changes-entries/h2_max_data_frame_len.txt b/changes-entries/h2_max_data_frame_len.txt
new file mode 100644 (file)
index 0000000..f32f6e0
--- /dev/null
@@ -0,0 +1,7 @@
+  *) mod_http2: new directive 'H2MaxDataFrameLen n' to limit the maximum
+     amount of response body bytes put into a single HTTP/2 DATA frame.
+     Setting this to 0 places no limit (but the max size allowed by the
+     protocol is observed).
+     The module, by default, tries to use the maximum size possible, which is
+     somewhat around 16KB. This sets the maximum. When less response data is
+     available, smaller frames will be sent.
diff --git a/changes-entries/h2_pr66646.txt b/changes-entries/h2_pr66646.txt
new file mode 100644 (file)
index 0000000..6bf23cf
--- /dev/null
@@ -0,0 +1,6 @@
+  *) mod_http2: fixed a bug that could lead to a crash in main connection
+     output handling. This occurred only when the last request on a HTTP/2
+     connection had been processed and the session decided to shut down.
+     This could lead to an attempt to send a final GOAWAY while the previous
+     write was still in progress. See PR 66646.
+     [Stefan Eissing]
diff --git a/changes-entries/h2_v2.0.18.txt b/changes-entries/h2_v2.0.18.txt
new file mode 100644 (file)
index 0000000..38d7f5e
--- /dev/null
@@ -0,0 +1,13 @@
+ * mod_proxy_http2: fixed using the wrong "bucket_alloc" from the backend
+   connection when sending data on the frontend one. This caused crashes
+   or infinite loops in rare situations.
+ * mod_proxy_http2: fixed a bug in retry/response handling that could lead
+   to wrong status codes or HTTP messages send at the end of response bodies
+   exceeding the announced content-length.
+ * mod_proxy_http2: fix retry handling to not leak temporary errors.
+   On detecting that an existing connection was shut down by the other
+   side, a 503 response leaked even though the request was retried on a
+   fresh connection.
+ * mod_http2: fixed a bug that did cleanup of consumed and pending buckets in
+   the wrong order when a bucket_beam was destroyed.
+   [Stefan Eissing]
index 2df6af2a00285f2c1cf2e842fbe3d0b375df3ca1..0703bc0cf61b7793b6e52a57863bfd6795387c1c 100644 (file)
@@ -1002,4 +1002,82 @@ H2TLSCoolDownSecs 0
         </usage>
     </directivesynopsis>
     
+    <directivesynopsis>
+        <name>H2StreamTimeout</name>
+        <description>Maximum time waiting when sending/receiving data to stream processing</description>
+        <syntax>H2StreamTimeout <var>time-interval</var>[s]</syntax>
+        <default>Value of <directive module="core">Timeout</directive></default>
+        <contextlist>
+            <context>server config</context>
+            <context>virtual host</context>
+            <context>directory</context>
+        </contextlist>
+        <compatibility>Available in version 2.5.1 and later.</compatibility>
+
+        <usage>
+            <p>
+                <directive>H2StreamTimeout</directive> specifies the maximum time that
+                a stream being processed will wait for its data to be sent/received.
+            </p>
+        </usage>
+    </directivesynopsis>
+
+    <directivesynopsis>
+        <name>H2MaxDataFrameLen</name>
+        <description>Maximum bytes inside a single HTTP/2 DATA frame</description>
+        <syntax>H2MaxDataFrameLen <em>n</em></syntax>
+        <default>H2MaxDataFrameLen 0</default>
+        <contextlist>
+            <context>server config</context>
+            <context>virtual host</context>
+        </contextlist>
+        <compatibility>Available in version 2.5.1 and later.</compatibility>
+
+        <usage>
+            <p>
+                <directive>H2MaxDataFrameLen</directive> limits the maximum
+                amount of response body bytes placed into a single HTTP/2 DATA
+                frame. Setting this to 0 places no limit (but the max size
+                allowed by the protocol is observed).
+            </p><p>
+                The module, by default, tries to use the maximum size possible,
+                which is somewhat around 16KB. This sets the maximum. When less
+                response data is available, smaller frames will be sent.
+            </p>
+        </usage>
+    </directivesynopsis>
+
+    <directivesynopsis>
+        <name>H2EarlyHint</name>
+        <description>Add a response header to be picked up in 103 Early Hints</description>
+        <syntax>H2EarlyHint <em>name</em> <em>value</em></syntax>
+        <contextlist>
+            <context>server config</context>
+            <context>virtual host</context>
+            <context>directory</context>
+            <context>.htaccess</context>
+        </contextlist>
+        <compatibility>Available in version 2.5.1 and later.</compatibility>
+
+        <usage>
+            <p>
+                <directive>H2EarlyHint</directive> allows adding a response
+                header before the real request processing is started. Such headers
+                are picked up for "103 Early Hints" intermediate responses. The main
+                purpose is to send "preload" information to client browsers.
+            </p><p>
+                <em>name</em> and <em>value</em> must be valid HTTP header fields
+                or will lead to failed responses. <directive>H2EarlyHints</directive>
+                must still be enabled to allow 103 intermediate responses to be sent.
+                This directive can be repeated several times and header fields of the
+                same names add.
+            </p>
+            <example><title>Example</title>
+                <highlight language="config">
+H2EarlyHint Link "&lt;/my.css&gt;;rel=preload;as=style"
+                </highlight>
+            </example>
+        </usage>
+    </directivesynopsis>
+
 </modulesynopsis>
index cbf7f348da7e3dd1f4b359d50627a7e7b88224ca..72baea3e337918049d76e44998096741886ee911 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <httpd.h>
 #include <http_protocol.h>
+#include <http_request.h>
 #include <http_log.h>
 
 #include "h2_private.h"
@@ -156,6 +157,23 @@ static void purge_consumed_buckets(h2_bucket_beam *beam)
      * from sender thread only */
     while (!H2_BLIST_EMPTY(&beam->buckets_consumed)) {
         b = H2_BLIST_FIRST(&beam->buckets_consumed);
+        if(AP_BUCKET_IS_EOR(b)) {
+          APR_BUCKET_REMOVE(b);
+          H2_BLIST_INSERT_TAIL(&beam->buckets_eor, b);
+        }
+        else {
+          apr_bucket_delete(b);
+        }
+    }
+}
+
+static void purge_eor_buckets(h2_bucket_beam *beam)
+{
+    apr_bucket *b;
+    /* delete all sender buckets in purge brigade, needs to be called
+     * from sender thread only */
+    while (!H2_BLIST_EMPTY(&beam->buckets_eor)) {
+        b = H2_BLIST_FIRST(&beam->buckets_eor);
         apr_bucket_delete(b);
     }
 }
@@ -254,8 +272,8 @@ static void beam_shutdown(h2_bucket_beam *beam, apr_shutdown_how_e how)
 
     /* shutdown sender (or both)? */
     if (how != APR_SHUTDOWN_READ) {
-        h2_blist_cleanup(&beam->buckets_to_send);
         purge_consumed_buckets(beam);
+        h2_blist_cleanup(&beam->buckets_to_send);
     }
 }
 
@@ -263,6 +281,7 @@ static apr_status_t beam_cleanup(void *data)
 {
     h2_bucket_beam *beam = data;
     beam_shutdown(beam, APR_SHUTDOWN_READWRITE);
+    purge_eor_buckets(beam);
     beam->pool = NULL; /* the pool is clearing now */
     return APR_SUCCESS;
 }
@@ -295,6 +314,7 @@ apr_status_t h2_beam_create(h2_bucket_beam **pbeam, conn_rec *from,
 
     H2_BLIST_INIT(&beam->buckets_to_send);
     H2_BLIST_INIT(&beam->buckets_consumed);
+    H2_BLIST_INIT(&beam->buckets_eor);
     beam->tx_mem_limits = 1;
     beam->max_buf_size = max_buf_size;
     beam->timeout = timeout;
@@ -565,6 +585,9 @@ cleanup:
         rv = APR_ECONNABORTED;
     }
     H2_BEAM_LOG(beam, from, APLOG_TRACE2, rv, "end send", sender_bb);
+    if(rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv) && sender_bb != NULL) {
+        apr_brigade_cleanup(sender_bb);
+    }
     apr_thread_mutex_unlock(beam->lock);
     return rv;
 }
index 2a9d5f0f01856cf7c14bd1881865c464febbebbe..94e788a03e88471b703c137d1b83027e9524507e 100644 (file)
@@ -48,6 +48,7 @@ struct h2_bucket_beam {
     apr_pool_t *pool;
     h2_blist buckets_to_send;
     h2_blist buckets_consumed;
+    h2_blist buckets_eor;
 
     apr_size_t max_buf_size;
     apr_interval_time_t timeout;
index ade8836635887b4d477ed0ed1b515600ba1df611..55d463f51b80728f04f12354ff1b9c065dbe3ade 100644 (file)
@@ -260,9 +260,22 @@ static apr_status_t read_to_scratch(h2_c1_io *io, apr_bucket *b)
 static apr_status_t pass_output(h2_c1_io *io, int flush)
 {
     conn_rec *c = io->session->c1;
-    apr_off_t bblen;
+    apr_off_t bblen = 0;
     apr_status_t rv;
-    
+
+    if (io->is_passing) {
+        /* recursive call, may be triggered by an H2EOS bucket
+         * being destroyed and triggering sending more data? */
+        AP_DEBUG_ASSERT(0);
+        ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c, APLOGNO(10456)
+                      "h2_c1_io(%ld): recursive call of h2_c1_io_pass. "
+                      "Denied to prevent output corruption. This "
+                      "points to a bug in the HTTP/2 implementation.",
+                      c->id);
+        return APR_EGENERAL;
+    }
+    io->is_passing = 1;
+
     append_scratch(io);
     if (flush) {
         if (!APR_BUCKET_IS_FLUSH(APR_BRIGADE_LAST(io->output))) {
@@ -271,17 +284,16 @@ static apr_status_t pass_output(h2_c1_io *io, int flush)
         }
     }
     if (APR_BRIGADE_EMPTY(io->output)) {
-        return APR_SUCCESS;
+        rv = APR_SUCCESS;
+        goto cleanup;
     }
-    
+
     io->unflushed = !APR_BUCKET_IS_FLUSH(APR_BRIGADE_LAST(io->output));
     apr_brigade_length(io->output, 0, &bblen);
     C1_IO_BB_LOG(c, 0, APLOG_TRACE2, "out", io->output);
-    
+
     rv = ap_pass_brigade(c->output_filters, io->output);
     if (APR_SUCCESS != rv) goto cleanup;
-
-    io->buffered_len = 0;
     io->bytes_written += (apr_size_t)bblen;
 
     if (io->write_size < WRITE_SIZE_MAX
@@ -309,6 +321,8 @@ cleanup:
                       c->id, (long)bblen);
     }
     apr_brigade_cleanup(io->output);
+    io->buffered_len = 0;
+    io->is_passing = 0;
     return rv;
 }
 
index d891ffb44dfd7a7e269a9ee0b96fefb70c8816a7..c4dac38e1ee4b3e68b68174c53e00c16251e129c 100644 (file)
@@ -44,7 +44,8 @@ typedef struct {
     apr_off_t buffered_len;
     apr_off_t flush_threshold;
     unsigned int is_flushed : 1;
-    
+    unsigned int is_passing : 1;
+
     char *scratch;
     apr_size_t ssize;
     apr_size_t slen;
index 4553d3a48de19c14bd1710990d72f9ff1cee9360..0c2edba55bf14f59e4f8f34cce06a5a63f5f5613 100644 (file)
@@ -133,10 +133,22 @@ apr_status_t h2_c2_child_init(apr_pool_t *pool, server_rec *s)
                              APR_PROTO_TCP, pool);
 }
 
+static void h2_c2_log_io(conn_rec *c2, apr_off_t bytes_sent)
+{
+    if (bytes_sent && h2_c_logio_add_bytes_out) {
+        h2_c_logio_add_bytes_out(c2, bytes_sent);
+    }
+}
+
 void h2_c2_destroy(conn_rec *c2)
 {
+    h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2);
+
     ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c2,
                   "h2_c2(%s): destroy", c2->log_id);
+    if(!c2->aborted && conn_ctx && conn_ctx->bytes_sent) {
+      h2_c2_log_io(c2, conn_ctx->bytes_sent);
+    }
     apr_pool_destroy(c2->pool);
 }
 
@@ -146,6 +158,10 @@ void h2_c2_abort(conn_rec *c2, conn_rec *from)
 
     AP_DEBUG_ASSERT(conn_ctx);
     AP_DEBUG_ASSERT(conn_ctx->stream_id);
+    if(!c2->aborted && conn_ctx->bytes_sent) {
+      h2_c2_log_io(c2, conn_ctx->bytes_sent);
+    }
+
     if (conn_ctx->beam_in) {
         h2_beam_abort(conn_ctx->beam_in, from);
     }
@@ -326,42 +342,13 @@ receive:
 
 static apr_status_t beam_out(conn_rec *c2, h2_conn_ctx_t *conn_ctx, apr_bucket_brigade* bb)
 {
-    apr_off_t written, header_len = 0;
+    apr_off_t written = 0;
     apr_status_t rv;
 
-    if (h2_c_logio_add_bytes_out) {
-        /* mod_logio wants to report the number of bytes  written in a
-         * response, including header and footer fields. Since h2 converts
-         * those during c1 processing into the HPACKed h2 HEADER frames,
-         * we need to give mod_logio something here and count just the
-         * raw lengths of all headers in the buckets. */
-        apr_bucket *b;
-        for (b = APR_BRIGADE_FIRST(bb);
-             b != APR_BRIGADE_SENTINEL(bb);
-             b = APR_BUCKET_NEXT(b)) {
-#if AP_HAS_RESPONSE_BUCKETS
-            if (AP_BUCKET_IS_RESPONSE(b)) {
-                header_len += (apr_off_t)response_length_estimate(b->data);
-            }
-            if (AP_BUCKET_IS_HEADERS(b)) {
-                header_len += (apr_off_t)headers_length_estimate(b->data);
-            }
-#else
-            if (H2_BUCKET_IS_HEADERS(b)) {
-                header_len += (apr_off_t)h2_bucket_headers_headers_length(b);
-            }
-#endif /* AP_HAS_RESPONSE_BUCKETS */
-        }
-    }
-
     rv = h2_beam_send(conn_ctx->beam_out, c2, bb, APR_BLOCK_READ, &written);
-
     if (APR_STATUS_IS_EAGAIN(rv)) {
         rv = APR_SUCCESS;
     }
-    if (written && h2_c_logio_add_bytes_out) {
-        h2_c_logio_add_bytes_out(c2, written + header_len);
-    }
     return rv;
 }
 
@@ -403,30 +390,56 @@ static apr_status_t h2_c2_filter_out(ap_filter_t* f, apr_bucket_brigade* bb)
     return rv;
 }
 
-static void check_push(request_rec *r, const char *tag)
+static int addn_headers(void *udata, const char *name, const char *value)
 {
-    apr_array_header_t *push_list = h2_config_push_list(r);
+    apr_table_t *dest = udata;
+    apr_table_addn(dest, name, value);
+    return 1;
+}
 
-    if (!r->expecting_100 && push_list && push_list->nelts > 0) {
-        int i, old_status;
-        const char *old_line;
+static void check_early_hints(request_rec *r, const char *tag)
+{
+    apr_array_header_t *push_list = h2_config_push_list(r);
+    apr_table_t *early_headers = h2_config_early_headers(r);
+
+    if (!r->expecting_100 &&
+        ((push_list && push_list->nelts > 0) ||
+         (early_headers && !apr_is_empty_table(early_headers)))) {
+        int have_hints = 0, i;
+
+        if (push_list && push_list->nelts > 0) {
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+                          "%s, early announcing %d resources for push",
+                          tag, push_list->nelts);
+            for (i = 0; i < push_list->nelts; ++i) {
+                h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
+                apr_table_add(r->headers_out, "Link",
+                               apr_psprintf(r->pool, "<%s>; rel=preload%s",
+                                            push->uri_ref, push->critical? "; critical" : ""));
+            }
+            have_hints = 1;
+        }
+        if (early_headers && !apr_is_empty_table(early_headers)) {
+            apr_table_do(addn_headers, r->headers_out, early_headers, NULL);
+            have_hints = 1;
+        }
 
-        ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
-                      "%s, early announcing %d resources for push",
-                      tag, push_list->nelts);
-        for (i = 0; i < push_list->nelts; ++i) {
-            h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
-            apr_table_add(r->headers_out, "Link",
-                           apr_psprintf(r->pool, "<%s>; rel=preload%s",
-                                        push->uri_ref, push->critical? "; critical" : ""));
+        if (have_hints) {
+          int old_status;
+          const char *old_line;
+
+          if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 &&
+              h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) {
+              apr_table_setn(r->connection->notes, H2_PUSH_MODE_NOTE, "0");
+          }
+          old_status = r->status;
+          old_line = r->status_line;
+          r->status = 103;
+          r->status_line = "103 Early Hints";
+          ap_send_interim_response(r, 1);
+          r->status = old_status;
+          r->status_line = old_line;
         }
-        old_status = r->status;
-        old_line = r->status_line;
-        r->status = 103;
-        r->status_line = "103 Early Hints";
-        ap_send_interim_response(r, 1);
-        r->status = old_status;
-        r->status_line = old_line;
     }
 }
 
@@ -439,7 +452,7 @@ static int c2_hook_fixups(request_rec *r)
         return DECLINED;
     }
 
-    check_push(r, "late_fixup");
+    check_early_hints(r, "late_fixup");
 
     return DECLINED;
 }
index 37254fc1d7b3499adeaf55cfaf589388c35972b4..5b1838b773041f41f72ff398f6f49767c428d0dc 100644 (file)
@@ -511,10 +511,10 @@ static apr_status_t pass_response(h2_conn_ctx_t *conn_ctx, ap_filter_t *f,
 {
     apr_bucket *b;
     apr_status_t status;
-
     h2_headers *response = h2_headers_create(parser->http_status,
                                              make_table(parser),
-                                             NULL, 0, parser->pool);
+                                             parser->c->notes,
+                                             0, parser->pool);
     apr_brigade_cleanup(parser->tmp);
     b = h2_bucket_headers_create(parser->c->bucket_alloc, response);
     APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
index eea4be2c595eaea129dedbe47e1876dab25cd0ea..670833ec129918153481639b18396c55ad095d86 100644 (file)
@@ -70,11 +70,13 @@ typedef struct h2_config {
     int push_diary_size;             /* # of entries in push diary */
     int copy_files;                  /* if files shall be copied vs setaside on output */
     apr_array_header_t *push_list;   /* list of h2_push_res configurations */
+    apr_table_t *early_headers;      /* HTTP headers for a 103 response */
     int early_hints;                 /* support status code 103 */
     int padding_bits;
     int padding_always;
     int output_buffered;
     apr_interval_time_t stream_timeout;/* beam timeout */
+    int max_data_frame_len;          /* max # bytes in a single h2 DATA frame */
 } h2_config;
 
 typedef struct h2_dir_config {
@@ -82,6 +84,7 @@ typedef struct h2_dir_config {
     int h2_upgrade;                  /* Allow HTTP/1 upgrade to h2/h2c */
     int h2_push;                     /* if HTTP/2 server push is enabled */
     apr_array_header_t *push_list;   /* list of h2_push_res configurations */
+    apr_table_t *early_headers;      /* HTTP headers for a 103 response */
     int early_hints;                 /* support status code 103 */
     apr_interval_time_t stream_timeout;/* beam timeout */
 } h2_dir_config;
@@ -105,11 +108,13 @@ static h2_config defconf = {
     256,                    /* push diary size */
     0,                      /* copy files across threads */
     NULL,                   /* push list */
+    NULL,                   /* early headers */
     0,                      /* early hints, http status 103 */
     0,                      /* padding bits */
     1,                      /* padding always */
     1,                      /* stream output buffered */
     -1,                     /* beam timeout */
+    0,                      /* max DATA frame len, 0 == no extra limit */
 };
 
 static h2_dir_config defdconf = {
@@ -117,6 +122,7 @@ static h2_dir_config defdconf = {
     -1,                     /* HTTP/1 Upgrade support */
     -1,                     /* HTTP/2 server push enabled */
     NULL,                   /* push list */
+    NULL,                   /* early headers */
     -1,                     /* early hints, http status 103 */
     -1,                     /* beam timeout */
 };
@@ -148,11 +154,13 @@ void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
     conf->push_diary_size      = DEF_VAL;
     conf->copy_files           = DEF_VAL;
     conf->push_list            = NULL;
+    conf->early_headers        = NULL;
     conf->early_hints          = DEF_VAL;
     conf->padding_bits         = DEF_VAL;
     conf->padding_always       = DEF_VAL;
     conf->output_buffered      = DEF_VAL;
     conf->stream_timeout       = DEF_VAL;
+    conf->max_data_frame_len   = DEF_VAL;
     return conf;
 }
 
@@ -191,10 +199,17 @@ static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
     else {
         n->push_list        = add->push_list? add->push_list : base->push_list;
     }
+    if (add->early_headers && base->early_headers) {
+        n->early_headers    = apr_table_overlay(pool, add->early_headers, base->early_headers);
+    }
+    else {
+        n->early_headers    = add->early_headers? add->early_headers : base->early_headers;
+    }
     n->early_hints          = H2_CONFIG_GET(add, base, early_hints);
     n->padding_bits         = H2_CONFIG_GET(add, base, padding_bits);
     n->padding_always       = H2_CONFIG_GET(add, base, padding_always);
     n->stream_timeout       = H2_CONFIG_GET(add, base, stream_timeout);
+    n->max_data_frame_len   = H2_CONFIG_GET(add, base, max_data_frame_len);
     return n;
 }
 
@@ -232,6 +247,12 @@ void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
     else {
         n->push_list        = add->push_list? add->push_list : base->push_list;
     }
+    if (add->early_headers && base->early_headers) {
+        n->early_headers    = apr_table_overlay(pool, add->early_headers, base->early_headers);
+    }
+    else {
+        n->early_headers    = add->early_headers? add->early_headers : base->early_headers;
+    }
     n->early_hints          = H2_CONFIG_GET(add, base, early_hints);
     n->stream_timeout         = H2_CONFIG_GET(add, base, stream_timeout);
     return n;
@@ -278,6 +299,8 @@ static apr_int64_t h2_srv_config_geti64(const h2_config *conf, h2_config_var_t v
             return H2_CONFIG_GET(conf, &defconf, output_buffered);
         case H2_CONF_STREAM_TIMEOUT:
             return H2_CONFIG_GET(conf, &defconf, stream_timeout);
+        case H2_CONF_MAX_DATA_FRAME_LEN:
+            return H2_CONFIG_GET(conf, &defconf, max_data_frame_len);
         default:
             return DEF_VAL;
     }
@@ -337,6 +360,9 @@ static void h2_srv_config_seti(h2_config *conf, h2_config_var_t var, int val)
         case H2_CONF_OUTPUT_BUFFER:
             H2_CONFIG_SET(conf, output_buffered, val);
             break;
+        case H2_CONF_MAX_DATA_FRAME_LEN:
+            H2_CONFIG_SET(conf, max_data_frame_len, val);
+            break;
         default:
             break;
     }
@@ -502,6 +528,18 @@ apr_array_header_t *h2_config_push_list(request_rec *r)
     return sconf? sconf->push_list : NULL;
 }
 
+apr_table_t *h2_config_early_headers(request_rec *r)
+{
+    const h2_config *sconf;
+    const h2_dir_config *conf = h2_config_rget(r);
+
+    if (conf && conf->early_headers) {
+        return conf->early_headers;
+    }
+    sconf = h2_config_sget(r->server);
+    return sconf? sconf->early_headers : NULL;
+}
+
 const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type)
 {
     const h2_config *conf = h2_config_get(c);
@@ -583,6 +621,17 @@ static const char *h2_conf_set_stream_max_mem_size(cmd_parms *cmd,
     return NULL;
 }
 
+static const char *h2_conf_set_max_data_frame_len(cmd_parms *cmd,
+                                                  void *dirconf, const char *value)
+{
+    int val = (int)apr_atoi64(value);
+    if (val < 0) {
+        return "value must be 0 or larger";
+    }
+    CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_DATA_FRAME_LEN, val);
+    return NULL;
+}
+
 static const char *h2_conf_set_session_extra_files(cmd_parms *cmd,
                                                    void *dirconf, const char *value)
 {
@@ -812,6 +861,37 @@ static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf,
     return NULL;
 }
 
+static const char *h2_conf_add_early_hint(cmd_parms *cmd, void *dirconf,
+                                          const char *name, const char *value)
+{
+    apr_table_t *hds, **phds;
+
+    if(!name || !*name)
+      return "Early Hint header name must not be empty";
+    if(!value)
+      return "Early Hint header value must not be empty";
+    while (apr_isspace(*value))
+        ++value;
+    if(!*value)
+      return "Early Hint header value must not be empty/only space";
+    if (*ap_scan_http_field_content(value))
+      return "Early Hint header value contains invalid characters";
+
+    if (cmd->path) {
+        phds = &((h2_dir_config*)dirconf)->early_headers;
+    }
+    else {
+        phds = &(h2_config_sget(cmd->server))->early_headers;
+    }
+    hds = *phds;
+    if (!hds) {
+      *phds = hds = apr_table_make(cmd->pool, 10);
+    }
+    apr_table_add(hds, name, value);
+
+    return NULL;
+}
+
 static const char *h2_conf_set_early_hints(cmd_parms *cmd,
                                            void *dirconf, const char *value)
 {
@@ -937,6 +1017,10 @@ const command_rec h2_cmds[] = {
                   RSRC_CONF, "set stream output buffer on/off"),
     AP_INIT_TAKE1("H2StreamTimeout", h2_conf_set_stream_timeout, NULL,
                   RSRC_CONF, "set stream timeout"),
+    AP_INIT_TAKE1("H2MaxDataFrameLen", h2_conf_set_max_data_frame_len, NULL,
+                  RSRC_CONF, "maximum number of bytes in a single HTTP/2 DATA frame"),
+    AP_INIT_TAKE2("H2EarlyHint", h2_conf_add_early_hint, NULL,
+                   OR_FILEINFO|OR_AUTHCFG, "add a 'Link:' header for a 103 Early Hints response."),
     AP_END_CMD
 };
 
index 6d2e65f926a5683b6e61c3011f9e2f056192bb18..5a783712848ae108b8e701d5d1010fae8aca7534 100644 (file)
@@ -43,6 +43,7 @@ typedef enum {
     H2_CONF_PADDING_ALWAYS,
     H2_CONF_OUTPUT_BUFFER,
     H2_CONF_STREAM_TIMEOUT,
+    H2_CONF_MAX_DATA_FRAME_LEN,
 } h2_config_var_t;
 
 struct apr_hash_t;
@@ -86,6 +87,7 @@ int h2_config_rgeti(request_rec *r, h2_config_var_t var);
 apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var);
 
 apr_array_header_t *h2_config_push_list(request_rec *r);
+apr_table_t *h2_config_early_headers(request_rec *r);
 
 
 void h2_get_workers_config(server_rec *s, int *pminw, int *pmaxw,
index 35987bce3f6f4b073c7d6d318bbbaf7a08c072fc..90dc9f627dd62417615aa14e9a1f563b39bf85f8 100644 (file)
@@ -61,6 +61,7 @@ struct h2_conn_ctx_t {
     int has_final_response;          /* final HTTP response passed on out */
     apr_status_t last_err;           /* APR_SUCCES or last error encountered in filters */
 
+    apr_off_t bytes_sent;            /* c2: bytes actually sent via c1 */
     /* atomic */ apr_uint32_t started; /* c2: processing was started */
     apr_time_t started_at;           /* c2: when processing started */
     /* atomic */ apr_uint32_t done;  /* c2: processing has finished */
index cbc7b01a34286d0feb92f5f9c80f4d9e9c250145..0fc1d91d6a265279c63f3d779312a4d389627d45 100644 (file)
@@ -144,6 +144,9 @@ h2_headers *h2_headers_rcreate(request_rec *r, int status,
                                const apr_table_t *header, apr_pool_t *pool)
 {
     h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool);
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, headers->status, r,
+                  "h2_headers_rcreate(%ld): status=%d",
+                  (long)r->connection->id, status);
     if (headers->status == HTTP_FORBIDDEN) {
         request_rec *r_prev;
         for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) {
index 99c47ea8ef9fdee77612f7bb7f38d2496146eda7..6e2d5776f2fce5f3ba5fa5068aa82478556b741e 100644 (file)
@@ -441,6 +441,8 @@ static int m_stream_cancel_iter(void *ctx, void *val) {
     return 0;
 }
 
+static void c1_purge_streams(h2_mplx *m);
+
 void h2_mplx_c1_destroy(h2_mplx *m)
 {
     apr_status_t status;
@@ -509,7 +511,9 @@ void h2_mplx_c1_destroy(h2_mplx *m)
                       h2_ihash_count(m->shold));
         h2_ihash_iter(m->shold, m_unexpected_stream_iter, m);
     }
-    
+
+    c1_purge_streams(m);
+
     m->c1->aborted = old_aborted;
     H2_MPLX_LEAVE(m);
 
@@ -585,6 +589,15 @@ static void c1_purge_streams(h2_mplx *m)
     apr_array_clear(m->spurge);
 }
 
+void h2_mplx_c1_going_keepalive(h2_mplx *m)
+{
+    H2_MPLX_ENTER_ALWAYS(m);
+    if (m->spurge->nelts) {
+        c1_purge_streams(m);
+    }
+    H2_MPLX_LEAVE(m);
+}
+
 apr_status_t h2_mplx_c1_poll(h2_mplx *m, apr_interval_time_t timeout,
                             stream_ev_callback *on_stream_input,
                             stream_ev_callback *on_stream_output,
index 1f79aa8248a473538c8d5a18e8ca2f8c3c3ade84..ecb4de9cc6f20368007d31bf47a971521c631107 100644 (file)
@@ -212,6 +212,14 @@ const struct h2_stream *h2_mplx_c2_stream_get(h2_mplx *m, int stream_id);
  */
 apr_status_t h2_mplx_worker_pop_c2(h2_mplx *m, conn_rec **out_c2);
 
+
+/**
+ * Session processing is entering KEEPALIVE, e.g. giving control
+ * to the MPM for monitoring incoming socket events only.
+ * Last chance for maintenance work before losing control.
+ */
+void h2_mplx_c1_going_keepalive(h2_mplx *m);
+
 #define H2_MPLX_MSG(m, msg)     \
     "h2_mplx(%d-%lu): "msg, m->child_num, (unsigned long)m->id
 
index 378a1ded693dce236e6697ccf13cd1795c47d5bd..4835ec3e6cda0ab70d866b3ce27efafbd41eb79b 100644 (file)
@@ -37,6 +37,7 @@ typedef struct h2_proxy_stream {
 
     const char *url;
     request_rec *r;
+    conn_rec *cfront;
     h2_proxy_request *req;
     const char *real_server_uri;
     const char *p_server_uri;
@@ -401,7 +402,7 @@ static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
             char *s = apr_pstrndup(stream->r->pool, v, vlen);
             
             apr_table_setn(stream->r->notes, "proxy-status", s);
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, 
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront,
                           "h2_proxy_stream(%s-%d): got status %s", 
                           stream->session->id, stream->id, s);
             stream->r->status = (int)apr_atoi64(s);
@@ -413,7 +414,7 @@ static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
         return APR_SUCCESS;
     }
     
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, 
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront,
                   "h2_proxy_stream(%s-%d): on_header %s: %s", 
                   stream->session->id, stream->id, n, v);
     if (!h2_proxy_res_ignore_header(n, nlen)) {
@@ -425,7 +426,7 @@ static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
         h2_proxy_util_camel_case_header(hname, nlen);
         hvalue = apr_pstrndup(stream->pool, v, vlen);
         
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, 
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront,
                       "h2_proxy_stream(%s-%d): got header %s: %s", 
                       stream->session->id, stream->id, hname, hvalue);
         process_proxy_header(headers, stream, hname, hvalue);
@@ -532,22 +533,21 @@ static int stream_response_data(nghttp2_session *ngh2, uint8_t flags,
         h2_proxy_stream_end_headers_out(stream);
     }
     stream->data_received += len;
-    
-    b = apr_bucket_transient_create((const char*)data, len, 
-                                    stream->r->connection->bucket_alloc);
+    b = apr_bucket_transient_create((const char*)data, len,
+                                    stream->cfront->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(stream->output, b);
     /* always flush after a DATA frame, as we have no other indication
      * of buffer use */
-    b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+    b = apr_bucket_flush_create(stream->cfront->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(stream->output, b);
-    
+
     status = ap_pass_brigade(stream->r->output_filters, stream->output);
     ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03359)
                   "h2_proxy_session(%s): stream=%d, response DATA %ld, %ld"
                   " total", session->id, stream_id, (long)len,
                   (long)stream->data_received);
     if (status != APR_SUCCESS) {
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03344)
+        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03344)
                       "h2_proxy_session(%s): passing output on stream %d", 
                       session->id, stream->id);
         nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE,
@@ -827,12 +827,13 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
     stream->pool = r->pool;
     stream->url = url;
     stream->r = r;
+    stream->cfront = r->connection;
     stream->standalone = standalone;
     stream->session = session;
     stream->state = H2_STREAM_ST_IDLE;
     
-    stream->input = apr_brigade_create(stream->pool, session->c->bucket_alloc);
-    stream->output = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+    stream->input = apr_brigade_create(stream->pool, stream->cfront->bucket_alloc);
+    stream->output = apr_brigade_create(stream->pool, stream->cfront->bucket_alloc);
     
     stream->req = h2_proxy_req_create(1, stream->pool);
 
@@ -856,7 +857,7 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
             /* port info missing and port is not default for scheme: append */
             authority = apr_psprintf(stream->pool, "%s:%d", authority, puri.port);
         }
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront,
                       "authority=%s from uri.hostname=%s and uri.port=%d",
                       authority, puri.hostname, puri.port);
     }
@@ -943,7 +944,7 @@ static apr_status_t submit_stream(h2_proxy_session *session, h2_proxy_stream *st
     rv = nghttp2_submit_request(session->ngh2, NULL, 
                                 hd->nv, hd->nvlen, pp, stream);
                                 
-    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03363)
+    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->cfront, APLOGNO(03363)
                   "h2_proxy_session(%s): submit %s%s -> %d", 
                   session->id, stream->req->authority, stream->req->path,
                   rv);
@@ -1088,7 +1089,7 @@ apr_status_t h2_proxy_session_submit(h2_proxy_session *session,
 static void stream_resume(h2_proxy_stream *stream)
 {
     h2_proxy_session *session = stream->session;
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, 
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront,
                   "h2_proxy_stream(%s-%d): resuming", 
                   session->id, stream->id);
     stream->suspended = 0;
@@ -1129,7 +1130,7 @@ static apr_status_t check_suspended(h2_proxy_session *session)
                 return APR_SUCCESS;
             }
             else if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
-                ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, session->c, 
+                ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, stream->cfront,
                               APLOGNO(03382) "h2_proxy_stream(%s-%d): check input", 
                               session->id, stream_id);
                 stream_resume(stream);
@@ -1366,27 +1367,39 @@ static void ev_stream_done(h2_proxy_session *session, int stream_id,
         /* if the stream's connection is aborted, do not send anything
          * more on it. */
         apr_status_t status = (stream->error_code == 0)? APR_SUCCESS : APR_EINVAL;
-        int touched = (stream->data_sent ||
+        int touched = (stream->data_sent || stream->data_received ||
                        stream_id <= session->last_stream_id);
-        if (!session->c->aborted) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364)
+        if (!stream->cfront->aborted) {
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->cfront, APLOGNO(03364)
                           "h2_proxy_sesssion(%s): stream(%d) closed "
                           "(touched=%d, error=%d)",
                           session->id, stream_id, touched, stream->error_code);
 
             if (status != APR_SUCCESS) {
-              /* stream failed, error reporting is done by caller
-               * of proxy_session, e.g. mod_proxy_http2 which also
-               * decides about retries. */
+              /* stream failed. If we have received (and forwarded) response
+               * data already, we need to append an error bucket to inform
+               * consumers.
+               * Otherwise, we have an early fail on the connection and may
+               * retry this request on a new one. In that case, keep the
+               * output virgin so that a new attempt can be made. */
+              if (stream->data_received) {
+                int http_status = ap_map_http_request_error(status, HTTP_BAD_REQUEST);
+                b = ap_bucket_error_create(http_status, NULL, stream->r->pool,
+                                           stream->cfront->bucket_alloc);
+                APR_BRIGADE_INSERT_TAIL(stream->output, b);
+                b = apr_bucket_eos_create(stream->cfront->bucket_alloc);
+                APR_BRIGADE_INSERT_TAIL(stream->output, b);
+                ap_pass_brigade(stream->r->output_filters, stream->output);
+              }
             }
             else if (!stream->data_received) {
                 /* if the response had no body, this is the time to flush
                  * an empty brigade which will also write the response headers */
                 h2_proxy_stream_end_headers_out(stream);
                 stream->data_received = 1;
-                b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+                b = apr_bucket_flush_create(stream->cfront->bucket_alloc);
                 APR_BRIGADE_INSERT_TAIL(stream->output, b);
-                b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
+                b = apr_bucket_eos_create(stream->cfront->bucket_alloc);
                 APR_BRIGADE_INSERT_TAIL(stream->output, b);
                 ap_pass_brigade(stream->r->output_filters, stream->output);
             }
@@ -1396,7 +1409,7 @@ static void ev_stream_done(h2_proxy_session *session, int stream_id,
         h2_proxy_ihash_remove(session->streams, stream_id);
         h2_proxy_iq_remove(session->suspended, stream_id);
         if (session->done) {
-            session->done(session, stream->r, status, touched);
+            session->done(session, stream->r, status, touched, stream->error_code);
         }
     }
     
@@ -1666,9 +1679,9 @@ static int done_iter(void *udata, void *val)
 {
     cleanup_iter_ctx *ctx = udata;
     h2_proxy_stream *stream = val;
-    int touched = (stream->data_sent || 
+    int touched = (stream->data_sent || stream->data_received ||
                    stream->id <= ctx->session->last_stream_id);
-    ctx->done(ctx->session, stream->r, APR_ECONNABORTED, touched);
+    ctx->done(ctx->session, stream->r, APR_ECONNABORTED, touched, stream->error_code);
     return 1;
 }
 
@@ -1687,6 +1700,12 @@ void h2_proxy_session_cleanup(h2_proxy_session *session,
     }
 }
 
+int h2_proxy_session_is_reusable(h2_proxy_session *session)
+{
+    return (session->state != H2_PROXYS_ST_DONE) &&
+           h2_proxy_ihash_empty(session->streams);
+}
+
 static int ping_arrived_iter(void *udata, void *val)
 {
     h2_proxy_stream *stream = val;
index f40e5ee52e9a25e83bc90ea974dfe51848400f5b..3bc16d70b3ec808d62c73db3ab8c694b03aa6944 100644 (file)
@@ -68,7 +68,8 @@ typedef enum {
 
 typedef struct h2_proxy_session h2_proxy_session;
 typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r,
-                                   apr_status_t status, int touched);
+                                   apr_status_t status, int touched,
+                                   int error_code);
 
 struct h2_proxy_session {
     const char *id;
@@ -130,4 +131,6 @@ void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done);
 
 #define H2_PROXY_REQ_URL_NOTE   "h2-proxy-req-url"
 
+int h2_proxy_session_is_reusable(h2_proxy_session *s);
+
 #endif /* h2_proxy_session_h */
index 20e94cd8a3c64ee380980dcf0b3cbc5a9ce34912..3a6c42b0921cb773d10d4fbcccd66d923b8f0cfc 100644 (file)
@@ -304,6 +304,7 @@ static void assign_headers(request_rec *r, const h2_request *req,
     const char *cl;
 
     r->headers_in = apr_table_clone(r->pool, req->headers);
+
     if (req->authority) {
         /* for internal handling, we have to simulate that :authority
          * came in as Host:, RFC 9113 ch. says that mismatches between
@@ -367,12 +368,18 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c,
     /* Time to populate r with the data we have. */
     r->request_time = req->request_time;
     AP_DEBUG_ASSERT(req->authority);
-    if (req->scheme && (ap_cstr_casecmp(req->scheme,
-                        ap_ssl_conn_is_ssl(c->master? c->master : c)? "https" : "http")
-                        || !ap_cstr_casecmp("CONNECT", req->method))) {
-        /* Client sent a non-matching ':scheme' pseudo header. Forward this
-         * via an absolute URI in the request line.
-         */
+    if (!apr_strnatcasecmp("CONNECT", req->method)) {
+      /* CONNECT MUST NOT have scheme or path */
+      r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0",
+                                    req->method, req->authority);
+    }
+    else if (req->scheme && ap_cstr_casecmp(req->scheme, "http")
+             && ap_cstr_casecmp(req->scheme, "https")) {
+        /* FIXME: we also need to create absolute uris when we are
+         * in a forward proxy configuration! But there is currently
+         * no way to detect that. */
+        /* Client sent a ':scheme' pseudo header for something else
+         * than what we handle by default. Make an absolute URI. */
         r->the_request = apr_psprintf(r->pool, "%s %s://%s%s HTTP/2.0",
                                       req->method, req->scheme, req->authority,
                                       req->path ? req->path : "");
index 7ba49cf8d5ee0eedfb91ead76c6b2bd9c63942ed..603e47ce85e1f85e0abb2b2e4891f47efb23eebb 100644 (file)
@@ -902,7 +902,8 @@ apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *
     
     session->max_stream_count = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
     session->max_stream_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
-    
+    session->max_data_frame_len = h2_config_sgeti(s, H2_CONF_MAX_DATA_FRAME_LEN);
+
     session->out_c1_blocked = h2_iq_create(session->pool, (int)session->max_stream_count);
     session->ready_to_process = h2_iq_create(session->pool, (int)session->max_stream_count);
 
@@ -983,13 +984,15 @@ apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *
                       H2_SSSN_LOG(APLOGNO(03200), session, 
                                   "created, max_streams=%d, stream_mem=%d, "
                                   "workers_limit=%d, workers_max=%d, "
-                                  "push_diary(type=%d,N=%d)"),
+                                  "push_diary(type=%d,N=%d), "
+                                  "max_data_frame_len=%d"),
                       (int)session->max_stream_count, 
                       (int)session->max_stream_mem,
                       session->mplx->processing_limit,
                       session->mplx->processing_max,
                       session->push_diary->dtype, 
-                      (int)session->push_diary->N);
+                      (int)session->push_diary->N,
+                      (int)session->max_data_frame_len);
     }
     
     apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup);
@@ -1278,8 +1281,11 @@ static apr_status_t h2_session_send(h2_session *session)
                 goto cleanup;
             }
         }
-        if (h2_c1_io_needs_flush(&session->io)) {
+        if (h2_c1_io_needs_flush(&session->io) ||
+            ngrv == NGHTTP2_ERR_WOULDBLOCK) {
             rv = h2_c1_io_assure_flushed(&session->io);
+            if (rv != APR_SUCCESS)
+                goto cleanup;
             pending = 0;
         }
     }
@@ -1636,10 +1642,6 @@ static void on_stream_state_enter(void *ctx, h2_stream *stream)
             h2_mplx_c1_stream_cleanup(session->mplx, stream, &session->open_streams);
             ++session->streams_done;
             update_child_status(session, SERVER_BUSY_WRITE, "done", stream);
-            if (session->open_streams == 0) {
-                h2_session_dispatch_event(session, H2_SESSION_EV_NO_MORE_STREAMS,
-                                          0, "stream done");
-            }
             break;
         default:
             break;
@@ -1944,7 +1946,8 @@ leaving:
         ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
                       H2_SSSN_MSG(session, "process returns")); 
     }
-    
+    h2_mplx_c1_going_keepalive(session->mplx);
+
     if (session->state == H2_SESSION_ST_DONE) {
         if (session->local.error) {
             char buffer[128];
index fbddfdd2a36ad32d9b57b4cc1b147b3ec2231854..3328509de8ac0ac390c38b707513d6252100734f 100644 (file)
@@ -103,7 +103,8 @@ typedef struct h2_session {
     
     apr_size_t max_stream_count;    /* max number of open streams */
     apr_size_t max_stream_mem;      /* max buffer memory for a single stream */
-    
+    apr_size_t max_data_frame_len;  /* max amount of bytes for a single DATA frame */
+
     apr_size_t idle_frames;         /* number of rcvd frames that kept session in idle state */
     apr_interval_time_t idle_delay; /* Time we delay processing rcvd frames in idle state */
     
index cf6f79897dda303cd6be170d62fd5f8e8b8d4779..635caf7cb3c70790b08839e443573e11be94a314 100644 (file)
@@ -166,6 +166,7 @@ static int on_frame_recv(h2_stream_state_t state, int frame_type)
 
 static int on_event(h2_stream* stream, h2_stream_event_t ev)
 {
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     if (stream->monitor && stream->monitor->on_event) {
         stream->monitor->on_event(stream->monitor->ctx, stream, ev);
     }
@@ -392,6 +393,7 @@ void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev)
 {
     int new_state;
     
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
                   H2_STRM_MSG(stream, "dispatch event %d"), ev);
     new_state = on_event(stream, ev);
@@ -425,6 +427,7 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_
     apr_status_t status = APR_SUCCESS;
     int new_state, eos = 0;
 
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     new_state = on_frame_send(stream->state, ftype);
     if (new_state < 0) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
@@ -435,6 +438,12 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_
 
     ++stream->out_frames;
     stream->out_frame_octets += frame_len;
+    if(stream->c2) {
+      h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(stream->c2);
+      if(conn_ctx)
+        conn_ctx->bytes_sent = stream->out_frame_octets;
+    }
+
     switch (ftype) {
         case NGHTTP2_DATA:
             eos = (flags & NGHTTP2_FLAG_END_STREAM);
@@ -468,6 +477,7 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_
     apr_status_t status = APR_SUCCESS;
     int new_state, eos = 0;
 
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     new_state = on_frame_recv(stream->state, ftype);
     if (new_state < 0) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
@@ -522,6 +532,7 @@ apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
     h2_session *session = stream->session;
     apr_status_t status = APR_SUCCESS;
     
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     stream->in_data_frames++;
     if (len > 0) {
         if (APLOGctrace3(session->c1)) {
@@ -542,11 +553,38 @@ apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
     return status;
 }
 
+#ifdef AP_DEBUG
+static apr_status_t stream_pool_destroy(void *data)
+{
+    h2_stream *stream = data;
+    switch (stream->magic) {
+    case H2_STRM_MAGIC_OK:
+        ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, stream->session->c1,
+                      H2_STRM_MSG(stream, "was not destroyed explicitly"));
+        AP_DEBUG_ASSERT(0);
+        break;
+    case H2_STRM_MAGIC_SDEL:
+        /* stream has been explicitly destroyed, as it should */
+        H2_STRM_ASSIGN_MAGIC(stream, H2_STRM_MAGIC_PDEL);
+        break;
+    case H2_STRM_MAGIC_PDEL:
+        ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, stream->session->c1,
+                      H2_STRM_MSG(stream, "already pool destroyed"));
+        AP_DEBUG_ASSERT(0);
+        break;
+    default:
+        AP_DEBUG_ASSERT(0);
+    }
+    return APR_SUCCESS;
+}
+#endif
+
 h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
                             h2_stream_monitor *monitor, int initiated_on)
 {
     h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream));
-    
+
+    H2_STRM_ASSIGN_MAGIC(stream, H2_STRM_MAGIC_OK);
     stream->id           = id;
     stream->initiated_on = initiated_on;
     stream->created      = apr_time_now();
@@ -554,6 +592,12 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
     stream->pool         = pool;
     stream->session      = session;
     stream->monitor      = monitor;
+#ifdef AP_DEBUG
+    if (id) { /* stream 0 has special lifetime */
+        apr_pool_cleanup_register(pool, stream, stream_pool_destroy,
+                                  apr_pool_cleanup_null);
+    }
+#endif
 
 #ifdef H2_NG2_LOCAL_WIN_SIZE
     if (id) {
@@ -575,6 +619,7 @@ void h2_stream_cleanup(h2_stream *stream)
      * end of the in/out notifications get closed.
      */
     ap_assert(stream);
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     if (stream->out_buffer) {
         apr_brigade_cleanup(stream->out_buffer);
     }
@@ -583,13 +628,16 @@ void h2_stream_cleanup(h2_stream *stream)
 void h2_stream_destroy(h2_stream *stream)
 {
     ap_assert(stream);
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c1,
                   H2_STRM_MSG(stream, "destroy"));
+    H2_STRM_ASSIGN_MAGIC(stream, H2_STRM_MAGIC_SDEL);
     apr_pool_destroy(stream->pool);
 }
 
 void h2_stream_rst(h2_stream *stream, int error_code)
 {
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     stream->rst_error = error_code;
     if (stream->c2) {
         h2_c2_abort(stream->c2, stream->session->c1);
@@ -605,6 +653,7 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream,
     h2_request *req;
     apr_status_t status;
 
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     ap_assert(stream->request == NULL);
     ap_assert(stream->rtmp == NULL);
     if (stream->rst_error) {
@@ -626,6 +675,7 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream,
 
 void h2_stream_set_request(h2_stream *stream, const h2_request *r)
 {
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     ap_assert(stream->request == NULL);
     ap_assert(stream->rtmp == NULL);
     stream->rtmp = h2_request_clone(stream->pool, r);
@@ -685,6 +735,7 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
     int error = 0, was_added = 0;
     apr_status_t status = APR_SUCCESS;
     
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     if (stream->response) {
         return APR_EINVAL;    
     }
@@ -791,6 +842,7 @@ apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
     int is_http_or_https;
     h2_request *req = stream->rtmp;
 
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     status = h2_request_end_headers(req, stream->pool, raw_bytes);
     if (APR_SUCCESS != status || req->http_status != H2_HTTP_STATUS_UNSET) {
         goto cleanup;
@@ -1039,6 +1091,7 @@ apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
 {
     apr_status_t rv = APR_SUCCESS;
 
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     if (stream->rst_error) {
         return APR_ECONNRESET;
     }
@@ -1133,6 +1186,7 @@ apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
     apr_array_header_t *pushes;
     int i;
     
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     pushes = h2_push_collect_update(stream, stream->request, response);
     if (pushes && !apr_is_empty_array(pushes)) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
@@ -1152,6 +1206,7 @@ apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
 
 apr_table_t *h2_stream_get_trailers(h2_stream *stream)
 {
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     return NULL;
 }
 
@@ -1163,6 +1218,7 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream,
                                           h2_headers *response)
 #endif
 {
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     if (response && stream->initiated_on) {
         const char *ctype = apr_table_get(response->headers, "content-type");
         if (ctype) {
@@ -1176,6 +1232,7 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream,
 int h2_stream_is_ready(h2_stream *stream)
 {
     /* Have we sent a response or do we have the response in our buffer? */
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     if (stream->response) {
         return 1;
     }
@@ -1187,11 +1244,13 @@ int h2_stream_is_ready(h2_stream *stream)
 
 int h2_stream_is_at(const h2_stream *stream, h2_stream_state_t state)
 {
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     return stream->state == state;
 }
 
 int h2_stream_is_at_or_past(const h2_stream *stream, h2_stream_state_t state)
 {
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     switch (state) {
         case H2_SS_IDLE:
             return 1; /* by definition */
@@ -1214,6 +1273,7 @@ apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount)
 {
     h2_session *session = stream->session;
     
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     if (amount > 0) {
         apr_off_t consumed = amount;
         
@@ -1339,6 +1399,7 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
                       H2_SSSN_STRM_MSG(session, stream_id, "data_cb, stream not found"));
         return NGHTTP2_ERR_CALLBACK_FAILURE;
     }
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     if (!stream->output || !stream->response || !stream->out_buffer) {
         return NGHTTP2_ERR_DEFERRED;
     }
@@ -1361,6 +1422,11 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
             length = chunk_len;
         }
     }
+    /* We allow configurable max DATA frame length. */
+    if (stream->session->max_data_frame_len > 0
+        && length > stream->session->max_data_frame_len) {
+      length = stream->session->max_data_frame_len;
+    }
 
     /* How much data do we have in our buffers that we can write?
      * if not enough, receive more. */
@@ -1465,6 +1531,7 @@ static apr_status_t stream_do_response(h2_stream *stream)
 #endif
     nghttp2_data_provider provider, *pprovider = NULL;
 
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     ap_assert(!stream->response);
     ap_assert(stream->out_buffer);
 
@@ -1562,6 +1629,8 @@ static apr_status_t stream_do_response(h2_stream *stream)
          * denies it, submit resources to push */
         const char *s = apr_table_get(resp->notes, H2_PUSH_MODE_NOTE);
         if (!s || strcmp(s, "0")) {
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+                          H2_STRM_MSG(stream, "submit pushes, note=%s"), s);
             h2_stream_submit_pushes(stream, resp);
         }
     }
@@ -1653,6 +1722,7 @@ void h2_stream_on_output_change(h2_stream *stream)
 
     /* stream->pout_recv_write signalled a change. Check what has happened, read
      * from it and act on seeing a response/data. */
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     if (!stream->output) {
         /* c2 has not assigned the output beam to the stream (yet). */
         ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c1,
@@ -1701,6 +1771,7 @@ void h2_stream_on_output_change(h2_stream *stream)
 
 void h2_stream_on_input_change(h2_stream *stream)
 {
+    H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
     ap_assert(stream->input);
     h2_beam_report_consumption(stream->input);
     if (h2_stream_is_at(stream, H2_SS_CLOSED_L)
index 695d56ac5e1cf0797ff16fc4c7000cb29b32dc07..638dbdac0d158aa841adfb7c3e3d4e6d00f6e605 100644 (file)
@@ -63,7 +63,22 @@ typedef struct h2_stream_monitor {
                                              trigger a state change */
 } h2_stream_monitor;
 
+#ifdef AP_DEBUG
+#define H2_STRM_MAGIC_OK     0x5354524d
+#define H2_STRM_MAGIC_SDEL   0x5344454c
+#define H2_STRM_MAGIC_PDEL   0x5044454c
+
+#define H2_STRM_ASSIGN_MAGIC(s,m)  ((s)->magic = m)
+#define H2_STRM_ASSERT_MAGIC(s,m)  ap_assert((s)->magic == m)
+#else
+#define H2_STRM_ASSIGN_MAGIC(s,m)  ((void)0)
+#define H2_STRM_ASSERT_MAGIC(s,m)  ((void)0)
+#endif
+
 struct h2_stream {
+#ifdef AP_DEBUG
+    uint32_t magic;
+#endif
     int id;                     /* http2 stream identifier */
     int initiated_on;           /* initiating stream id (PUSH) or 0 */
     apr_pool_t *pool;           /* the memory pool for this stream */
index a30f27ce9d965e7b8fd282165c3bded3bde03286..3799701723c99921a4f510747f76f8002b14d9df 100644 (file)
@@ -104,9 +104,10 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r,
         /* We also allow switching only for requests that have no body.
          */
         p = apr_table_get(r->headers_in, "Content-Length");
-        if (p && strcmp(p, "0")) {
+        if ((p && strcmp(p, "0"))
+            || (!p && apr_table_get(r->headers_in, "Transfer-Encoding"))) {
             ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03087)
-                          "upgrade with content-length: %s, declined", p);
+                          "upgrade with body declined");
             return DECLINED;
         }
     }
index c9610899015e56879e8990d1c132d1c61dda0fb0..7de3144ec7f96109ee1079cb8c536dcd539bb730 100644 (file)
@@ -27,7 +27,7 @@
  * @macro
  * Version number of the http2 module as c string
  */
-#define MOD_HTTP2_VERSION "2.0.11"
+#define MOD_HTTP2_VERSION "2.0.19-git"
 
 /**
  * @macro
@@ -35,7 +35,7 @@
  * release. This is a 24 bit number with 8 bits for major number, 8 bits
  * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
  */
-#define MOD_HTTP2_VERSION_NUM 0x02000b
+#define MOD_HTTP2_VERSION_NUM 0x020013
 
 
 #endif /* mod_h2_h2_version_h */
index 5abccab09729deeb0f901c6429aee5e45d2cf163..8c0905f917c472bdc6e9419572cc13b731cdbce6 100644 (file)
@@ -50,8 +50,7 @@ static int (*is_h2)(conn_rec *c);
 
 typedef struct h2_proxy_ctx {
     const char *id;
-    conn_rec *master;
-    conn_rec *owner;
+    conn_rec *cfront;
     apr_pool_t *pool;
     server_rec *server;
     const char *proxy_func;
@@ -69,7 +68,7 @@ typedef struct h2_proxy_ctx {
     apr_status_t r_status;     /* status of request work */
     int r_done;                /* request was processed, not necessarily successfully */
     int r_may_retry;           /* request may be retried */
-    h2_proxy_session *session; /* current http2 session against backend */
+    int has_reusable_session;  /* http2 session is live and clean */
 } h2_proxy_ctx;
 
 static int h2_proxy_post_config(apr_pool_t *p, apr_pool_t *plog,
@@ -160,10 +159,15 @@ static int proxy_http2_canon(request_rec *r, char *url)
         }
         else {
             core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ #ifdef PROXY_CANONENC_NOENCODEDSLASHENCODING
             int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0;
 
             path = ap_proxy_canonenc_ex(r->pool, url, (int)strlen(url),
                                         enc_path, flags, r->proxyreq);
+#else
+            path = ap_proxy_canonenc(r->pool, url, (int)strlen(url),
+                                     enc_path, 0, r->proxyreq);
+#endif
             if (!path) {
                 return HTTP_BAD_REQUEST;
             }
@@ -227,79 +231,81 @@ static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
 }
 
 static void request_done(h2_proxy_ctx *ctx, request_rec *r,
-                         apr_status_t status, int touched)
+                         apr_status_t status, int touched, int error_code)
 {   
     if (r == ctx->r) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection, 
-                      "h2_proxy_session(%s): request done, touched=%d",
-                      ctx->id, touched);
+                      "h2_proxy_session(%s): request done, touched=%d, error=%d",
+                      ctx->id, touched, error_code);
         ctx->r_done = 1;
         if (touched) ctx->r_may_retry = 0;
-        ctx->r_status = ((status == APR_SUCCESS)? APR_SUCCESS
-                         : HTTP_SERVICE_UNAVAILABLE);
+        ctx->r_status = error_code? HTTP_BAD_GATEWAY :
+            ((status == APR_SUCCESS)? OK :
+             ap_map_http_request_error(status, HTTP_SERVICE_UNAVAILABLE));
     }
 }    
 
 static void session_req_done(h2_proxy_session *session, request_rec *r,
-                             apr_status_t status, int touched)
+                             apr_status_t status, int touched, int error_code)
 {
-    request_done(session->user_data, r, status, touched);
+    request_done(session->user_data, r, status, touched, error_code);
 }
 
 static apr_status_t ctx_run(h2_proxy_ctx *ctx) {
     apr_status_t status = OK;
+    h2_proxy_session *session;
     int h2_front;
     
     /* Step Four: Send the Request in a new HTTP/2 stream and
      * loop until we got the response or encounter errors.
      */
-    h2_front = is_h2? is_h2(ctx->owner) : 0;
-    ctx->session = h2_proxy_session_setup(ctx->id, ctx->p_conn, ctx->conf,
-                                          h2_front, 30, 
-                                          h2_proxy_log2((int)ctx->req_buffer_size), 
-                                          session_req_done);
-    if (!ctx->session) {
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, 
+    ctx->has_reusable_session = 0; /* don't know yet */
+    h2_front = is_h2? is_h2(ctx->cfront) : 0;
+    session = h2_proxy_session_setup(ctx->id, ctx->p_conn, ctx->conf,
+                                     h2_front, 30,
+                                     h2_proxy_log2((int)ctx->req_buffer_size),
+                                     session_req_done);
+    if (!session) {
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront,
                       APLOGNO(03372) "session unavailable");
         return HTTP_SERVICE_UNAVAILABLE;
     }
     
-    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373)
-                  "eng(%s): run session %s", ctx->id, ctx->session->id);
-    ctx->session->user_data = ctx;
+    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront, APLOGNO(03373)
+                  "eng(%s): run session %s", ctx->id, session->id);
+    session->user_data = ctx;
     
     ctx->r_done = 0;
-    add_request(ctx->session, ctx->r);
+    add_request(session, ctx->r);
     
-    while (!ctx->owner->aborted && !ctx->r_done) {
+    while (!ctx->cfront->aborted && !ctx->r_done) {
     
-        status = h2_proxy_session_process(ctx->session);
+        status = h2_proxy_session_process(session);
         if (status != APR_SUCCESS) {
             /* Encountered an error during session processing */
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, 
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront,
                           APLOGNO(03375) "eng(%s): end of session %s", 
-                          ctx->id, ctx->session->id);
+                          ctx->id, session->id);
             /* Any open stream of that session needs to
              * a) be reopened on the new session iff safe to do so
              * b) reported as done (failed) otherwise
              */
-            h2_proxy_session_cleanup(ctx->session, session_req_done);
+            h2_proxy_session_cleanup(session, session_req_done);
             goto out;
         }
     }
     
 out:
-    if (ctx->owner->aborted) {
+    if (ctx->cfront->aborted) {
         /* master connection gone */
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, 
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront,
                       APLOGNO(03374) "eng(%s): master connection gone", ctx->id);
         /* cancel all ongoing requests */
-        h2_proxy_session_cancel_all(ctx->session);
-        h2_proxy_session_process(ctx->session);
+        h2_proxy_session_cancel_all(session);
+        h2_proxy_session_process(session);
     }
-    
-    ctx->session->user_data = NULL;
-    ctx->session = NULL;
+    ctx->has_reusable_session = h2_proxy_session_is_reusable(session);
+    session->user_data = NULL;
     return status;
 }
 
@@ -344,9 +350,8 @@ static int proxy_http2_handler(request_rec *r,
     }
 
     ctx = apr_pcalloc(r->pool, sizeof(*ctx));
-    ctx->master = r->connection->master? r->connection->master : r->connection;
-    ctx->id = apr_psprintf(r->pool, "%ld", (long)ctx->master->id);
-    ctx->owner = r->connection;
+    ctx->id = apr_psprintf(r->pool, "%ld", (long)r->connection->id);
+    ctx->cfront = r->connection;
     ctx->pool = r->pool;
     ctx->server = r->server;
     ctx->proxy_func = proxy_func;
@@ -359,7 +364,7 @@ static int proxy_http2_handler(request_rec *r,
     ctx->r_done = 0;
     ctx->r_may_retry =  1;
     
-    ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
+    ap_set_module_config(ctx->cfront->conn_config, &proxy_http2_module, ctx);
 
     /* scheme says, this is for us. */
     apr_table_setn(ctx->r->notes, H2_PROXY_REQ_URL_NOTE, url);
@@ -367,7 +372,7 @@ static int proxy_http2_handler(request_rec *r,
                   "H2: serving URL %s", url);
     
 run_connect:    
-    if (ctx->owner->aborted) goto cleanup;
+    if (ctx->cfront->aborted) goto cleanup;
 
     /* Get a proxy_conn_rec from the worker, might be a new one, might
      * be one still open from another request, or it might fail if the
@@ -395,7 +400,7 @@ run_connect:
      * backend->hostname. */
     if (ap_proxy_connect_backend(ctx->proxy_func, ctx->p_conn, ctx->worker, 
                                  ctx->server)) {
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03352)
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront, APLOGNO(03352)
                       "H2: failed to make connection to backend: %s",
                       ctx->p_conn->hostname);
         goto cleanup;
@@ -404,7 +409,7 @@ run_connect:
     /* Step Three: Create conn_rec for the socket we have open now. */
     status = ap_proxy_connection_create_ex(ctx->proxy_func, ctx->p_conn, ctx->r);
     if (status != OK) {
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront, APLOGNO(03353)
                       "setup new connection: is_ssl=%d %s %s %s", 
                       ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname, 
                       locurl, ctx->p_conn->hostname);
@@ -419,10 +424,10 @@ run_connect:
                        "proxy-request-alpn-protos", "h2");
     }
 
-    if (ctx->owner->aborted) goto cleanup;
+    if (ctx->cfront->aborted) goto cleanup;
     status = ctx_run(ctx);
 
-    if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->owner->aborted) {
+    if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->cfront->aborted) {
         /* Not successfully processed, but may retry, tear down old conn and start over */
         if (ctx->p_conn) {
             ctx->p_conn->close = 1;
@@ -436,15 +441,16 @@ run_connect:
         if (reconnects < 2) {
             goto run_connect;
         } 
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(10023)
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront, APLOGNO(10023)
                       "giving up after %d reconnects, request-done=%d",
                       reconnects, ctx->r_done);
     }
     
 cleanup:
     if (ctx->p_conn) {
-        if (status != APR_SUCCESS) {
-            /* close socket when errors happened or session shut down (EOF) */
+        if (status != APR_SUCCESS || !ctx->has_reusable_session) {
+            /* close socket when errors happened or session is not "clean",
+             * meaning in a working condition with no open streams */
             ctx->p_conn->close = 1;
         }
 #if AP_MODULE_MAGIC_AT_LEAST(20140207, 2)
@@ -454,8 +460,8 @@ cleanup:
         ctx->p_conn = NULL;
     }
 
-    ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
-    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, 
+    ap_set_module_config(ctx->cfront->conn_config, &proxy_http2_module, NULL);
+    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront,
                   APLOGNO(03377) "leaving handler");
     return ctx->r_status;
 }
index c0b0fa1389eaaa7262579ed982d327e0230fac73..ce86399a5d4110b8948188643e7a844d8e1f19f2 100644 (file)
@@ -11,6 +11,150 @@ class TestRanges:
 
     LOGFILE = ""
 
+    @pytest.fixture(autouse=True, scope='class')
+    def _class_scope(self, env):
+        TestRanges.LOGFILE = os.path.join(env.server_logs_dir, "test_008")
+        TestRanges.SRCDIR = os.path.dirname(inspect.getfile(TestRanges))
+        if os.path.isfile(TestRanges.LOGFILE):
+            os.remove(TestRanges.LOGFILE)
+        destdir = os.path.join(env.gen_dir, 'apache/htdocs/test1')
+        env.make_data_file(indir=destdir, fname="data-100m", fsize=100*1024*1024)
+        conf = H2Conf(env=env)
+        conf.add([
+            "CustomLog logs/test_008 combined"
+        ])
+        conf.add_vhost_cgi()
+        conf.add_vhost_test1()
+        conf.install()
+        assert env.apache_restart() == 0
+
+    def test_h2_008_01(self, env):
+        # issue: #203
+        resource = "data-1k"
+        full_length = 1000
+        chunk = 200
+        self.curl_upload_and_verify(env, resource, ["-v", "--http2"])
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "cgi", f"/files/{resource}?01full")
+        r = env.curl_get(url, 5, options=["--http2"])
+        assert r.response["status"] == 200
+        url = env.mkurl("https", "cgi", f"/files/{resource}?01range")
+        r = env.curl_get(url, 5, options=["--http1.1", "-H", "Range: bytes=0-{0}".format(chunk-1)])
+        assert 206 == r.response["status"]
+        assert chunk == len(r.response["body"].decode('utf-8'))
+        r = env.curl_get(url, 5, options=["--http2", "-H", "Range: bytes=0-{0}".format(chunk-1)])
+        assert 206 == r.response["status"]
+        assert chunk == len(r.response["body"].decode('utf-8'))
+        # Restart for logs to be flushed out
+        assert env.apache_restart() == 0
+        # now check what response lengths have actually been reported
+        detected = {}
+        for line in open(TestRanges.LOGFILE).readlines():
+            e = json.loads(line)
+            if e['request'] == f'GET /files/{resource}?01full HTTP/2.0':
+                assert e['bytes_rx_I'] > 0
+                assert e['bytes_resp_B'] == full_length
+                assert e['bytes_tx_O'] > full_length
+                detected['h2full'] = 1
+            elif e['request'] == f'GET /files/{resource}?01range HTTP/2.0':
+                assert e['bytes_rx_I'] > 0
+                assert e['bytes_resp_B'] == chunk
+                assert e['bytes_tx_O'] > chunk
+                assert e['bytes_tx_O'] < chunk + 256 # response + frame stuff
+                detected['h2range'] = 1
+            elif e['request'] == f'GET /files/{resource}?01range HTTP/1.1':
+                assert e['bytes_rx_I'] > 0         # input bytes received
+                assert e['bytes_resp_B'] == chunk  # response bytes sent (payload)
+                assert e['bytes_tx_O'] > chunk     # output bytes sent
+                detected['h1range'] = 1
+        assert 'h1range' in detected, f'HTTP/1.1 range request not found in {TestRanges.LOGFILE}'
+        assert 'h2range' in detected, f'HTTP/2 range request not found in {TestRanges.LOGFILE}'
+        assert 'h2full' in detected, f'HTTP/2 full request not found in {TestRanges.LOGFILE}'
+
+    def test_h2_008_02(self, env, repeat):
+        path = '/002.jpg'
+        res_len = 90364
+        url = env.mkurl("https", "test1", f'{path}?02full')
+        r = env.curl_get(url, 5)
+        assert r.response["status"] == 200
+        assert "HTTP/2" == r.response["protocol"]
+        h = r.response["header"]
+        assert "accept-ranges" in h
+        assert "bytes" == h["accept-ranges"]
+        assert "content-length" in h
+        clen = h["content-length"]
+        assert int(clen) == res_len
+        # get the first 1024 bytes of the resource, 206 status, but content-length as original
+        url = env.mkurl("https", "test1", f'{path}?02range')
+        r = env.curl_get(url, 5, options=["-H", "range: bytes=0-1023"])
+        assert 206 == r.response["status"]
+        assert "HTTP/2" == r.response["protocol"]
+        assert 1024 == len(r.response["body"])
+        assert "content-length" in h
+        assert clen == h["content-length"]
+        # Restart for logs to be flushed out
+        assert env.apache_restart() == 0
+        # now check what response lengths have actually been reported
+        found = False
+        for line in open(TestRanges.LOGFILE).readlines():
+            e = json.loads(line)
+            if e['request'] == f'GET {path}?02range HTTP/2.0':
+                assert e['bytes_rx_I'] > 0
+                assert e['bytes_resp_B'] == 1024
+                assert e['bytes_tx_O'] > 1024
+            assert e['bytes_tx_O'] < 1024 + 256  # response and frame stuff
+                found = True
+                break
+        assert found, f'request not found in {self.LOGFILE}'
+
+    # send a paced curl download that aborts in the middle of the transfer
+    def test_h2_008_03(self, env, repeat):
+        path = '/data-100m'
+        url = env.mkurl("https", "test1", f'{path}?03broken')
+        r = env.curl_get(url, 5, options=[
+            '--limit-rate', '2k', '-m', '2'
+        ])
+        assert r.exit_code != 0, f'{r}'
+        found = False
+        for line in open(TestRanges.LOGFILE).readlines():
+            e = json.loads(line)
+            if e['request'] == f'GET {path}?03broken HTTP/2.0':
+                assert e['bytes_rx_I'] > 0
+                assert e['bytes_resp_B'] == 100*1024*1024
+                assert e['bytes_tx_O'] > 1024
+                assert e['bytes_tx_O'] < 5*1024*1024  # curl buffers, but not that much
+                found = True
+                break
+        assert found, f'request not found in {self.LOGFILE}'
+
+    # upload and GET again using curl, compare to original content
+    def curl_upload_and_verify(self, env, fname, options=None):
+        url = env.mkurl("https", "cgi", "/upload.py")
+        fpath = os.path.join(env.gen_dir, fname)
+        r = env.curl_upload(url, fpath, options=options)
+        assert r.exit_code == 0, f"{r}"
+        assert 200 <= r.response["status"] < 300
+
+        r2 = env.curl_get(r.response["header"]["location"])
+        assert r2.exit_code == 0
+        assert r2.response["status"] == 200
+        with open(os.path.join(TestRanges.SRCDIR, fpath), mode='rb') as file:
+            src = file.read()
+        assert src == r2.response["body"]
+
+import inspect
+import json
+import os
+import pytest
+
+from .env import H2Conf, H2TestEnv
+
+
+@pytest.mark.skipif(condition=H2TestEnv.is_unsupported, reason="mod_http2 not supported here")
+class TestRanges:
+
+    LOGFILE = ""
+
     @pytest.fixture(autouse=True, scope='class')
     def _class_scope(self, env):
         TestRanges.LOGFILE = os.path.join(env.server_logs_dir, "test_008")
index eca28bc17fc888a8f39d95296d27be3e081d30e3..d73327c2b3e4d24e0c2439ab0a565395a4874312 100644 (file)
@@ -6,6 +6,80 @@ import pytest
 from .env import H2Conf, H2TestEnv
 
 
+@pytest.mark.skipif(condition=H2TestEnv.is_unsupported, reason="mod_http2 not supported here")
+class TestTiming:
+
+    LOGFILE = ""
+
+    @pytest.fixture(autouse=True, scope='class')
+    def _class_scope(self, env):
+        TestTiming.LOGFILE = os.path.join(env.server_logs_dir, "test_009")
+        if os.path.isfile(TestTiming.LOGFILE):
+            os.remove(TestTiming.LOGFILE)
+        conf = H2Conf(env=env)
+        conf.add([
+            "CustomLog logs/test_009 combined"
+        ])
+        conf.add_vhost_cgi()
+        conf.add_vhost_test1()
+        conf.install()
+        assert env.apache_restart() == 0
+
+    # check that we get a positive time_taken reported on a simple GET
+    def test_h2_009_01(self, env):
+        path = '/002.jpg'
+        url = env.mkurl("https", "test1", f'{path}?01')
+        args = [
+            env.h2load, "-n", "1", "-c", "1", "-m", "1",
+            f"--connect-to=localhost:{env.https_port}",
+            f"--base-uri={url}", url
+        ]
+        r = env.run(args)
+        # Restart for logs to be flushed out
+        assert env.apache_restart() == 0
+        found = False
+        for line in open(TestTiming.LOGFILE).readlines():
+            e = json.loads(line)
+            if e['request'] == f'GET {path}?01 HTTP/2.0':
+                assert e['time_taken'] > 0
+                found = True
+        assert found, f'request not found in {TestTiming.LOGFILE}'
+
+    # test issue #253, where time_taken in a keepalive situation is not
+    # reported until the next request arrives
+    def test_h2_009_02(self, env):
+        baseurl = env.mkurl("https", "test1", '/')
+        tscript = os.path.join(env.gen_dir, 'h2load-timing-009_02')
+        with open(tscript, 'w') as fd:
+            fd.write('\n'.join([
+                f'0.0\t/002.jpg?02a',        # 1st request right away
+                f'1000.0\t/002.jpg?02b',     # 2nd a second later
+            ]))
+        args = [
+            env.h2load,
+            f'--timing-script-file={tscript}',
+            f"--connect-to=localhost:{env.https_port}",
+            f"--base-uri={baseurl}"
+        ]
+        r = env.run(args)
+        # Restart for logs to be flushed out
+        assert env.apache_restart() == 0
+        found = False
+        for line in open(TestTiming.LOGFILE).readlines():
+            e = json.loads(line)
+            if e['request'] == f'GET /002.jpg?02a HTTP/2.0':
+                assert e['time_taken'] > 0
+                assert e['time_taken'] < 500 * 1000, f'time for 1st request not reported correctly'
+                found = True
+        assert found, f'request not found in {TestTiming.LOGFILE}'
+import inspect
+import json
+import os
+import pytest
+
+from .env import H2Conf, H2TestEnv
+
+
 @pytest.mark.skipif(condition=H2TestEnv.is_unsupported, reason="mod_http2 not supported here")
 class TestTiming:
 
index f53f8eb9bff2516d5f1642ddfde259c94324a791..d6360939bea0a0516ed7730a4d156d34f26047af 100644 (file)
@@ -32,8 +32,6 @@ class TestFrameLengths:
         99, 1024, 8192
     ])
     def test_h2_107_01(self, env, data_frame_len):
-        if not env.httpd_is_at_least('2.5.0'):
-            pytest.skip(f'needs r1907696+r1907697 from trunk')
         conf = H2Conf(env, extras={
             f'cgi.{env.http_tld}': [
                 f'H2MaxDataFrameLen {data_frame_len}',
index 88a8ece3f6e98667b91a3da1ef95f080db6cf1bf..deb123d86b3e2f842813d1ddb4a4eb092db727e7 100644 (file)
@@ -61,6 +61,12 @@ class TestProxy:
         ])
         self.curl_upload_and_verify(env, "data-1k", ["--http2", "-H", "Content-Length:"])
 
+    def test_h2_500_11(self, env):
+        self.curl_upload_and_verify(env, "data-1k", [
+            "--http1.1", "-H", "Content-Length:", "-H", "Transfer-Encoding: chunked"
+        ])
+        self.curl_upload_and_verify(env, "data-1k", ["--http2", "-H", "Content-Length:"])
+
     # POST some data using nghttp and see it echo'ed properly back
     def nghttp_post_and_verify(self, env, fname, options=None):
         url = env.mkurl("https", "cgi", "/proxy/echo.py")