Sync with v2.0.10 from github:
author    Stefan Eissing <icing@apache.org>
          Tue, 11 Oct 2022 14:54:08 +0000 (14:54 +0000)
committer Stefan Eissing <icing@apache.org>
          Tue, 11 Oct 2022 14:54:08 +0000 (14:54 +0000)
 * Extensive testing in production done by Alessandro Bianchi (@alexskynet)
   on the v2.0.x versions for stability. Many thanks!
 * refactored stream response handling to reflect the different phases
   (response/data/trailers) more clearly and to help resolve CPU busy loops.
 * Adding more negative tests for handling of errored responses to cover
   edge cases.
 * mod_http2: fixed handling of responses where neither an EOS nor an ERROR
   was received, treating this as a cause to reset the stream.
 * mod_proxy_http2: generating error buckets for faulty response bodies, to
   signal failure to the frontend when response headers were already sent.
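
   A minimal sketch of the last item, using a hypothetical helper name: once the
   response headers are already on the wire, a failed upstream body can only be
   signaled by an error bucket followed by EOS, so the frontend can reset the
   stream instead of delivering a silently truncated body (compare the
   h2_proxy_session.c hunk further down).

       #include "httpd.h"
       #include "http_protocol.h"   /* ap_bucket_error_create() */
       #include "util_filter.h"     /* ap_pass_brigade() */

       /* Hypothetical helper: signal a failed response body after the headers
        * have already been sent on the frontend connection. */
       static apr_status_t signal_body_failure(request_rec *r,
                                               apr_bucket_brigade *out)
       {
           apr_bucket_alloc_t *ba = r->connection->bucket_alloc;
           apr_bucket *b;

           b = ap_bucket_error_create(HTTP_SERVICE_UNAVAILABLE, NULL, r->pool, ba);
           APR_BRIGADE_INSERT_TAIL(out, b);
           b = apr_bucket_eos_create(ba);
           APR_BRIGADE_INSERT_TAIL(out, b);
           return ap_pass_brigade(r->output_filters, out);
       }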

v2.0.9
--------------------------------------------------------------------------------
 * Fixed a bug where errors during response body handling did not lead to
   a proper RST_STREAM. Instead, processing went into an infinite loop.
   Extended test cases to catch this condition.
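
   A minimal sketch of the reset path this fix relies on, with a hypothetical
   helper name built on the nghttp2 API the module uses: on a body handling
   error the stream is reset once, rather than retried in a busy loop.

       #include <nghttp2/nghttp2.h>

       /* Hypothetical helper: give up on a single stream after a body handling
        * error. The peer sees RST_STREAM(INTERNAL_ERROR); the HTTP/2 session
        * itself stays usable for other streams. */
       static int reset_errored_stream(nghttp2_session *ngh2, int32_t stream_id)
       {
           return nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE,
                                            stream_id, NGHTTP2_INTERNAL_ERROR);
       }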

v2.0.8
--------------------------------------------------------------------------------
 * Delaying input setup of a stream until just before processing starts. This
   allows any EOS indicator arriving from the client before that to take effect.
   Without knowing that a stream has no input, internal processing has to
   simulate chunked encoding. This is not wrong, but somewhat more expensive
   and mod_security has been reported to be allergic to seeing 'chunked'
   on some requests. See <https://bz.apache.org/bugzilla/show_bug.cgi?id=66282>.
 * mod_proxy_http2: fixed #235 by no longer forwarding the 'Host:' header when
   the request's ':authority' is known. Improved the test case that had not
   caught that the previous 'fix' was incorrect.
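
   Both v2.0.8 items boil down to two header rules that also appear in the
   assign_headers() and open_stream() hunks below. A minimal sketch with
   hypothetical helper names: prefer ':authority' over 'Host:', and simulate
   chunked transfer coding only while a request body may still arrive and no
   Content-Length is present.

       #include "apr_tables.h"

       /* Hypothetical helper (proxy side): when ':authority' is known, drop any
        * 'Host:' header so only one authority is forwarded; differing values
        * are malformed per RFC 9113. */
       static void prefer_authority(apr_table_t *headers, const char *authority)
       {
           if (authority && *authority) {
               apr_table_unset(headers, "Host");
           }
       }

       /* Hypothetical helper (internal HTTP/1.1 processing): simulate chunked
        * encoding only when a body may still follow (no EOS seen yet) and the
        * request carries no Content-Length. */
       static void maybe_simulate_chunked(apr_table_t *headers, int input_open)
       {
           if (input_open && !apr_table_get(headers, "Content-Length")) {
               apr_table_mergen(headers, "Transfer-Encoding", "chunked");
           }
       }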

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1904522 13f79535-47bb-0310-9956-ffa450edef68

37 files changed:
modules/http2/h2.h
modules/http2/h2_bucket_beam.c
modules/http2/h2_c2.c
modules/http2/h2_c2_filter.c
modules/http2/h2_config.c
modules/http2/h2_conn_ctx.h
modules/http2/h2_mplx.c
modules/http2/h2_mplx.h
modules/http2/h2_proxy_session.c
modules/http2/h2_push.c
modules/http2/h2_request.c
modules/http2/h2_request.h
modules/http2/h2_session.c
modules/http2/h2_stream.c
modules/http2/h2_stream.h
modules/http2/h2_util.c
modules/http2/h2_util.h
modules/http2/h2_version.h
modules/http2/h2_workers.c
modules/http2/mod_http2.c
modules/http2/mod_proxy_http2.c
test/modules/http2/env.py
test/modules/http2/htdocs/cgi/alive.json [new file with mode: 0644]
test/modules/http2/htdocs/cgi/hello.py
test/modules/http2/mod_h2test/mod_h2test.c
test/modules/http2/test_003_get.py
test/modules/http2/test_105_timeout.py
test/modules/http2/test_202_trailer.py
test/modules/http2/test_203_rfc9113.py [new file with mode: 0644]
test/modules/http2/test_401_early_hints.py
test/modules/http2/test_500_proxy.py
test/modules/http2/test_600_h2proxy.py
test/pyhttpd/conf.py
test/pyhttpd/config.ini.in
test/pyhttpd/env.py
test/pyhttpd/nghttp.py
test/pyhttpd/result.py

index cff49e15f0f7a025759f517829f19a20885a3256..250e7260e8fd93aa534ee28af4a75a793e67fa18 100644 (file)
@@ -156,7 +156,6 @@ struct h2_request {
     apr_table_t *headers;
 
     apr_time_t request_time;
-    unsigned int chunked : 1;   /* iff request body needs to be forwarded as chunked */
     apr_off_t raw_bytes;        /* RAW network bytes that generated this request - if known. */
     int http_status;            /* Store a possible HTTP status code that gets
                                  * defined before creating the dummy HTTP/1.1
index 84b985b94985c21cc19aa87a26b7c45b0f50c954..cbf7f348da7e3dd1f4b359d50627a7e7b88224ca 100644 (file)
@@ -529,7 +529,10 @@ apr_status_t h2_beam_send(h2_bucket_beam *beam, conn_rec *from,
     space_left = calc_space_left(beam);
     while (!APR_BRIGADE_EMPTY(sender_bb) && APR_SUCCESS == rv) {
         rv = append_bucket(beam, sender_bb, block, &space_left, pwritten);
-        if (!beam->aborted && APR_EAGAIN == rv) {
+        if (beam->aborted) {
+            goto cleanup;
+        }
+        else if (APR_EAGAIN == rv) {
             /* bucket was not added, as beam buffer has no space left.
              * Trigger event callbacks, so receiver can know there is something
              * to receive before we do a conditional wait. */
@@ -548,6 +551,7 @@ apr_status_t h2_beam_send(h2_bucket_beam *beam, conn_rec *from,
         }
     }
 
+cleanup:
     if (beam->send_cb && !buffer_is_empty(beam)) {
         beam->send_cb(beam->send_ctx, beam);
     }
index 53e511a33ac3b71bfb73a5d32800530e138f53bb..44a08d075e19c3830b306d97830c3e27804e7e6c 100644 (file)
@@ -464,13 +464,18 @@ static int c2_post_read_request(request_rec *r)
 {
     h2_conn_ctx_t *conn_ctx;
     conn_rec *c2 = r->connection;
+    apr_time_t timeout;
 
     if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) {
         return DECLINED;
     }
     /* Now that the request_rec is fully initialized, set relevant params */
     conn_ctx->server = r->server;
-    h2_conn_ctx_set_timeout(conn_ctx, r->server->timeout);
+    timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT);
+    if (timeout <= 0) {
+        timeout = r->server->timeout;
+    }
+    h2_conn_ctx_set_timeout(conn_ctx, timeout);
     /* We only handle this one request on the connection and tell everyone
      * that there is no need to keep it "clean" if something fails. Also,
      * this prevents mod_reqtimeout from doing funny business with monitoring
@@ -651,8 +656,10 @@ static apr_status_t c2_process(h2_conn_ctx_t *conn_ctx, conn_rec *c)
     const h2_request *req = conn_ctx->request;
     conn_state_t *cs = c->cs;
     request_rec *r;
+    const char *tenc;
+    apr_time_t timeout;
 
-    r = h2_create_request_rec(conn_ctx->request, c);
+    r = h2_create_request_rec(conn_ctx->request, c, conn_ctx->beam_in == NULL);
     if (!r) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                       "h2_c2(%s-%d): create request_rec failed, r=NULL",
@@ -666,13 +673,18 @@ static apr_status_t c2_process(h2_conn_ctx_t *conn_ctx, conn_rec *c)
         goto cleanup;
     }
 
+    tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
+    conn_ctx->input_chunked = tenc && ap_is_chunked(r->pool, tenc);
+
     ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                   "h2_c2(%s-%d): created request_rec for %s",
                   conn_ctx->id, conn_ctx->stream_id, r->the_request);
     conn_ctx->server = r->server;
-
-    /* the request_rec->server carries the timeout value that applies */
-    h2_conn_ctx_set_timeout(conn_ctx, r->server->timeout);
+    timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT);
+    if (timeout <= 0) {
+        timeout = r->server->timeout;
+    }
+    h2_conn_ctx_set_timeout(conn_ctx, timeout);
 
     if (h2_config_sgeti(conn_ctx->server, H2_CONF_COPY_FILES)) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
index 728761212a599d5e50145daaa746cd5470ee82c7..e1d7cb72bf7755e52e6914b28aea669fce69d7d2 100644 (file)
@@ -635,7 +635,7 @@ apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb)
                  */
                 int result = ap_map_http_request_error(conn_ctx->last_err,
                                                        HTTP_INTERNAL_SERVER_ERROR);
-                request_rec *r = h2_create_request_rec(conn_ctx->request, f->c);
+                request_rec *r = h2_create_request_rec(conn_ctx->request, f->c, 1);
                 ap_die((result >= 400)? result : HTTP_INTERNAL_SERVER_ERROR, r);
                 b = ap_bucket_eor_create(f->c->bucket_alloc, r);
                 APR_BRIGADE_INSERT_TAIL(bb, b);
@@ -918,7 +918,7 @@ apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
                   "readbytes=%ld, exp=%d",
                   conn_ctx->id, conn_ctx->stream_id, mode, block,
                   (long)readbytes, r->expecting_100);
-    if (!conn_ctx->request->chunked) {
+    if (!conn_ctx->input_chunked) {
         status = ap_get_brigade(f->next, bb, mode, block, readbytes);
         /* pipe data through, just take care of trailers */
         for (b = APR_BRIGADE_FIRST(bb);
index 026e255fb5ff69f4ab4f6d79c6bce3963b5e1c21..40ef8b4cebd09f3c85a84832d88efc5da1154072 100644 (file)
@@ -574,6 +574,9 @@ static const char *h2_conf_set_max_worker_idle_limit(cmd_parms *cmd,
     if (rv != APR_SUCCESS) {
         return "Invalid idle limit value";
     }
+    if (timeout <= 0) {
+        timeout = DEF_VAL;
+    }
     CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_MAX_WORKER_IDLE_LIMIT, timeout);
     return NULL;
 }
index dff627db2d4efda3329a59b533c983462b1dac8e..35987bce3f6f4b073c7d6d318bbbaf7a08c072fc 100644 (file)
@@ -53,6 +53,7 @@ struct h2_conn_ctx_t {
     const struct h2_request *request; /* c2: the request to process */
     struct h2_bucket_beam *beam_out; /* c2: data out, created from req_pool */
     struct h2_bucket_beam *beam_in;  /* c2: data in or NULL, borrowed from request stream */
+    unsigned int input_chunked;      /* c2: if input needs HTTP/1.1 chunking applied */
 
     apr_file_t *pipe_in[2];          /* c2: input produced notification pipe */
     apr_pollfd_t pfd;                /* c1: poll socket input, c2: NUL */
index 0ce1d410ed8bed2dc673f2b9774cc8c9a4740de8..99c47ea8ef9fdee77612f7bb7f38d2496146eda7 100644 (file)
@@ -665,6 +665,10 @@ static apr_status_t c1_process_stream(h2_mplx *m,
                       H2_STRM_MSG(stream, "process, ready already"));
     }
     else {
+        /* last chance to set anything up before stream is processed
+         * by worker threads. */
+        rv = h2_stream_prepare_processing(stream);
+        if (APR_SUCCESS != rv) goto cleanup;
         h2_iq_add(m->q, stream->id, cmp, session);
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
                       H2_STRM_MSG(stream, "process, added to q"));
@@ -791,23 +795,21 @@ static apr_status_t c2_setup_io(h2_mplx *m, conn_rec *c2, h2_stream *stream, h2_
         h2_beam_on_was_empty(conn_ctx->beam_out, c2_beam_output_write_notify, c2);
     }
 
-    conn_ctx->beam_in = stream->input;
-    h2_beam_on_send(stream->input, c2_beam_input_write_notify, c2);
-    h2_beam_on_received(stream->input, c2_beam_input_read_notify, c2);
-    h2_beam_on_consumed(stream->input, c1_input_consumed, stream);
-
+    memset(&conn_ctx->pipe_in, 0, sizeof(conn_ctx->pipe_in));
+    if (stream->input) {
+        conn_ctx->beam_in = stream->input;
+        h2_beam_on_send(stream->input, c2_beam_input_write_notify, c2);
+        h2_beam_on_received(stream->input, c2_beam_input_read_notify, c2);
+        h2_beam_on_consumed(stream->input, c1_input_consumed, stream);
 #if H2_USE_PIPES
-    if (!conn_ctx->pipe_in[H2_PIPE_OUT]) {
         action = "create input write pipe";
         rv = apr_file_pipe_create_pools(&conn_ctx->pipe_in[H2_PIPE_OUT],
                                         &conn_ctx->pipe_in[H2_PIPE_IN],
                                         APR_READ_BLOCK,
                                         c2->pool, c2->pool);
         if (APR_SUCCESS != rv) goto cleanup;
-    }
-#else
-    memset(&conn_ctx->pipe_in, 0, sizeof(conn_ctx->pipe_in));
 #endif
+    }
 
 cleanup:
     stream->output = (APR_SUCCESS == rv)? conn_ctx->beam_out : NULL;
index 8b1feb80f77493b795c6e5cd5d5578c4b9ea3cac..1f79aa8248a473538c8d5a18e8ca2f8c3c3ade84 100644 (file)
@@ -169,7 +169,7 @@ void h2_mplx_c1_process(h2_mplx *m,
 apr_status_t h2_mplx_c1_reprioritize(h2_mplx *m, h2_stream_pri_cmp_fn *cmp,
                                     struct h2_session *session);
 
-typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream);
+typedef void stream_ev_callback(void *ctx, struct h2_stream *stream);
 
 /**
  * Poll the primary connection for input and the active streams for output.
index bea353dca1b2bd8b89be589d1a3c5463c806645e..3da57f9e739b86ab8b3993e00576d7ec7f2dfb23 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <mpm_common.h>
 #include <httpd.h>
+#include <http_protocol.h>
 #include <mod_proxy.h>
 
 #include "mod_http2.h"
@@ -854,14 +855,18 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
                       "authority=%s from uri.hostname=%s and uri.port=%d",
                       authority, puri.hostname, puri.port);
     }
-    
+    /* See #235, we use only :authority when available and remove Host:
+     * since differing values are not acceptable, see RFC 9113 ch. 8.3.1 */
+    if (authority && strlen(authority)) {
+        apr_table_unset(r->headers_in, "Host");
+    }
+
     /* we need this for mapping relative uris in headers ("Link") back
      * to local uris */
     stream->real_server_uri = apr_psprintf(stream->pool, "%s://%s", scheme, authority); 
     stream->p_server_uri = apr_psprintf(stream->pool, "%s://%s", puri.scheme, authority); 
     path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART);
 
-
     h2_proxy_req_make(stream->req, stream->pool, r->method, scheme,
                 authority, path, r->headers_in);
 
@@ -890,7 +895,6 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
                              r->server->server_hostname);
         }
     }
-    apr_table_unset(r->headers_in, "Host");
 
     /* Tuck away all already existing cookies */
     stream->saves = apr_table_make(r->pool, 2);
@@ -1350,33 +1354,42 @@ static void ev_stream_done(h2_proxy_session *session, int stream_id,
                            const char *msg)
 {
     h2_proxy_stream *stream;
-    
+    apr_bucket *b;
+
     stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
     if (stream) {
-        int touched = (stream->data_sent || 
-                       stream_id <= session->last_stream_id);
+        /* if the stream's connection is aborted, do not send anything
+         * more on it. */
         apr_status_t status = (stream->error_code == 0)? APR_SUCCESS : APR_EINVAL;
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364)
-                      "h2_proxy_sesssion(%s): stream(%d) closed "
-                      "(touched=%d, error=%d)", 
-                      session->id, stream_id, touched, stream->error_code);
-        
-        if (status != APR_SUCCESS) {
-            stream->r->status = 500;
-        }
-        else if (!stream->data_received) {
-            apr_bucket *b;
-            /* if the response had no body, this is the time to flush
-             * an empty brigade which will also write the response headers */
-            h2_proxy_stream_end_headers_out(stream);
-            stream->data_received = 1;
-            b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
-            APR_BRIGADE_INSERT_TAIL(stream->output, b);
-            b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
-            APR_BRIGADE_INSERT_TAIL(stream->output, b);
-            ap_pass_brigade(stream->r->output_filters, stream->output);
+        int touched = (stream->data_sent ||
+                       stream_id <= session->last_stream_id);
+        if (!session->c->aborted) {
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364)
+                          "h2_proxy_sesssion(%s): stream(%d) closed "
+                          "(touched=%d, error=%d)",
+                          session->id, stream_id, touched, stream->error_code);
+
+            if (status != APR_SUCCESS) {
+                b = ap_bucket_error_create(HTTP_SERVICE_UNAVAILABLE, NULL, stream->r->pool,
+                                           stream->r->connection->bucket_alloc);
+                APR_BRIGADE_INSERT_TAIL(stream->output, b);
+                b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
+                APR_BRIGADE_INSERT_TAIL(stream->output, b);
+                ap_pass_brigade(stream->r->output_filters, stream->output);
+            }
+            else if (!stream->data_received) {
+                /* if the response had no body, this is the time to flush
+                 * an empty brigade which will also write the response headers */
+                h2_proxy_stream_end_headers_out(stream);
+                stream->data_received = 1;
+                b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+                APR_BRIGADE_INSERT_TAIL(stream->output, b);
+                b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
+                APR_BRIGADE_INSERT_TAIL(stream->output, b);
+                ap_pass_brigade(stream->r->output_filters, stream->output);
+            }
         }
-        
+
         stream->state = H2_STREAM_ST_CLOSED;
         h2_proxy_ihash_remove(session->streams, stream_id);
         h2_proxy_iq_remove(session->suspended, stream_id);
index 7604df66788e16346bc4540d49a39fd1a0ad69b1..462c47002aeacc0014e51e62fc8d63ba8d8055f8 100644 (file)
@@ -351,7 +351,7 @@ static int add_push(link_ctx *ctx)
                 req = h2_request_create(0, ctx->pool, method, ctx->req->scheme,
                                         ctx->req->authority, path, headers);
                 /* atm, we do not push on pushes */
-                h2_request_end_headers(req, ctx->pool, 1, 0);
+                h2_request_end_headers(req, ctx->pool, 0);
                 push->req = req;
                 if (has_param(ctx, "critical")) {
                     h2_priority *prio = apr_pcalloc(ctx->pool, sizeof(*prio));
index dec3338ee08fcf1976de14790b7d6867b33e5465..7a820663ebb6eca5bfb2ff2cf24825f07fbf81ce 100644 (file)
@@ -185,14 +185,13 @@ apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
     return status;
 }
 
-apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes)
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
+                                    size_t raw_bytes)
 {
-    const char *s;
-
-    /* rfc7540, ch. 8.1.2.3:
-     * - if we have :authority, it overrides any Host header
-     * - :authority MUST be omitted when converting h1->h2, so we
-     *   might get a stream without, but then Host needs to be there */
+    /* rfc7540, ch. 8.1.2.3: without :authority, Host: must be there */
+    if (req->authority && !strlen(req->authority)) {
+        req->authority = NULL;
+    }
     if (!req->authority) {
         const char *host = apr_table_get(req->headers, "Host");
         if (!host) {
@@ -203,40 +202,6 @@ apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos,
     else {
         apr_table_setn(req->headers, "Host", req->authority);
     }
-
-#if AP_HAS_RESPONSE_BUCKETS
-    if (eos) {
-        s = apr_table_get(req->headers, "Content-Length");
-        if (!s && apr_table_get(req->headers, "Content-Type")) {
-            /* If we have a content-type, but already seen eos, no more
-             * data will come. Signal a zero content length explicitly.
-             */
-            apr_table_setn(req->headers, "Content-Length", "0");
-        }
-    }
-#else /* AP_HAS_RESPONSE_BUCKETS */
-    s = apr_table_get(req->headers, "Content-Length");
-    if (!s) {
-        /* HTTP/2 does not need a Content-Length for framing, but our
-         * internal request processing is used to HTTP/1.1, so we
-         * need to either add a Content-Length or a Transfer-Encoding
-         * if any content can be expected. */
-        if (!eos) {
-            /* We have not seen a content-length and have no eos,
-             * simulate a chunked encoding for our HTTP/1.1 infrastructure,
-             * in case we have "H2SerializeHeaders on" here
-             */
-            req->chunked = 1;
-            apr_table_mergen(req->headers, "Transfer-Encoding", "chunked");
-        }
-        else if (apr_table_get(req->headers, "Content-Type")) {
-            /* If we have a content-type, but already seen eos, no more
-             * data will come. Signal a zero content length explicitly.
-             */
-            apr_table_setn(req->headers, "Content-Length", "0");
-        }
-    }
-#endif /* else AP_HAS_RESPONSE_BUCKETS */
     req->raw_bytes += raw_bytes;
 
     return APR_SUCCESS;
@@ -333,7 +298,59 @@ apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r)
 }
 #endif
 
-request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
+static void assign_headers(request_rec *r, const h2_request *req,
+                           int no_body)
+{
+    const char *cl;
+
+    r->headers_in = apr_table_copy(r->pool, req->headers);
+    if (req->authority) {
+        /* for internal handling, we have to simulate that :authority
+         * came in as Host:, RFC 9113 ch. says that mismatches between
+         * :authority and Host: SHOULD be rejected as malformed. However,
+         * we are more lenient and just replace any Host: if we have
+         * an :authority.
+         */
+        const char *orig_host = apr_table_get(req->headers, "Host");
+        if (orig_host && strcmp(req->authority, orig_host)) {
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO()
+                          "overwriting 'Host: %s' with :authority: %s'",
+                          orig_host, req->authority);
+            apr_table_setn(r->subprocess_env, "H2_ORIGINAL_HOST", orig_host);
+        }
+        apr_table_setn(r->headers_in, "Host", req->authority);
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+                      "set 'Host: %s' from :authority", req->authority);
+    }
+
+    cl = apr_table_get(req->headers, "Content-Length");
+    if (no_body) {
+        if (!cl && apr_table_get(req->headers, "Content-Type")) {
+            /* If we have a content-type, but already seen eos, no more
+             * data will come. Signal a zero content length explicitly.
+             */
+            apr_table_setn(req->headers, "Content-Length", "0");
+        }
+    }
+#if !AP_HAS_RESPONSE_BUCKETS
+    else if (!cl) {
+        /* there may be a body and we have internal HTTP/1.1 processing.
+         * If the Content-Length is unspecified, we MUST simulate
+         * chunked Transfer-Encoding.
+         *
+         * HTTP/2 does not need a Content-Length for framing. Ideally
+         * all clients set the EOS flag on the header frame if they
+         * do not intent to send a body. However, forwarding proxies
+         * might just no know at the time and send an empty DATA
+         * frame with EOS much later.
+         */
+        apr_table_mergen(r->headers_in, "Transfer-Encoding", "chunked");
+    }
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
+}
+
+request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c,
+                                   int no_body)
 {
     int access_status = HTTP_OK;
 
@@ -344,6 +361,7 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
 #endif
 
 #if AP_MODULE_MAGIC_AT_LEAST(20120211, 107)
+    assign_headers(r, req, no_body);
     ap_run_pre_read_request(r, c);
 
     /* Time to populate r with the data we have. */
@@ -371,8 +389,6 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
         r->the_request = apr_psprintf(r->pool, "%s / HTTP/2.0", req->method);
     }
 
-    r->headers_in = apr_table_copy(r->pool, req->headers);
-
     /* Start with r->hostname = NULL, ap_check_request_header() will get it
      * form Host: header, otherwise we get complains about port numbers.
      */
@@ -397,7 +413,7 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
     {
         const char *s;
 
-        r->headers_in = apr_table_clone(r->pool, req->headers);
+        assign_headers(r, req, no_body);
         ap_run_pre_read_request(r, c);
 
         /* Time to populate r with the data we have. */
index 40ae1c5c691dd3eddf5d9f58f9cc794a2788ef59..7e20b697246da2f89da3d8cc7bc9256b8a364e25 100644 (file)
@@ -35,7 +35,8 @@ apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
                                     const char *name, size_t nlen,
                                     const char *value, size_t vlen);
 
-apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes);
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
+                                     size_t raw_bytes);
 
 h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
 
@@ -45,9 +46,11 @@ h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
  *
  * @param req the h2 request to process
  * @param conn the connection to process the request on
+ * @param no_body != 0 iff the request is known to have no body
  * @return the request_rec representing the request
  */
-request_rec *h2_create_request_rec(const h2_request *req, conn_rec *conn);
+request_rec *h2_create_request_rec(const h2_request *req, conn_rec *conn,
+                                   int no_body);
 
 #if AP_HAS_RESPONSE_BUCKETS
 apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r);
index 852168142a7a15be9ce37b48272bda0892cbdb6f..d0b3a475985e85c5da14f68af20edba30b09081e 100644 (file)
@@ -89,7 +89,7 @@ void h2_session_event(h2_session *session, h2_session_event_t ev,
 
 static int rst_unprocessed_stream(h2_stream *stream, void *ctx)
 {
-    int unprocessed = (!h2_stream_was_closed(stream)
+    int unprocessed = (!h2_stream_is_at_or_past(stream, H2_SS_CLOSED)
                        && (H2_STREAM_CLIENT_INITIATED(stream->id)? 
                            (!stream->session->local.accepting
                             && stream->id > stream->session->local.accepted_max)
@@ -1298,58 +1298,37 @@ cleanup:
 /**
  * A streams input state has changed.
  */
-static apr_status_t on_stream_input(void *ctx, h2_stream *stream)
+static void on_stream_input(void *ctx, h2_stream *stream)
 {
     h2_session *session = ctx;
-    apr_status_t rv = APR_EAGAIN;
 
     ap_assert(stream);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
                   H2_STRM_MSG(stream, "on_input change"));
-
+    update_child_status(session, SERVER_BUSY_READ, "read", stream);
     if (stream->id == 0) {
         /* input on primary connection available? read */
-        rv = h2_c1_read(session);
+        h2_c1_read(session);
     }
     else {
-        ap_assert(stream->input);
-        if (stream->state == H2_SS_CLOSED_L
-            && !h2_mplx_c1_stream_is_running(session->mplx, stream)) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
-                          H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing"));
-            nghttp2_submit_rst_stream(stream->session->ngh2, NGHTTP2_FLAG_NONE,
-                                      stream->id, NGHTTP2_NO_ERROR);
-            update_child_status(session, SERVER_BUSY_WRITE, "reset", stream);
-            goto cleanup;
-        }
-        update_child_status(session, SERVER_BUSY_READ, "read", stream);
-        h2_beam_report_consumption(stream->input);
-        if (stream->state == H2_SS_CLOSED_R) {
-            /* TODO: remove this stream from input polling */
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
-                          H2_STRM_MSG(stream, "should not longer be input polled"));
-        }
+        h2_stream_on_input_change(stream);
     }
-cleanup:
-    return rv;
 }
 
 /**
  * A streams output state has changed.
  */
-static apr_status_t on_stream_output(void *ctx, h2_stream *stream)
+static void on_stream_output(void *ctx, h2_stream *stream)
 {
     h2_session *session = ctx;
 
     ap_assert(stream);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
                   H2_STRM_MSG(stream, "on_output change"));
-    if (stream->id == 0) {
-        /* we dont poll output of stream 0, this should not be called */
-        return APR_SUCCESS;
+    if (stream->id != 0) {
+        update_child_status(session, SERVER_BUSY_WRITE, "write", stream);
+        h2_stream_on_output_change(stream);
     }
-    update_child_status(session, SERVER_BUSY_WRITE, "write", stream);
-    return h2_stream_read_output(stream);
 }
 
 
@@ -1878,6 +1857,9 @@ apr_status_t h2_session_process(h2_session *session, int async)
                         apr_time_t timeout = (session->open_streams == 0)?
                             session->s->keep_alive_timeout :
                             session->s->timeout;
+                        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
+                                      H2_SSSN_MSG(session, "polling timeout=%d"),
+                                      (int)apr_time_sec(timeout));
                         status = h2_mplx_c1_poll(session->mplx, timeout,
                                                  on_stream_input,
                                                  on_stream_output, session);
@@ -1922,10 +1904,16 @@ apr_status_t h2_session_process(h2_session *session, int async)
             if (session->open_streams == 0) {
                 h2_session_dispatch_event(session, H2_SESSION_EV_NO_MORE_STREAMS,
                                           0, "streams really done");
+                if (session->state != H2_SESSION_ST_WAIT) {
+                    break;
+                }
             }
             /* No IO happening and input is exhausted. Make sure we have
              * flushed any possibly pending output and then wait with
              * the c1 connection timeout for sth to happen in our c1/c2 sockets/pipes */
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
+                          H2_SSSN_MSG(session, "polling timeout=%d, open_streams=%d"),
+                          (int)apr_time_sec(session->s->timeout), session->open_streams);
             status = h2_mplx_c1_poll(session->mplx, session->s->timeout,
                                      on_stream_input, on_stream_output, session);
             if (APR_STATUS_IS_TIMEUP(status)) {
index abcbce355ce3960c13956292e81eac935c155806..d915b5295c1e884e1d827ddfafa80571db05843b 100644 (file)
@@ -196,33 +196,45 @@ static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag)
     }
 }
 
-apr_status_t h2_stream_setup_input(h2_stream *stream)
+static void stream_setup_input(h2_stream *stream)
 {
-    /* already done? */
-    if (stream->input != NULL) goto cleanup;
-
+    if (stream->input != NULL) return;
+    ap_assert(!stream->input_closed);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
                   H2_STRM_MSG(stream, "setup input beam"));
     h2_beam_create(&stream->input, stream->session->c1,
                    stream->pool, stream->id,
                    "input", 0, stream->session->s->timeout);
-cleanup:
+}
+
+apr_status_t h2_stream_prepare_processing(h2_stream *stream)
+{
+    /* Right before processing starts, last chance to decide if
+     * there is need to an input beam. */
+    if (!stream->input_closed) {
+        stream_setup_input(stream);
+    }
     return APR_SUCCESS;
 }
 
+static int input_buffer_is_empty(h2_stream *stream)
+{
+    return !stream->in_buffer || APR_BRIGADE_EMPTY(stream->in_buffer);
+}
+
 static apr_status_t input_flush(h2_stream *stream)
 {
     apr_status_t status = APR_SUCCESS;
     apr_off_t written;
 
-    if (!stream->in_buffer) goto cleanup;
+    if (input_buffer_is_empty(stream)) goto cleanup;
 
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
                   H2_STRM_MSG(stream, "flush input"));
     status = h2_beam_send(stream->input, stream->session->c1,
                           stream->in_buffer, APR_BLOCK_READ, &written);
     stream->in_last_write = apr_time_now();
-    if (APR_SUCCESS != status && stream->state == H2_SS_CLOSED_L) {
+    if (APR_SUCCESS != status && h2_stream_is_at(stream, H2_SS_CLOSED_L)) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c1,
                       H2_STRM_MSG(stream, "send input error"));
         h2_stream_dispatch(stream, H2_SEV_IN_ERROR);
@@ -234,6 +246,7 @@ cleanup:
 static void input_append_bucket(h2_stream *stream, apr_bucket *b)
 {
     if (!stream->in_buffer) {
+        stream_setup_input(stream);
         stream->in_buffer = apr_brigade_create(
             stream->pool, stream->session->c1->bucket_alloc);
     }
@@ -243,6 +256,7 @@ static void input_append_bucket(h2_stream *stream, apr_bucket *b)
 static void input_append_data(h2_stream *stream, const char *data, apr_size_t len)
 {
     if (!stream->in_buffer) {
+        stream_setup_input(stream);
         stream->in_buffer = apr_brigade_create(
             stream->pool, stream->session->c1->bucket_alloc);
     }
@@ -278,12 +292,14 @@ static apr_status_t close_input(h2_stream *stream)
     }
 
     stream->input_closed = 1;
-    b = apr_bucket_eos_create(c->bucket_alloc);
-    input_append_bucket(stream, b);
-    input_flush(stream);
-    h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
-                  H2_STRM_MSG(stream, "input flush + EOS"));
+    if (stream->input) {
+        b = apr_bucket_eos_create(c->bucket_alloc);
+        input_append_bucket(stream, b);
+        input_flush(stream);
+        h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
+                      H2_STRM_MSG(stream, "input flush + EOS"));
+    }
 
 cleanup:
     return rv;
@@ -467,7 +483,7 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_
             
         case NGHTTP2_HEADERS:
             eos = (flags & NGHTTP2_FLAG_END_STREAM);
-            if (stream->state == H2_SS_OPEN) {
+            if (h2_stream_is_at_or_past(stream, H2_SS_OPEN)) {
                 /* trailer HEADER */
                 if (!eos) {
                     h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
@@ -546,7 +562,6 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
                 stream->session->ngh2, stream->id);
     }
 #endif
-    h2_stream_setup_input(stream);
     ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
                   H2_STRM_LOG(APLOGNO(03082), stream, "created"));
     on_state_enter(stream);
@@ -776,8 +791,10 @@ apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
     int is_http_or_https;
     h2_request *req = stream->rtmp;
 
-    status = h2_request_end_headers(req, stream->pool, eos, raw_bytes);
-    if (APR_SUCCESS != status || req->http_status != H2_HTTP_STATUS_UNSET) goto cleanup;
+    status = h2_request_end_headers(req, stream->pool, raw_bytes);
+    if (APR_SUCCESS != status || req->http_status != H2_HTTP_STATUS_UNSET) {
+        goto cleanup;
+    }
 
     /* keep on returning APR_SUCCESS for error responses, so that we
      * send it and do not RST the stream.
@@ -903,6 +920,23 @@ static apr_bucket *get_first_response_bucket(apr_bucket_brigade *bb)
     return NULL;
 }
 
+static void stream_do_error_bucket(h2_stream *stream, apr_bucket *b)
+{
+    int err = ((ap_bucket_error *)(b->data))->status;
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
+                  H2_STRM_MSG(stream, "error bucket received, err=%d"), err);
+    if (err >= 500) {
+        err = NGHTTP2_INTERNAL_ERROR;
+    }
+    else if (err >= 400) {
+        err = NGHTTP2_STREAM_CLOSED;
+    }
+    else {
+        err = NGHTTP2_PROTOCOL_ERROR;
+    }
+    h2_stream_rst(stream, err);
+}
+
 static apr_status_t buffer_output_receive(h2_stream *stream)
 {
     apr_status_t rv = APR_EAGAIN;
@@ -913,6 +947,10 @@ static apr_status_t buffer_output_receive(h2_stream *stream)
     if (!stream->output) {
         goto cleanup;
     }
+    if (stream->rst_error) {
+        rv = APR_ECONNRESET;
+        goto cleanup;
+    }
 
     if (!stream->out_buffer) {
         stream->out_buffer = apr_brigade_create(stream->pool, c1->bucket_alloc);
@@ -951,7 +989,6 @@ static apr_status_t buffer_output_receive(h2_stream *stream)
                 ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
                               H2_STRM_MSG(stream, "out_buffer, receive unsuccessful"));
             }
-            goto cleanup;
         }
     }
 
@@ -968,6 +1005,10 @@ static apr_status_t buffer_output_receive(h2_stream *stream)
                 else if (APR_BUCKET_IS_EOS(b)) {
                     stream->output_eos = 1;
                 }
+                else if (AP_BUCKET_IS_ERROR(b)) {
+                    stream_do_error_bucket(stream, b);
+                    break;
+                }
             }
             else if (b->length == 0) {  /* zero length data */
                 APR_BUCKET_REMOVE(b);
@@ -1008,215 +1049,66 @@ apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
     return rv;
 }
 
-static apr_status_t buffer_output_process_headers(h2_stream *stream)
+static apr_status_t stream_do_trailers(h2_stream *stream)
 {
     conn_rec *c1 = stream->session->c1;
-    apr_status_t rv = APR_EAGAIN;
-    int ngrv = 0, is_empty;
+    int ngrv;
     h2_ngheader *nh = NULL;
     apr_bucket *b, *e;
 #if AP_HAS_RESPONSE_BUCKETS
-    ap_bucket_response *resp = NULL;
     ap_bucket_headers *headers = NULL;
 #else
-    h2_headers *headers = NULL, *resp = NULL;
+    h2_headers *headers = NULL;
 #endif
+    apr_status_t rv;
 
-    if (!stream->out_buffer) goto cleanup;
+    ap_assert(stream->response);
+    ap_assert(stream->out_buffer);
 
     b = APR_BRIGADE_FIRST(stream->out_buffer);
     while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
         e = APR_BUCKET_NEXT(b);
         if (APR_BUCKET_IS_METADATA(b)) {
 #if AP_HAS_RESPONSE_BUCKETS
-            if (AP_BUCKET_IS_RESPONSE(b)) {
-                resp = b->data;
-                APR_BUCKET_REMOVE(b);
-                apr_bucket_destroy(b);
-                ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
-                              H2_STRM_MSG(stream, "process response %d"),
-                              resp->status);
-                b = e;
-                break;
-            }
-            else if (AP_BUCKET_IS_HEADERS(b)) {
+            if (AP_BUCKET_IS_HEADERS(b)) {
                 headers = b->data;
-                APR_BUCKET_REMOVE(b);
-                apr_bucket_destroy(b);
-                ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
-                              H2_STRM_MSG(stream, "process headers"));
-                b = e;
-                break;
-            }
 #else /* AP_HAS_RESPONSE_BUCKETS */
             if (H2_BUCKET_IS_HEADERS(b)) {
                 headers = h2_bucket_headers_get(b);
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
                 APR_BUCKET_REMOVE(b);
                 apr_bucket_destroy(b);
                 ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
-                              H2_STRM_MSG(stream, "process headers, response %d"),
-                              headers->status);
-                if (!stream->response) {
-                    resp = headers;
-                    headers = NULL;
-                }
-                b = e;
+                              H2_STRM_MSG(stream, "process trailers"));
+                break;
+            }
+            else if (APR_BUCKET_IS_EOS(b)) {
                 break;
             }
-#endif /* else AP_HAS_RESPONSE_BUCKETS */
         }
         else {
-            if (!stream->response) {
-                /* data buckets before response headers, an error */
-                rv = APR_EINVAL;
-            }
-            /* data bucket, need to send those before processing
-             * any subsequent headers (trailers) */
-            goto cleanup;
+            break;
         }
         b = e;
     }
 
-    if (resp) {
-        nghttp2_data_provider provider, *pprovider = NULL;
-
-        if (resp->status < 100) {
-            h2_stream_rst(stream, resp->status);
-            goto cleanup;
-        }
-
-        if (resp->status == HTTP_FORBIDDEN && resp->notes) {
-            const char *cause = apr_table_get(resp->notes, "ssl-renegotiate-forbidden");
-            if (cause) {
-                /* This request triggered a TLS renegotiation that is not allowed
-                 * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
-                 */
-                ap_log_cerror(APLOG_MARK, APLOG_DEBUG, resp->status, c1,
-                              H2_STRM_LOG(APLOGNO(03061), stream,
-                              "renegotiate forbidden, cause: %s"), cause);
-                h2_stream_rst(stream, H2_ERR_HTTP_1_1_REQUIRED);
-                goto cleanup;
-            }
-        }
-
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
-                      H2_STRM_LOG(APLOGNO(03073), stream,
-                      "submit response %d"), resp->status);
-
-        /* If this stream is not a pushed one itself,
-         * and HTTP/2 server push is enabled here,
-         * and the response HTTP status is not sth >= 400,
-         * and the remote side has pushing enabled,
-         * -> find and perform any pushes on this stream
-         *    *before* we submit the stream response itself.
-         *    This helps clients avoid opening new streams on Link
-         *    resp that get pushed right afterwards.
-         *
-         * *) the response code is relevant, as we do not want to
-         *    make pushes on 401 or 403 codes and friends.
-         *    And if we see a 304, we do not push either
-         *    as the client, having this resource in its cache, might
-         *    also have the pushed ones as well.
-         */
-        if (!stream->initiated_on
-            && !stream->response
-            && stream->request && stream->request->method
-            && !strcmp("GET", stream->request->method)
-            && (resp->status < 400)
-            && (resp->status != 304)
-            && h2_session_push_enabled(stream->session)) {
-            /* PUSH is possible and enabled on server, unless the request
-             * denies it, submit resources to push */
-            const char *s = apr_table_get(resp->notes, H2_PUSH_MODE_NOTE);
-            if (!s || strcmp(s, "0")) {
-                h2_stream_submit_pushes(stream, resp);
-            }
-        }
-
-        if (!stream->pref_priority) {
-            stream->pref_priority = h2_stream_get_priority(stream, resp);
-        }
-        h2_session_set_prio(stream->session, stream, stream->pref_priority);
-
-        if (resp->status == 103
-            && !h2_config_sgeti(stream->session->s, H2_CONF_EARLY_HINTS)) {
-            /* suppress sending this to the client, it might have triggered
-             * pushes and served its purpose nevertheless */
-            rv = APR_SUCCESS;
-            goto cleanup;
-        }
-        if (resp->status >= 200) {
-            stream->response = resp;
-        }
-
-        /* Do we know if this stream has no response body? */
-        is_empty = 0;
-        while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
-            if (APR_BUCKET_IS_METADATA(b)) {
-#if AP_HAS_RESPONSE_BUCKETS
-                if (AP_BUCKET_IS_HEADERS(b)) {
-                    break;
-                }
-#else
-                if (H2_BUCKET_IS_HEADERS(b)) {
-                    break;
-                }
-#endif
-                else if (APR_BUCKET_IS_EOS(b)) {
-                    is_empty = 1;
-                    break;
-                }
-            }
-            else {  /* data, not empty */
-                break;
-            }
-            b = APR_BUCKET_NEXT(b);
-        }
-
-        if (!is_empty) {
-            memset(&provider, 0, sizeof(provider));
-            provider.source.fd = stream->id;
-            provider.read_callback = stream_data_cb;
-            pprovider = &provider;
-        }
-
-        rv = h2_res_create_ngheader(&nh, stream->pool, resp);
-        if (APR_SUCCESS != rv) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
-                          H2_STRM_LOG(APLOGNO(10025), stream, "invalid response"));
-            h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
-            goto cleanup;
-        }
-        ngrv = nghttp2_submit_response(stream->session->ngh2, stream->id,
-                                       nh->nv, nh->nvlen, pprovider);
-        if (stream->initiated_on) {
-            ++stream->session->pushes_submitted;
-        }
-        else {
-            ++stream->session->responses_submitted;
-        }
+    if (!headers) {
+        rv = APR_EAGAIN;
+        goto cleanup;
     }
-    else if (headers) {
-        if (!stream->response) {
-            h2_stream_rst(stream, HTTP_INTERNAL_SERVER_ERROR);
-            goto cleanup;
-        }
-        rv = h2_res_create_ngtrailer(&nh, stream->pool, headers);
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
-                      H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"),
-                      (int)nh->nvlen);
-        if (APR_SUCCESS != rv) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
-                          H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers"));
-            h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
-            goto cleanup;
-        }
 
-        ngrv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, nh->nv, nh->nvlen);
-        stream->sent_trailers = 1;
+    rv = h2_res_create_ngtrailer(&nh, stream->pool, headers);
+    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+                  H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"),
+                  (int)nh->nvlen);
+    if (APR_SUCCESS != rv) {
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+                      H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers"));
+        h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+        goto cleanup;
     }
 
-cleanup:
+    ngrv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, nh->nv, nh->nvlen);
     if (nghttp2_is_fatal(ngrv)) {
         rv = APR_EGENERAL;
         h2_session_dispatch_event(stream->session,
@@ -1225,6 +1117,9 @@ cleanup:
                       APLOGNO(02940) "submit_response: %s",
                       nghttp2_strerror(rv));
     }
+    stream->sent_trailers = 1;
+
+cleanup:
     return rv;
 }
 
@@ -1290,12 +1185,26 @@ int h2_stream_is_ready(h2_stream *stream)
     return 0;
 }
 
-int h2_stream_was_closed(const h2_stream *stream)
+int h2_stream_is_at(const h2_stream *stream, h2_stream_state_t state)
 {
-    switch (stream->state) {
+    return stream->state == state;
+}
+
+int h2_stream_is_at_or_past(const h2_stream *stream, h2_stream_state_t state)
+{
+    switch (state) {
+        case H2_SS_IDLE:
+            return 1; /* by definition */
+        case H2_SS_RSVD_R: /*fall through*/
+        case H2_SS_RSVD_L: /*fall through*/
+        case H2_SS_OPEN:
+            return stream->state == state || stream->state >= H2_SS_OPEN;
+        case H2_SS_CLOSED_R: /*fall through*/
+        case H2_SS_CLOSED_L: /*fall through*/
         case H2_SS_CLOSED:
+            return stream->state == state || stream->state >= H2_SS_CLOSED;
         case H2_SS_CLEANUP:
-            return 1;
+            return stream->state == state;
         default:
             return 0;
     }
@@ -1415,35 +1324,25 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
     apr_status_t rv;
     h2_stream *stream;
 
-    /* nghttp2 wants to send more DATA for the stream. We need
-     * to find out how much of the requested length we can send without
-     * blocking.
-     * Indicate EOS when we encounter it or DEFERRED if the stream
-     * should be suspended. Beware of trailers.
-     */
+    /* nghttp2 wants to send more DATA for the stream.
+     * we should have submitted the final response at this time
+     * after receiving output via stream_do_responses() */
     ap_assert(session);
     (void)ng2s;
     (void)buf;
     (void)source;
     stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
-    if (!stream || !stream->output) {
+
+    if (!stream) {
         ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c1,
                       APLOGNO(02937)
                       H2_SSSN_STRM_MSG(session, stream_id, "data_cb, stream not found"));
         return NGHTTP2_ERR_CALLBACK_FAILURE;
     }
-    if (!stream->response) {
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
-                      APLOGNO(10299)
-                      H2_SSSN_STRM_MSG(session, stream_id, "data_cb, no response seen yet"));
+    if (!stream->output || !stream->response || !stream->out_buffer) {
         return NGHTTP2_ERR_DEFERRED;
     }
     if (stream->rst_error) {
-        return NGHTTP2_ERR_CALLBACK_FAILURE;
-    }
-    if (!stream->out_buffer) {
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
-                      H2_SSSN_STRM_MSG(session, stream_id, "suspending"));
         return NGHTTP2_ERR_DEFERRED;
     }
     if (h2_c1_io_needs_flush(&session->io)) {
@@ -1463,68 +1362,76 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
         }
     }
 
-    /* How much data do we have in our buffers that we can write? */
-check_and_receive:
+    /* How much data do we have in our buffers that we can write?
+     * if not enough, receive more. */
     buf_len = output_data_buffered(stream, &eos, &header_blocked);
-    while (buf_len < (apr_off_t)length && !eos && !header_blocked) {
+    if (buf_len < (apr_off_t)length && !eos
+           && !header_blocked && !stream->rst_error) {
         /* read more? */
         ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
                       H2_SSSN_STRM_MSG(session, stream_id,
                       "need more (read len=%ld, %ld in buffer)"),
                       (long)length, (long)buf_len);
         rv = buffer_output_receive(stream);
-        if (APR_EOF == rv) {
-            eos = 1;
-            rv = APR_SUCCESS;
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+                      H2_SSSN_STRM_MSG(session, stream_id,
+                      "buffer_output_received"));
+        if (APR_STATUS_IS_EAGAIN(rv)) {
+            /* currently, no more is available */
         }
-
-        if (APR_SUCCESS == rv) {
-            /* re-assess */
+        else if (APR_SUCCESS == rv) {
+            /* got some, re-assess */
             buf_len = output_data_buffered(stream, &eos, &header_blocked);
         }
-        else if (APR_STATUS_IS_EAGAIN(rv)) {
-            /* currently, no more is available */
-            break;
+        else if (APR_EOF == rv) {
+            if (!stream->output_eos) {
+                /* Seeing APR_EOF without an EOS bucket received before indicates
+                 * that stream output is incomplete. Commonly, we expect to see
+                 * an ERROR bucket to have been generated. But faulty handlers
+                 * may not have generated one.
+                 * We need to RST the stream bc otherwise the client thinks
+                 * it is all fine. */
+                 ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+                               H2_SSSN_STRM_MSG(session, stream_id, "rst stream"));
+                 h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+                 return NGHTTP2_ERR_CALLBACK_FAILURE;
+            }
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+                          H2_SSSN_STRM_MSG(session, stream_id,
+                          "eof on receive (read len=%ld, %ld in buffer)"),
+                          (long)length, (long)buf_len);
+            eos = 1;
+            rv = APR_SUCCESS;
         }
-        else if (APR_SUCCESS != rv) {
+        else {
             ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
                           H2_STRM_LOG(APLOGNO(02938), stream, "data_cb, reading data"));
             return NGHTTP2_ERR_CALLBACK_FAILURE;
         }
     }
 
+    if (stream->rst_error) {
+        return NGHTTP2_ERR_DEFERRED;
+    }
+
     if (buf_len == 0 && header_blocked) {
-        /* we are blocked from having data to send by a HEADER bucket sitting
-         * at buffer start. Send it and check again what DATA we can send. */
-        rv = buffer_output_process_headers(stream);
-        if (APR_SUCCESS == rv) {
-            goto check_and_receive;
-        }
-        else if (APR_STATUS_IS_EAGAIN(rv)) {
-            /* unable to send the HEADER at this time. */
-            eos = 0;
-            goto cleanup;
-        }
-        else {
+        rv = stream_do_trailers(stream);
+        if (APR_SUCCESS != rv && !APR_STATUS_IS_EAGAIN(rv)) {
             ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
                           H2_STRM_LOG(APLOGNO(10300), stream,
-                          "data_cb, error processing headers"));
+                          "data_cb, error processing trailers"));
             return NGHTTP2_ERR_CALLBACK_FAILURE;
         }
+        length = 0;
+        eos = 0;
     }
-
-    if (buf_len > (apr_off_t)length) {
+    else if (buf_len > (apr_off_t)length) {
         eos = 0;  /* Any EOS we have in the buffer does not apply yet */
     }
     else {
         length = (size_t)buf_len;
     }
 
-    if (stream->sent_trailers) {
-        /* We already sent trailers and will/can not send more DATA. */
-        eos = 0;
-    }
-
     if (length) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
                       H2_STRM_MSG(stream, "data_cb, sending len=%ld, eos=%d"),
@@ -1538,14 +1445,208 @@ check_and_receive:
         return NGHTTP2_ERR_DEFERRED;
     }
 
-cleanup:
     if (eos) {
         *data_flags |= NGHTTP2_DATA_FLAG_EOF;
     }
     return length;
 }
 
-apr_status_t h2_stream_read_output(h2_stream *stream)
+static apr_status_t stream_do_response(h2_stream *stream)
+{
+    conn_rec *c1 = stream->session->c1;
+    apr_status_t rv = APR_EAGAIN;
+    int ngrv, is_empty = 0;
+    h2_ngheader *nh = NULL;
+    apr_bucket *b, *e;
+#if AP_HAS_RESPONSE_BUCKETS
+    ap_bucket_response *resp = NULL;
+#else
+    h2_headers *resp = NULL;
+#endif
+    nghttp2_data_provider provider, *pprovider = NULL;
+
+    ap_assert(!stream->response);
+    ap_assert(stream->out_buffer);
+
+    b = APR_BRIGADE_FIRST(stream->out_buffer);
+    while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+        e = APR_BUCKET_NEXT(b);
+        if (APR_BUCKET_IS_METADATA(b)) {
+#if AP_HAS_RESPONSE_BUCKETS
+            if (AP_BUCKET_IS_RESPONSE(b)) {
+                resp = b->data;
+#else /* AP_HAS_RESPONSE_BUCKETS */
+            if (H2_BUCKET_IS_HEADERS(b)) {
+                resp = h2_bucket_headers_get(b);
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
+                APR_BUCKET_REMOVE(b);
+                apr_bucket_destroy(b);
+                ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+                              H2_STRM_MSG(stream, "process response %d"),
+                              resp->status);
+                is_empty = (e != APR_BRIGADE_SENTINEL(stream->out_buffer)
+                            && APR_BUCKET_IS_EOS(e));
+                break;
+            }
+            else if (APR_BUCKET_IS_EOS(b)) {
+                h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+                rv = APR_EINVAL;
+                goto cleanup;
+            }
+            else if (AP_BUCKET_IS_ERROR(b)) {
+                stream_do_error_bucket(stream, b);
+                rv = APR_EINVAL;
+                goto cleanup;
+            }
+        }
+        else {
+            /* data buckets before response headers, an error */
+            h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+            rv = APR_EINVAL;
+            goto cleanup;
+        }
+        b = e;
+    }
+
+    if (!resp) {
+        rv = APR_EAGAIN;
+        goto cleanup;
+    }
+
+    if (resp->status < 100) {
+        h2_stream_rst(stream, resp->status);
+        goto cleanup;
+    }
+
+    if (resp->status == HTTP_FORBIDDEN && resp->notes) {
+        const char *cause = apr_table_get(resp->notes, "ssl-renegotiate-forbidden");
+        if (cause) {
+            /* This request triggered a TLS renegotiation that is not allowed
+             * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
+             */
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, resp->status, c1,
+                          H2_STRM_LOG(APLOGNO(03061), stream,
+                          "renegotiate forbidden, cause: %s"), cause);
+            h2_stream_rst(stream, H2_ERR_HTTP_1_1_REQUIRED);
+            goto cleanup;
+        }
+    }
+
+    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
+                  H2_STRM_LOG(APLOGNO(03073), stream,
+                  "submit response %d"), resp->status);
+
+    /* If this stream is not a pushed one itself,
+     * and HTTP/2 server push is enabled here,
+     * and the response HTTP status is not sth >= 400,
+     * and the remote side has pushing enabled,
+     * -> find and perform any pushes on this stream
+     *    *before* we submit the stream response itself.
+     *    This helps clients avoid opening new streams for Link'ed
+     *    resources that get pushed right afterwards.
+     *
+     * *) the response code is relevant, as we do not want to
+     *    make pushes on 401 or 403 codes and friends.
+     *    And if we see a 304, we do not push either, as the client,
+     *    having this resource in its cache, might have the pushed
+     *    resources cached as well.
+     */
+    if (!stream->initiated_on
+        && !stream->response
+        && stream->request && stream->request->method
+        && !strcmp("GET", stream->request->method)
+        && (resp->status < 400)
+        && (resp->status != 304)
+        && h2_session_push_enabled(stream->session)) {
+        /* PUSH is possible and enabled on server, unless the request
+         * denies it, submit resources to push */
+        const char *s = apr_table_get(resp->notes, H2_PUSH_MODE_NOTE);
+        if (!s || strcmp(s, "0")) {
+            h2_stream_submit_pushes(stream, resp);
+        }
+    }
+
+    if (!stream->pref_priority) {
+        stream->pref_priority = h2_stream_get_priority(stream, resp);
+    }
+    h2_session_set_prio(stream->session, stream, stream->pref_priority);
+
+    if (resp->status == 103
+        && !h2_config_sgeti(stream->session->s, H2_CONF_EARLY_HINTS)) {
+        /* suppress sending this to the client, it might have triggered
+         * pushes and served its purpose nevertheless */
+        rv = APR_SUCCESS;
+        goto cleanup;
+    }
+    if (resp->status >= 200) {
+        stream->response = resp;
+    }
+
+    if (!is_empty) {
+        memset(&provider, 0, sizeof(provider));
+        provider.source.fd = stream->id;
+        provider.read_callback = stream_data_cb;
+        pprovider = &provider;
+    }
+
+    rv = h2_res_create_ngheader(&nh, stream->pool, resp);
+    if (APR_SUCCESS != rv) {
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+                      H2_STRM_LOG(APLOGNO(10025), stream, "invalid response"));
+        h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+        goto cleanup;
+    }
+
+    ngrv = nghttp2_submit_response(stream->session->ngh2, stream->id,
+                                   nh->nv, nh->nvlen, pprovider);
+    if (nghttp2_is_fatal(ngrv)) {
+        rv = APR_EGENERAL;
+        h2_session_dispatch_event(stream->session,
+                                 H2_SESSION_EV_PROTO_ERROR, ngrv, nghttp2_strerror(rv));
+        ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
+                      APLOGNO(02940) "submit_response: %s",
+                      nghttp2_strerror(rv));
+        goto cleanup;
+    }
+
+    if (stream->initiated_on) {
+        ++stream->session->pushes_submitted;
+    }
+    else {
+        ++stream->session->responses_submitted;
+    }
+
+cleanup:
+    return rv;
+}
+
+static void stream_do_responses(h2_stream *stream)
+{
+    h2_session *session = stream->session;
+    conn_rec *c1 = session->c1;
+    apr_status_t rv;
+
+    ap_assert(!stream->response);
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+                  H2_STRM_MSG(stream, "do_response"));
+    rv = buffer_output_receive(stream);
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+                  H2_SSSN_STRM_MSG(session, stream->id,
+                  "buffer_output_received2"));
+    if (APR_SUCCESS != rv && APR_EAGAIN != rv) {
+        h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+    }
+    else {
+        /* process all headers sitting at the buffer head. */
+        do {
+            rv = stream_do_response(stream);
+        } while (APR_SUCCESS == rv
+                 && !stream->rst_error
+                 && !stream->response);
+    }
+}
+
+void h2_stream_on_output_change(h2_stream *stream)
 {
     conn_rec *c1 = stream->session->c1;
     apr_status_t rv = APR_EAGAIN;
@@ -1556,47 +1657,56 @@ apr_status_t h2_stream_read_output(h2_stream *stream)
         /* c2 has not assigned the output beam to the stream (yet). */
         ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c1,
                       H2_STRM_MSG(stream, "read_output, no output beam registered"));
-        rv = APR_EAGAIN;
-        goto cleanup;
     }
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
-                  H2_STRM_MSG(stream, "read_output"));
-
-    if (h2_stream_was_closed(stream)) {
+    else if (h2_stream_is_at_or_past(stream, H2_SS_CLOSED)) {
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
                       H2_STRM_LOG(APLOGNO(10301), stream, "already closed"));
-        rv = APR_EOF;
-        goto cleanup;
     }
-    else if (stream->state == H2_SS_CLOSED_L) {
+    else if (h2_stream_is_at(stream, H2_SS_CLOSED_L)) {
         /* We have delivered a response to a stream that was not closed
          * by the client. This could be a POST with body that we negate
          * and we need to RST_STREAM to end it. */
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
                       H2_STRM_LOG(APLOGNO(10313), stream, "remote close missing"));
-        nghttp2_submit_rst_stream(stream->session->ngh2, NGHTTP2_FLAG_NONE,
-                                  stream->id, NGHTTP2_NO_ERROR);
-        rv = APR_EOF;
-        goto cleanup;
+        h2_stream_rst(stream, H2_ERR_NO_ERROR);
     }
-
-    rv = buffer_output_receive(stream);
-    if (APR_SUCCESS != rv && APR_EAGAIN != rv) goto cleanup;
-
-    /* process all headers sitting at the buffer head. */
-    while (1) {
-        rv = buffer_output_process_headers(stream);
-        if (APR_EAGAIN == rv) {
-            rv = APR_SUCCESS;
-            break;
+    else {
+        /* stream is not closed, a change in output happened. There are
+         * two modes of operation here:
+         * 1) the final response has been submitted. nghttp2 is invoking
+         *    stream_data_cb() to progress the stream. This handles DATA,
+         *    trailers, EOS and ERRORs.
+         *    When stream_data_cb() runs out of things to send, it returns
+         *    NGHTTP2_ERR_DEFERRED and nghttp2 *suspends* further processing
+         *    until we tell it to resume.
+         * 2) We have not seen the *final* response yet. The stream cannot
+         *    send any response DATA. The nghttp2 stream_data_cb() is not
+         *    invoked. We need to receive output, expecting not DATA but
+         *    RESPONSEs (intermediate ones may arrive) and submit those. On
+         *    the final response, nghttp2 will start calling stream_data_cb().
+         */
+        if (stream->response) {
+            nghttp2_session_resume_data(stream->session->ngh2, stream->id);
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+                          H2_STRM_MSG(stream, "resumed"));
+        }
+        else {
+            stream_do_responses(stream);
+            if (!stream->rst_error) {
+                nghttp2_session_resume_data(stream->session->ngh2, stream->id);
+            }
         }
-        if (APR_SUCCESS != rv) goto cleanup;
     }
+}
 
-    nghttp2_session_resume_data(stream->session->ngh2, stream->id);
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
-                  H2_STRM_MSG(stream, "resumed"));
-
-cleanup:
-    return rv;
+void h2_stream_on_input_change(h2_stream *stream)
+{
+    ap_assert(stream->input);
+    h2_beam_report_consumption(stream->input);
+    if (h2_stream_is_at(stream, H2_SS_CLOSED_L)
+        && !h2_mplx_c1_stream_is_running(stream->session->mplx, stream)) {
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c1,
+                      H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing"));
+        h2_stream_rst(stream, H2_ERR_NO_ERROR);
+    }
 }
index 5b5ef35c51c0af6e1ddf56ee4b624a8c5180a5a0..695d56ac5e1cf0797ff16fc4c7000cb29b32dc07 100644 (file)
@@ -87,7 +87,7 @@ struct h2_stream {
     apr_bucket_brigade *in_buffer;
     int in_window_size;
     apr_time_t in_last_write;
-
+    
     struct h2_bucket_beam *output;
     apr_bucket_brigade *out_buffer;
 
@@ -100,7 +100,7 @@ struct h2_stream {
     unsigned int output_eos : 1; /* output EOS in buffer/sent */
 
     conn_rec *c2;               /* connection processing stream */
-
+    
     const h2_priority *pref_priority; /* preferred priority for this stream */
     apr_off_t out_frames;       /* # of frames sent out */
     apr_off_t out_frame_octets; /* # of RAW frame octets sent out */
@@ -109,7 +109,7 @@ struct h2_stream {
     apr_off_t in_data_frames;   /* # of DATA frames received */
     apr_off_t in_data_octets;   /* # of DATA octets (payload) received */
     apr_off_t in_trailer_octets; /* # of HEADER octets (payload) received in trailers */
-
+    
     h2_stream_monitor *monitor; /* optional monitor for stream states */
 };
 
@@ -121,13 +121,13 @@ struct h2_stream {
  * @param id      the stream identifier
  * @param pool    the memory pool to use for this stream
  * @param session the session this stream belongs to
- * @param monitor an optional monitor to be called for events and
+ * @param monitor an optional monitor to be called for events and 
 *                state transitions
  * @param initiated_on the id of the stream this one was initiated on (PUSH)
  *
  * @return the newly opened stream
  */
-h2_stream *h2_stream_create(int id, apr_pool_t *pool,
+h2_stream *h2_stream_create(int id, apr_pool_t *pool, 
                             struct h2_session *session,
                             h2_stream_monitor *monitor,
                             int initiated_on);
@@ -138,9 +138,9 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool,
 void h2_stream_destroy(h2_stream *stream);
 
 /**
- * Setup the input for the stream.
+ * Perform any late initialization before stream starts processing.
  */
-apr_status_t h2_stream_setup_input(h2_stream *stream);
+apr_status_t h2_stream_prepare_processing(h2_stream *stream);
 
 /*
  * Set a new monitor for this stream, replacing any existing one. Can
@@ -155,6 +155,22 @@ void h2_stream_set_monitor(h2_stream *stream, h2_stream_monitor *monitor);
  */
 void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev);
 
+/**
+ * Determine if stream is at given state.
+ * @param stream the stream to check
+ * @param state the state to look for
+ * @return != 0 iff stream is at given state.
+ */
+int h2_stream_is_at(const h2_stream *stream, h2_stream_state_t state);
+
+/**
+ * Determine if stream has reached the given state or is past it.
+ * @param stream the stream to check
+ * @param state the state to look for
+ * @return != 0 iff stream is at or past given state.
+ */
+int h2_stream_is_at_or_past(const h2_stream *stream, h2_stream_state_t state);
+
 /**
 * Cleanup references into request processing.
  *
@@ -170,7 +186,7 @@ apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount);
 
 /**
  * Set complete stream headers from given h2_request.
- *
+ * 
  * @param stream stream to write request to
  * @param r the request with all the meta data
  * @param eos != 0 iff stream input is closed
@@ -179,16 +195,16 @@ void h2_stream_set_request(h2_stream *stream, const h2_request *r);
 
 /**
  * Set complete stream header from given request_rec.
- *
+ * 
  * @param stream stream to write request to
  * @param r the request with all the meta data
  * @param eos != 0 iff stream input is closed
  */
-apr_status_t h2_stream_set_request_rec(h2_stream *stream,
+apr_status_t h2_stream_set_request_rec(h2_stream *stream, 
                                        request_rec *r, int eos);
 
 /*
- * Add a HTTP/2 header (including pseudo headers) or trailer
+ * Add a HTTP/2 header (including pseudo headers) or trailer 
  * to the given stream, depending on stream state.
  *
  * @param stream stream to write the header to
@@ -200,7 +216,7 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream,
 apr_status_t h2_stream_add_header(h2_stream *stream,
                                   const char *name, size_t nlen,
                                   const char *value, size_t vlen);
-
+                                  
 /* End the construction of request headers */
 apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes);
 
@@ -228,24 +244,20 @@ apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
 void h2_stream_rst(h2_stream *stream, int error_code);
 
 /**
- * Determine if stream was closed already. This is true for
- * states H2_SS_CLOSED, H2_SS_CLEANUP. But not true
- * for H2_SS_CLOSED_L and H2_SS_CLOSED_R.
- *
- * @param stream the stream to check on
- * @return != 0 iff stream has been closed
+ * Stream input signals change. Take necessary actions.
+ * @param stream the stream whose input changed
  */
-int h2_stream_was_closed(const h2_stream *stream);
+void h2_stream_on_input_change(h2_stream *stream);
 
 /**
- * Inspect the c2 output for response(s) and data.
+ * Stream output signals change. Take necessary actions.
  * @param stream the stream to read output for
  */
-apr_status_t h2_stream_read_output(h2_stream *stream);
+void h2_stream_on_output_change(h2_stream *stream);
 
 /**
  * Read a maximum number of bytes into the bucket brigade.
- *
+ * 
  * @param stream the stream to read from
  * @param bb the brigade to append output to
  * @param plen (in-/out) max. number of bytes to append and on return actual
@@ -255,7 +267,7 @@ apr_status_t h2_stream_read_output(h2_stream *stream);
 *         APR_EAGAIN if no data is available and end of stream has not been
  *         reached yet.
  */
-apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
+apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb, 
                                apr_off_t *plen, int *peos);
 
 /**
index 47b1309763123eb65ee0b65a799d7ec92c7a2c5d..f40bce1b8779fadd54fd1ccf7e96192381bce27d 100644 (file)
@@ -1881,4 +1881,4 @@ apr_size_t response_length_estimate(ap_bucket_response *resp)
     return len;
 }
 
-#endif /* AP_HAS_RESPONSE_BUCKETS */
\ No newline at end of file
+#endif /* AP_HAS_RESPONSE_BUCKETS */
index 1582fca8e341fe4cdaadcf23fb736876ee86bc4e..f6bd44bba66eadc3ff26f48a7fa76e9e41f6a8a7 100644 (file)
@@ -408,11 +408,11 @@ apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
 apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
                                     const struct h2_request *req);
 #else
-apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
-                                     struct h2_headers *headers);
-apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
-                                    struct h2_headers *headers);
-apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p, 
+                                     struct h2_headers *headers); 
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p, 
+                                    struct h2_headers *headers); 
+apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p, 
                                     const struct h2_request *req);
 #endif
 
index f0da61ed405db2bf7debcefdb971df880f467384..c939b8c8af73efec03b88af753ebc3da25e74834 100644 (file)
@@ -27,7 +27,7 @@
  * @macro
  * Version number of the http2 module as c string
  */
-#define MOD_HTTP2_VERSION "2.0.8-dev"
+#define MOD_HTTP2_VERSION "2.0.10"
 
 /**
  * @macro
@@ -35,7 +35,7 @@
  * release. This is a 24 bit number with 8 bits for major number, 8 bits
  * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
  */
-#define MOD_HTTP2_VERSION_NUM 0x020008
+#define MOD_HTTP2_VERSION_NUM 0x02000a
 
 
 #endif /* mod_h2_h2_version_h */
index 215d8fa22c0b8d6be235de9eca264fc9eedadcf0..e7e2039b90a04a971e94bfa6cb81e708c157a864 100644 (file)
@@ -432,6 +432,7 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pchild,
 
     ap_assert(s);
     ap_assert(pchild);
+    ap_assert(idle_limit > 0);
 
     /* let's have our own pool that will be parent to all h2_worker
      * instances we create. This happens in various threads, but always
@@ -458,7 +459,7 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pchild,
     workers->pool = pool;
     workers->min_active = min_active;
     workers->max_slots = max_slots;
-    workers->idle_limit = (idle_limit > 0)? idle_limit : apr_time_from_sec(10);
+    workers->idle_limit = idle_limit;
     workers->dynamic = (workers->min_active < workers->max_slots);
 
     ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
index ddef2bee06d81f2381273d63b268da1cb284692d..8a1ee3faa5d0fa0b30a0a463e5ac1fee396b4ac1 100644 (file)
@@ -335,8 +335,7 @@ static int h2_h2_fixups(request_rec *r)
     if (r->connection->master) {
         h2_conn_ctx_t *ctx = h2_conn_ctx_get(r->connection);
         unsigned int i;
-        apr_interval_time_t stream_timeout;
-        
+
         for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {
             h2_var_def *vdef = &H2_VARS[i];
             if (vdef->subprocess) {
@@ -345,10 +344,6 @@ static int h2_h2_fixups(request_rec *r)
                                             r, ctx));
             }
         }
-        stream_timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT);
-        if (stream_timeout > 0) {
-            h2_conn_ctx_set_timeout(ctx, stream_timeout);
-        }
     }
     return DECLINED;
 }
index afba3bb32de60f510a8894d80dd30a9cb3b768eb..3faf03472bbe3c82b477af9835c3ba1dfc0fedfc 100644 (file)
@@ -249,7 +249,7 @@ static apr_status_t ctx_run(h2_proxy_ctx *ctx) {
     ctx->r_done = 0;
     add_request(ctx->session, ctx->r);
     
-    while (!ctx->master->aborted && !ctx->r_done) {
+    while (!ctx->owner->aborted && !ctx->r_done) {
     
         status = h2_proxy_session_process(ctx->session);
         if (status != APR_SUCCESS) {
@@ -267,16 +267,13 @@ static apr_status_t ctx_run(h2_proxy_ctx *ctx) {
     }
     
 out:
-    if (ctx->master->aborted) {
+    if (ctx->owner->aborted) {
         /* master connection gone */
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, 
                       APLOGNO(03374) "eng(%s): master connection gone", ctx->id);
         /* cancel all ongoing requests */
         h2_proxy_session_cancel_all(ctx->session);
         h2_proxy_session_process(ctx->session);
-        if (!ctx->master->aborted) {
-            status = ctx->r_status = APR_SUCCESS;
-        }
     }
     
     ctx->session->user_data = NULL;
@@ -348,7 +345,7 @@ static int proxy_http2_handler(request_rec *r,
                   "H2: serving URL %s", url);
     
 run_connect:    
-    if (ctx->master->aborted) goto cleanup;
+    if (ctx->owner->aborted) goto cleanup;
 
     /* Get a proxy_conn_rec from the worker, might be a new one, might
      * be one still open from another request, or it might fail if the
@@ -400,10 +397,10 @@ run_connect:
                        "proxy-request-alpn-protos", "h2");
     }
 
-    if (ctx->master->aborted) goto cleanup;
+    if (ctx->owner->aborted) goto cleanup;
     status = ctx_run(ctx);
 
-    if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->master->aborted) {
+    if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->owner->aborted) {
         /* Not successfully processed, but may retry, tear down old conn and start over */
         if (ctx->p_conn) {
             ctx->p_conn->close = 1;
index 55dce591071c0192d2703759e6f01101f212212c..537d3bf37f21eb0c72caf6bdf8c90bb096e3bb29 100644 (file)
@@ -95,6 +95,9 @@ class H2TestEnv(HttpdTestEnv):
             'AH02429',  # invalid chars in response header names, see test_h2_200
             'AH02430',  # invalid chars in response header values, see test_h2_200
             'AH10373',  # SSL errors on uncompleted handshakes, see test_h2_105
+            'AH01247',  # mod_cgid sometimes freaks out on load tests
+            'AH01110',  # error by proxy reading response
+            'AH10400',  # warning that 'enablereuse' has no effect in certain configs, see test_h2_600
         ])
         self.httpd_error_log.add_ignored_patterns([
             re.compile(r'.*malformed header from script \'hecho.py\': Bad header: x.*'),
@@ -126,6 +129,9 @@ class H2Conf(HttpdConf):
                 "<Location \"/h2test/delay\">",
                 "    SetHandler h2test-delay",
                 "</Location>",
+                "<Location \"/h2test/error\">",
+                "    SetHandler h2test-error",
+                "</Location>",
             ]
         }))
 
diff --git a/test/modules/http2/htdocs/cgi/alive.json b/test/modules/http2/htdocs/cgi/alive.json
new file mode 100644 (file)
index 0000000..defe2c2
--- /dev/null
@@ -0,0 +1,4 @@
+{
+    "host" : "cgi",
+    "alive" : true
+}
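The new alive.json is a small static JSON document placed in the cgi docroot. A minimal pytest-style sketch of using it as a backend liveness probe; the URL and vhost are assumptions derived from the docroot location:

    def test_cgi_backend_alive(self, env):
        # hypothetical liveness probe against the new static file
        url = env.mkurl("https", "cgi", "/alive.json")
        r = env.curl_get(url)
        assert r.exit_code == 0, r
        assert r.response["status"] == 200
        assert r.json == {"host": "cgi", "alive": True}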
index f9aed3f1a470851ec78ab02d9e7ba49673b78e4b..20974bfdd3f143dd3a0f49ab33f2b24bfad99305 100644 (file)
@@ -6,12 +6,15 @@ print("Content-Type: application/json")
 print()
 print("{")
 print("  \"https\" : \"%s\"," % (os.getenv('HTTPS', '')))
-print("  \"x_host\" : \"%s\"," % (os.getenv('X_HOST', '')))
-print("  \"host\" : \"%s\"," % (os.getenv('SERVER_NAME', '')))
+print("  \"host\" : \"%s\"," % (os.getenv('X_HOST', '') \
+    if 'X_HOST' in os.environ else os.getenv('SERVER_NAME', '')))
+print("  \"server\" : \"%s\"," % (os.getenv('SERVER_NAME', '')))
+print("  \"h2_original_host\" : \"%s\"," % (os.getenv('H2_ORIGINAL_HOST', '')))
 print("  \"port\" : \"%s\"," % (os.getenv('SERVER_PORT', '')))
 print("  \"protocol\" : \"%s\"," % (os.getenv('SERVER_PROTOCOL', '')))
 print("  \"ssl_protocol\" : \"%s\"," % (os.getenv('SSL_PROTOCOL', '')))
 print("  \"h2\" : \"%s\"," % (os.getenv('HTTP2', '')))
-print("  \"h2push\" : \"%s\"" % (os.getenv('H2PUSH', '')))
+print("  \"h2push\" : \"%s\"," % (os.getenv('H2PUSH', '')))
+print("  \"h2_stream_id\" : \"%s\"" % (os.getenv('H2_STREAM_ID', '')))
 print("}")
 
index 0b0f057e4a03d8bb09403181c5051a1951569d50..b5ee8ad6e4e2eba8062454012a4427e4cdf68018 100644 (file)
@@ -280,7 +280,7 @@ static int h2test_delay_handler(request_rec *r)
 
 cleanup:
     ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
-                  "delay_handler: request cleanup, r->status=%d, aborted=%d",
+                  "delay_handler: request cleanup, r->status=%d, aborte=%d",
                   r->status, c->aborted);
     if (rv == APR_SUCCESS
         || r->status != HTTP_OK
@@ -297,7 +297,6 @@ static int h2test_trailer_handler(request_rec *r)
     apr_bucket *b;
     apr_status_t rv;
     char buffer[8192];
-    int i, chunks = 3;
     long l;
     int body_len = 0;
 
@@ -345,7 +344,7 @@ static int h2test_trailer_handler(request_rec *r)
 
 cleanup:
     ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
-                  "trailer_handler: request cleanup, r->status=%d, aborted=%d",
+                  "trailer_handler: request cleanup, r->status=%d, aborte=%d",
                   r->status, c->aborted);
     if (rv == APR_SUCCESS
         || r->status != HTTP_OK
@@ -355,6 +354,154 @@ cleanup:
     return AP_FILTER_ERROR;
 }
 
+static int status_from_str(const char *s, apr_status_t *pstatus)
+{
+    if (!strcmp("timeout", s)) {
+        *pstatus = APR_TIMEUP;
+        return 1;
+    }
+    else if (!strcmp("reset", s)) {
+        *pstatus = APR_ECONNRESET;
+        return 1;
+    }
+    return 0;
+}
+
+static int h2test_error_handler(request_rec *r)
+{
+    conn_rec *c = r->connection;
+    apr_bucket_brigade *bb;
+    apr_bucket *b;
+    apr_status_t rv;
+    char buffer[8192];
+    int i, chunks = 3, error_bucket = 1;
+    long l;
+    apr_time_t delay = 0, body_delay = 0;
+    apr_array_header_t *args = NULL;
+    int http_status = 200;
+    apr_status_t error = APR_SUCCESS, body_error = APR_SUCCESS;
+
+    if (strcmp(r->handler, "h2test-error")) {
+        return DECLINED;
+    }
+    if (r->method_number != M_GET && r->method_number != M_POST) {
+        return DECLINED;
+    }
+
+    if (r->args) {
+        args = apr_cstr_split(r->args, "&", 1, r->pool);
+        for (i = 0; i < args->nelts; ++i) {
+            char *s, *val, *arg = APR_ARRAY_IDX(args, i, char*);
+            s = strchr(arg, '=');
+            if (s) {
+                *s = '\0';
+                val = s + 1;
+                if (!strcmp("status", arg)) {
+                    http_status = (int)apr_atoi64(val);
+                    if (http_status > 0) {
+                        continue;
+                    }
+                }
+                else if (!strcmp("error", arg)) {
+                    if (status_from_str(val, &error)) {
+                        continue;
+                    }
+                }
+                else if (!strcmp("error_bucket", arg)) {
+                    error_bucket = (int)apr_atoi64(val);
+                    if (error_bucket >= 0) {
+                        continue;
+                    }
+                }
+                else if (!strcmp("body_error", arg)) {
+                    if (status_from_str(val, &body_error)) {
+                        continue;
+                    }
+                }
+                else if (!strcmp("delay", arg)) {
+                    rv = duration_parse(&delay, r->args, "s");
+                    if (APR_SUCCESS == rv) {
+                        continue;
+                    }
+                }
+                else if (!strcmp("body_delay", arg)) {
+                    rv = duration_parse(&body_delay, r->args, "s");
+                    if (APR_SUCCESS == rv) {
+                        continue;
+                    }
+                }
+            }
+            ap_die(HTTP_BAD_REQUEST, r);
+            return OK;
+        }
+    }
+
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "error_handler: processing request, %s",
+                  r->args? r->args : "(no args)");
+    r->status = http_status;
+    r->clength = -1;
+    r->chunked = 1;
+    apr_table_unset(r->headers_out, "Content-Length");
+    /* Discourage content-encodings */
+    apr_table_unset(r->headers_out, "Content-Encoding");
+    apr_table_setn(r->subprocess_env, "no-brotli", "1");
+    apr_table_setn(r->subprocess_env, "no-gzip", "1");
+
+    ap_set_content_type(r, "application/octet-stream");
+    bb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+    if (delay) {
+        apr_sleep(delay);
+    }
+    if (error != APR_SUCCESS) {
+        return ap_map_http_request_error(error, HTTP_BAD_REQUEST);
+    }
+    /* flush response */
+    b = apr_bucket_flush_create(c->bucket_alloc);
+    APR_BRIGADE_INSERT_TAIL(bb, b);
+    rv = ap_pass_brigade(r->output_filters, bb);
+    if (APR_SUCCESS != rv) goto cleanup;
+
+    memset(buffer, 'X', sizeof(buffer));
+    l = sizeof(buffer);
+    for (i = 0; i < chunks; ++i) {
+        if (body_delay) {
+            apr_sleep(body_delay);
+        }
+        rv = apr_brigade_write(bb, NULL, NULL, buffer, l);
+        if (APR_SUCCESS != rv) goto cleanup;
+        rv = ap_pass_brigade(r->output_filters, bb);
+        if (APR_SUCCESS != rv) goto cleanup;
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+                      "error_handler: passed %ld bytes as response body", l);
+        if (body_error != APR_SUCCESS) {
+            rv = body_error;
+            goto cleanup;
+        }
+    }
+    /* we are done */
+    b = apr_bucket_eos_create(c->bucket_alloc);
+    APR_BRIGADE_INSERT_TAIL(bb, b);
+    rv = ap_pass_brigade(r->output_filters, bb);
+    apr_brigade_cleanup(bb);
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, "error_handler: response passed");
+
+cleanup:
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
+                  "error_handler: request cleanup, r->status=%d, aborted=%d",
+                  r->status, c->aborted);
+    if (rv == APR_SUCCESS) {
+        return OK;
+    }
+    if (error_bucket) {
+        http_status = ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
+        b = ap_bucket_error_create(http_status, NULL, r->pool, c->bucket_alloc);
+        APR_BRIGADE_INSERT_TAIL(bb, b);
+        ap_pass_brigade(r->output_filters, bb);
+    }
+    return AP_FILTER_ERROR;
+}
+
 /* Install this module into the apache2 infrastructure.
  */
 static void h2test_hooks(apr_pool_t *pool)
@@ -375,5 +522,6 @@ static void h2test_hooks(apr_pool_t *pool)
     ap_hook_handler(h2test_echo_handler, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_handler(h2test_delay_handler, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_handler(h2test_trailer_handler, NULL, NULL, APR_HOOK_MIDDLE);
+    ap_hook_handler(h2test_error_handler, NULL, NULL, APR_HOOK_MIDDLE);
 }
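The new h2test-error handler is driven entirely by the query string: 'status' sets the response status, 'error' fails before any response is produced, 'body_error' fails while streaming the body (both accept 'timeout' for APR_TIMEUP and 'reset' for APR_ECONNRESET), 'delay'/'body_delay' insert pauses, and 'error_bucket=0' suppresses the error bucket otherwise generated on a body failure. A hedged pytest-style sketch of exercising it, mirroring the tests added below; the /h2test/error location comes from the updated test env, and the body_error expectation is what the currently skipped tests aim for:

    def test_error_injection_sketch(self, env):
        # clean error status, fully generated by the handler
        url = env.mkurl("https", "cgi", "/h2test/error?status=500")
        r = env.curl_get(url)
        assert r.exit_code == 0, r
        assert r.response["status"] == 500
        # failure before the response: 'timeout' maps to a 408
        url = env.mkurl("https", "cgi", "/h2test/error?error=timeout")
        r = env.curl_get(url)
        assert r.exit_code == 0, r
        assert r.response["status"] == 408
        # failure while streaming the body: the stream should get reset
        url = env.mkurl("https", "cgi", "/h2test/error?body_error=reset")
        r = env.curl_get(url)
        assert r.exit_code != 0, r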
 
index 30f18d3524ef7f31acd40eab558d90fb2c14f638..5928448d2a8b5398a23767cc53dd4546f62e82e4 100644 (file)
@@ -237,3 +237,31 @@ content-type: text/html
         assert r.response['status'] == 200
         assert 'date' in r.response['header']
         assert 'server' in r.response['header']
+
+    # lets do some error tests
+    def test_h2_003_70(self, env):
+        url = env.mkurl("https", "cgi", "/h2test/error?status=500")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 500
+        url = env.mkurl("https", "cgi", "/h2test/error?error=timeout")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 408
+
+    # produce an error during response body
+    def test_h2_003_71(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        url = env.mkurl("https", "cgi", "/h2test/error?body_error=timeout")
+        r = env.curl_get(url)
+        assert r.exit_code != 0, f"{r}"
+        url = env.mkurl("https", "cgi", "/h2test/error?body_error=reset")
+        r = env.curl_get(url)
+        assert r.exit_code != 0, f"{r}"
+
+    # produce an error, fail to generate an error bucket
+    def test_h2_003_72(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        url = env.mkurl("https", "cgi", "/h2test/error?body_error=timeout&error_bucket=0")
+        r = env.curl_get(url)
+        assert r.exit_code != 0, f"{r}"
index dfa5f2dbc7741c7e5d41a954c5cc360aeb9186bb..13aa8ed07afd765eae8fc783bdd6805ba6c8518c 100644 (file)
@@ -146,4 +146,4 @@ class TestTimeout:
                     break
             piper.close()
             assert piper.response
-            assert piper.response['status'] == 408
+            assert piper.response['status'] == 408, f"{piper.response}"
index 8571955dd7ae89ac1f676ec027e31815701999b7..4b4fc42c788a76b01c3b537d52b68b6a7423d586 100644 (file)
@@ -86,7 +86,7 @@ class TestTrailers:
         url = env.mkurl("https", "cgi", "/h2test/trailer?0")
         r = env.nghttp().get(url)
         assert r.response["status"] == 200
-        assert len(r.response["body"]) == 0
+        assert len(r.response["body"]) == 0, f'{r.response["body"]}'
         assert 'trailer' in r.response
         assert 'trailer-content-length' in r.response['trailer']
         assert r.response['trailer']['trailer-content-length'] == '0'
diff --git a/test/modules/http2/test_203_rfc9113.py b/test/modules/http2/test_203_rfc9113.py
new file mode 100644 (file)
index 0000000..326462f
--- /dev/null
@@ -0,0 +1,42 @@
+import pytest
+
+from pyhttpd.env import HttpdTestEnv
+from .env import H2Conf
+
+
+class TestRfc9113:
+
+    @pytest.fixture(autouse=True, scope='class')
+    def _class_scope(self, env):
+        H2Conf(env).add_vhost_test1().install()
+        assert env.apache_restart() == 0
+
+    # by default, we ignore leading/trailing ws
+    # tests with leading ws are not present as curl seems to silently eat those
+    def test_h2_203_01_ws_ignore(self, env):
+        url = env.mkurl("https", "test1", "/")
+        r = env.curl_get(url, options=['-H', 'trailing-space: must not  '])
+        assert r.exit_code == 0, f'curl output: {r.stderr}'
+        assert r.response["status"] == 200, f'curl output: {r.stdout}'
+        r = env.curl_get(url, options=['-H', 'trailing-space: must not\t'])
+        assert r.exit_code == 0, f'curl output: {r.stderr}'
+        assert r.response["status"] == 200, f'curl output: {r.stdout}'
+
+    # When enabled, leading/trailing ws makes the stream RST
+    # tests with leading ws are not present as curl seems to silently eat those
+    def test_h2_203_02_ws_reject(self, env):
+        if not env.h2load_is_at_least('1.50.0'):
+            pytest.skip(f'need nghttp2 >= 1.50.0')
+        conf = H2Conf(env)
+        conf.add([
+            "H2HeaderStrictness rfc9113"
+        ])
+        conf.add_vhost_test1()
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "test1", "/")
+        r = env.curl_get(url, options=['-H', 'trailing-space: must not  '])
+        assert r.exit_code != 0, f'curl output: {r.stderr}'
+        r = env.curl_get(url, options=['-H', 'trailing-space: must not\t'])
+        assert r.exit_code != 0, f'curl output: {r.stderr}'
+
index 1b851d308036e7c5e9a1e332d2ecd1134349a783..f73dcc4c8c8f6adc0f8454aa9645cefd5e54f87a 100644 (file)
@@ -26,7 +26,7 @@ class TestEarlyHints:
         assert env.apache_restart() == 0
 
     # H2EarlyHints enabled in general, check that it works for H2PushResource
-    def test_h2_401_31(self, env):
+    def test_h2_401_31(self, env, repeat):
         url = env.mkurl("https", "hints", "/006-hints.html")
         r = env.nghttp().get(url)
         assert r.response["status"] == 200
@@ -38,7 +38,7 @@ class TestEarlyHints:
         assert early["header"]["link"]
 
     # H2EarlyHints enabled in general, but does not trigger on added response headers
-    def test_h2_401_32(self, env):
+    def test_h2_401_32(self, env, repeat):
         url = env.mkurl("https", "hints", "/006-nohints.html")
         r = env.nghttp().get(url)
         assert r.response["status"] == 200
index 5eec0522635e4e77273ffb686cc2c00bd0f698f4..d10bcbcb0ced6598bcb2872aee9455a0c5be5e53 100644 (file)
@@ -126,3 +126,28 @@ class TestProxy:
     def test_h2_500_24(self, env):
         for i in range(100):
             self.nghttp_upload_stat(env, "data-1k", ["--no-content-length"])
+
+    # lets do some error tests
+    def test_h2_500_30(self, env):
+        url = env.mkurl("https", "cgi", "/proxy/h2test/error?status=500")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 500
+        url = env.mkurl("https", "cgi", "/proxy/h2test/error?error=timeout")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 408
+
+    # produce an error during response body
+    def test_h2_500_31(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        url = env.mkurl("https", "cgi", "/proxy/h2test/error?body_error=timeout")
+        r = env.curl_get(url)
+        assert r.exit_code != 0, r
+
+    # produce an error, fail to generate an error bucket
+    def test_h2_500_32(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        url = env.mkurl("https", "cgi", "/proxy/h2test/error?body_error=timeout&error_bucket=0")
+        r = env.curl_get(url)
+        assert r.exit_code != 0, r
index 0f368eda03ce8ff0d4fa1d6dd8a2c9c3b6c6f9cc..854195e4b9e543611610e67dd7eb670e7900821e 100644 (file)
@@ -23,7 +23,7 @@ class TestH2Proxy:
         assert r.response["json"]["ssl_protocol"] != ""
         assert r.response["json"]["h2"] == "on"
         assert r.response["json"]["h2push"] == "off"
-        assert r.response["json"]["x_host"] == f"cgi.{env.http_tld}:{env.https_port}"
+        assert r.response["json"]["host"] == f"cgi.{env.http_tld}:{env.https_port}"
 
     def test_h2_600_02(self, env):
         conf = H2Conf(env, extras={
@@ -42,7 +42,8 @@ class TestH2Proxy:
         assert r.response["json"]["protocol"] == "HTTP/2.0"
         assert r.response["json"]["https"] == ""
         # the proxied backend sees Host header as passed on front
-        assert r.response["json"]["x_host"] == f"cgi.{env.http_tld}:{env.https_port}"
+        assert r.response["json"]["host"] == f"cgi.{env.http_tld}:{env.https_port}"
+        assert r.response["json"]["h2_original_host"] == ""
 
     def test_h2_600_03(self, env):
         conf = H2Conf(env, extras={
@@ -61,4 +62,116 @@ class TestH2Proxy:
         assert r.response["json"]["protocol"] == "HTTP/2.0"
         assert r.response["json"]["https"] == ""
         # the proxied backend sees Host as using in connecting to it
-        assert r.response["json"]["x_host"] == f"127.0.0.1:{env.http_port}"
+        assert r.response["json"]["host"] == f"127.0.0.1:{env.http_port}"
+        assert r.response["json"]["h2_original_host"] == ""
+
+    # check that connection reuse actually happens as configured
+    @pytest.mark.parametrize("enable_reuse", [ "on", "off" ])
+    def test_h2_600_04(self, env, enable_reuse):
+        conf = H2Conf(env, extras={
+            f'cgi.{env.http_tld}': [
+                f"ProxyPassMatch ^/h2proxy/([0-9]+)/(.*)$ "
+                f"  h2c://127.0.0.1:$1/$2 enablereuse={enable_reuse} keepalive=on",
+            ]
+        })
+        conf.add_vhost_cgi()
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "cgi", f"/h2proxy/{env.http_port}/hello.py")
+        r = env.curl_get(url, 5)
+        assert r.response["status"] == 200
+        assert r.json["h2_stream_id"] == "1"
+        # httpd 2.5.0 disables reuse, no matter the config
+        if enable_reuse == "on" and not env.httpd_is_at_least("2.5.0"):
+            # reuse is not guaranteed for each request, but we expect some
+            # to do it and run on an h2 stream id > 1
+            reused = False
+            for _ in range(10):
+                r = env.curl_get(url, 5)
+                assert r.response["status"] == 200
+                if int(r.json["h2_stream_id"]) > 1:
+                    reused = True
+                    break
+            assert reused
+        else:
+            r = env.curl_get(url, 5)
+            assert r.response["status"] == 200
+            assert r.json["h2_stream_id"] == "1"
+
+    # do some flexible setup from #235 to check proper connection selection
+    @pytest.mark.parametrize("enable_reuse", [ "on", "off" ])
+    def test_h2_600_05(self, env, enable_reuse):
+        conf = H2Conf(env, extras={
+            f'cgi.{env.http_tld}': [
+                f"ProxyPassMatch ^/h2proxy/([0-9]+)/(.*)$ "
+                f"  h2c://127.0.0.1:$1/$2 enablereuse={enable_reuse} keepalive=on",
+            ]
+        })
+        conf.add_vhost_cgi()
+        conf.add([
+            f'Listen {env.http_port2}',
+            'UseCanonicalName On',
+            'UseCanonicalPhysicalPort On'
+        ])
+        conf.start_vhost(domains=[f'cgi.{env.http_tld}'],
+                         port=5004, doc_root="htdocs/cgi")
+        conf.add("AddHandler cgi-script .py")
+        conf.end_vhost()
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "cgi", f"/h2proxy/{env.http_port}/hello.py")
+        r = env.curl_get(url, 5)
+        assert r.response["status"] == 200
+        assert int(r.json["port"]) == env.http_port
+        # going to another backend port must create a new connection and
+        # we should see stream id one again
+        url = env.mkurl("https", "cgi", f"/h2proxy/{env.http_port2}/hello.py")
+        r = env.curl_get(url, 5)
+        assert r.response["status"] == 200
+        exp_port = env.http_port if enable_reuse == "on" \
+                                    and not env.httpd_is_at_least("2.5.0")\
+            else env.http_port2
+        assert int(r.json["port"]) == exp_port
+
+    # lets do some error tests
+    def test_h2_600_30(self, env):
+        conf = H2Conf(env)
+        conf.add_vhost_cgi(h2proxy_self=True)
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?status=500")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 500
+        url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?error=timeout")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 408
+
+    # produce an error during response body
+    def test_h2_600_31(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        conf = H2Conf(env)
+        conf.add_vhost_cgi(h2proxy_self=True)
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?body_error=timeout")
+        r = env.curl_get(url)
+        # depending on when the error is detected in proxying, it may RST the
+        # stream (exit_code != 0) or give a 503 response.
+        if r.exit_code == 0:
+            assert r.response['status'] == 503
+
+    # produce an error, fail to generate an error bucket
+    def test_h2_600_32(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        conf = H2Conf(env)
+        conf.add_vhost_cgi(h2proxy_self=True)
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?body_error=timeout&error_bucket=0")
+        r = env.curl_get(url)
+        # depending on when the error is detected in proxying, it may RST the
+        # stream (exit_code != 0) or give a 503 response.
+        if r.exit_code == 0:
+            assert r.response['status'] == 503
index ae34e78b4ba1c6deb35d6b343c44bc0d1cb1b4c3..cd3363fb73a75b4a1d2f39f0ff9f028b54ccc140 100644 (file)
@@ -157,8 +157,6 @@ class HttpdConf(object):
         self.start_vhost(domains=[domain, f"cgi-alias.{self.env.http_tld}"],
                          port=self.env.https_port, doc_root="htdocs/cgi")
         self.add_proxies("cgi", proxy_self=proxy_self, h2proxy_self=h2proxy_self)
-        if domain in self._extras:
-            self.add(self._extras[domain])
         self.end_vhost()
         self.start_vhost(domains=[domain, f"cgi-alias.{self.env.http_tld}"],
                          port=self.env.http_port, doc_root="htdocs/cgi")
index 80cab2ba32547134334aae036a543c862b7d9679..e1ae0707ab0003497ed67eb47c666c3cd0a44d57 100644 (file)
@@ -25,6 +25,7 @@ gen_dir = @abs_srcdir@/../gen
 http_port = 5002
 https_port = 5001
 proxy_port = 5003
+http_port2 = 5004
 http_tld = tests.httpd.apache.org
 test_dir = @abs_srcdir@
 test_src_dir = @abs_srcdir@
index 991ead9e113da1d481abc8b0523327011295a01c..af856effe439458a1590bf9d90bcac03b3def0df 100644 (file)
@@ -244,6 +244,7 @@ class HttpdTestEnv:
             self._h2load = 'h2load'
 
         self._http_port = int(self.config.get('test', 'http_port'))
+        self._http_port2 = int(self.config.get('test', 'http_port2'))
         self._https_port = int(self.config.get('test', 'https_port'))
         self._proxy_port = int(self.config.get('test', 'proxy_port'))
         self._http_tld = self.config.get('test', 'http_tld')
@@ -345,6 +346,10 @@ class HttpdTestEnv:
     def http_port(self) -> int:
         return self._http_port
 
+    @property
+    def http_port2(self) -> int:
+        return self._http_port2
+
     @property
     def https_port(self) -> int:
         return self._https_port
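http_port2 exposes the second plain-HTTP port (5004 in config.ini.in) that test_h2_600_05 listens on to force a separate backend connection. A minimal sketch of wiring it into a config via the env property instead of the literal port, assuming the H2Conf helpers used throughout this suite:

    def test_second_backend_listener(self, env):
        conf = H2Conf(env)
        conf.add_vhost_cgi()
        conf.add([f'Listen {env.http_port2}'])
        conf.start_vhost(domains=[f'cgi.{env.http_tld}'],
                         port=env.http_port2, doc_root="htdocs/cgi")
        conf.add("AddHandler cgi-script .py")
        conf.end_vhost()
        conf.install()
        assert env.apache_restart() == 0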
index 6dea97b55c477cc8becf5356ca363f34c4965260..fe4a1aedff366df7d24e6355464cc740ebe2b489 100644 (file)
@@ -121,7 +121,8 @@ class Nghttp:
                                 prev["previous"] = response["previous"]
                             response["previous"] = prev
                     response[hkey] = s["header"]
-                    s["header"] = {} 
+                    s["header"] = {}
+                    body = ''
                 continue
             
             m = re.match(r'(.*)\[.*] recv DATA frame <length=(\d+), .*stream_id=(\d+)>', l)
index 5942d35d9a5a047daa8943bdffdf8ae181520c24..04ea825a31b3c4d1a2baf2681f9cbd0b8124bf46 100644 (file)
@@ -9,21 +9,21 @@ class ExecResult:
                  stdout: bytes, stderr: bytes = None, duration: timedelta = None):
         self._args = args
         self._exit_code = exit_code
-        self._raw = stdout if stdout else b''
-        self._stdout = stdout.decode() if stdout is not None else ""
-        self._stderr = stderr.decode() if stderr is not None else ""
+        self._stdout = stdout if stdout is not None else b''
+        self._stderr = stderr if stderr is not None else b''
         self._duration = duration if duration is not None else timedelta()
         self._response = None
         self._results = {}
         self._assets = []
         # noinspection PyBroadException
         try:
-            self._json_out = json.loads(self._stdout)
+            out = self._stdout.decode()
+            self._json_out = json.loads(out)
         except:
             self._json_out = None
 
     def __repr__(self):
-        return f"ExecResult[code={self.exit_code}, args={self._args}, stdout={self.stdout}, stderr={self.stderr}]"
+        return f"ExecResult[code={self.exit_code}, args={self._args}, stdout={self._stdout}, stderr={self._stderr}]"
 
     @property
     def exit_code(self) -> int:
@@ -35,11 +35,11 @@ class ExecResult:
 
     @property
     def outraw(self) -> bytes:
-        return self._raw
+        return self._stdout
 
     @property
     def stdout(self) -> str:
-        return self._stdout
+        return self._stdout.decode()
 
     @property
     def json(self) -> Optional[Dict]:
@@ -48,7 +48,7 @@ class ExecResult:
 
     @property
     def stderr(self) -> str:
-        return self._stderr
+        return self._stderr.decode()
 
     @property
     def duration(self) -> timedelta: