git.ipfire.org Git - thirdparty/apache/httpd.git/commitdiff
*) core/mod_http/mod_http2:
author Stefan Eissing <icing@apache.org>
Mon, 4 Apr 2022 08:24:09 +0000 (08:24 +0000)
committer Stefan Eissing <icing@apache.org>
Mon, 4 Apr 2022 08:24:09 +0000 (08:24 +0000)
     - adds new meta bucket types REQUEST, RESPONSE and HEADERS to the API
       (a usage sketch follows below this list).
     - adds a new method for setting the standard response headers Date and Server.
     - adds helper methods for formatting parts of HTTP/1.x, such as headers and
       end chunks, for use in non-core parts of the server, e.g. mod_proxy.
     - splits the HTTP_IN filter into a "generic HTTP" and a "specific HTTP/1.x"
       filter; the latter is named HTTP1_BODY_IN.
     - uses HTTP1_BODY_IN only for requests with HTTP version <= 1.1.
     - removes the chunked input simulation from mod_http2.
     - adds a body_indeterminate flag to request_rec that indicates that a request
       body may be present and needs to be read/discarded. This replaces logic
       that assumed that without Content-Length and Transfer-Encoding headers no
       request body can exist.
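
For orientation, here is a minimal sketch (not part of the commit) of how a protocol handler might use the new meta buckets to emit a response followed by trailers, instead of serializing an HTTP/1.1 status line and header block itself. The function name, the header values and the assumption that such a brigade can simply be passed to r->output_filters are illustrative only:

    /* Hypothetical sketch: RESPONSE meta bucket, body, HEADERS (trailer)
     * meta bucket, EOS. */
    static apr_status_t emit_response_sketch(request_rec *r)
    {
        apr_bucket_alloc_t *ba = r->connection->bucket_alloc;
        apr_bucket_brigade *bb = apr_brigade_create(r->pool, ba);
        apr_table_t *headers = apr_table_make(r->pool, 4);
        apr_table_t *trailers = apr_table_make(r->pool, 2);
        apr_bucket *b;

        apr_table_setn(headers, "Content-Type", "text/plain");
        apr_table_setn(trailers, "Example-Trailer", "value");   /* invented */

        b = ap_bucket_response_create(HTTP_OK, NULL, headers, NULL,
                                      r->pool, ba);
        APR_BRIGADE_INSERT_TAIL(bb, b);
        /* ... DATA buckets carrying the body would go here ... */
        b = ap_bucket_headers_create(trailers, r->pool, ba);    /* trailers */
        APR_BRIGADE_INSERT_TAIL(bb, b);
        APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(ba));
        return ap_pass_brigade(r->output_filters, bb);
    }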

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1899547 13f79535-47bb-0310-9956-ffa450edef68

18 files changed:
CMakeLists.txt
include/ap_mmn.h
include/http_protocol.h
include/httpd.h
include/mod_core.h
libhttpd.dsp
modules/http/chunk_filter.c
modules/http/http_core.c
modules/http/http_filters.c
modules/http/http_protocol.c
modules/http2/h2_c2.c
modules/http2/h2_c2_filter.c
modules/http2/h2_mplx.c
modules/http2/h2_request.c
modules/proxy/proxy_util.c
server/Makefile.in
server/headers_bucket.c [new file with mode: 0644]
server/protocol.c

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 71f992cfbb373648cfdfffc28916e262dead4c94..8cc2eaa22cba15053855b3d2957f94dfbd37dd0f 100644 (file)
@@ -699,6 +699,7 @@ SET(LIBHTTPD_SOURCES
   server/eoc_bucket.c
   server/eor_bucket.c
   server/error_bucket.c
+  server/headers_bucket.c
   server/listen.c
   server/log.c
   server/mpm/winnt/child.c
diff --git a/include/ap_mmn.h b/include/ap_mmn.h
index 6457cb21d9a8002e3c70e87e50c75309c1117c7d..04fb184048e8a5a663b593a2efbaad2624b21fd4 100644 (file)
  * 20211221.5 (2.5.1-dev)  Add hook create_secondary_connection and method
  *                         ap_create_secondary_connection() to have connection
  *                         setup of http2-like connections in core.
- *
+ * 20211221.6 (2.5.1-dev)  Add new meta buckets request/response/headers
+ *                         Add field `body_indeterminate` in request_rec
+ *                         Add new http/1.x formatting helpers
+ *                         Add ap_assign_request()
  */
 
 #define MODULE_MAGIC_COOKIE 0x41503235UL /* "AP25" */
 #ifndef MODULE_MAGIC_NUMBER_MAJOR
 #define MODULE_MAGIC_NUMBER_MAJOR 20211221
 #endif
-#define MODULE_MAGIC_NUMBER_MINOR 5             /* 0...n */
+#define MODULE_MAGIC_NUMBER_MINOR 6             /* 0...n */
 
 /**
  * Determine if the server's current MODULE_MAGIC_NUMBER is at least a
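
Because the minor MMN is bumped, third-party modules can detect the new API at compile time. A small hedged sketch (the wrapper function is invented):

    #include "ap_mmn.h"
    #include "httpd.h"
    #include "http_protocol.h"

    /* Only emit a HEADERS trailer bucket when built against a core that
     * has this change (MMN 20211221.6 or later). */
    static void append_trailers(apr_bucket_brigade *bb, request_rec *r,
                                apr_table_t *trailers)
    {
    #if AP_MODULE_MAGIC_AT_LEAST(20211221, 6)
        apr_bucket *b = ap_bucket_headers_create(trailers, r->pool,
                                                 bb->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);
    #else
        (void)bb; (void)r; (void)trailers;  /* older core: nothing to do */
    #endif
    }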
diff --git a/include/http_protocol.h b/include/http_protocol.h
index 38eef396a3e22f29698d8d52918765314341fa87..b522f70928d54a6fd5562ae0bcb7df18a7c46d88 100644 (file)
@@ -67,6 +67,18 @@ AP_DECLARE(request_rec *) ap_create_request(conn_rec *c);
  */
 request_rec *ap_read_request(conn_rec *c);
 
+/**
+ * Assign the method, uri and protocol to the request.
+ * @param r The current request
+ * @param method the HTTP method
+ * @param uri the request uri
+ * @param protocol the request protocol
+ * @return 1 on success, 0 on failure
+ */
+AP_DECLARE(int) ap_assign_request(request_rec *r,
+                                  const char *method, const char *uri,
+                                  const char *protocol);
+
 /**
  * Parse and validate the request line.
  * @param r The current request
@@ -1027,6 +1039,252 @@ AP_DECLARE(apr_bucket *) ap_bucket_error_create(int error, const char *buf,
                                                 apr_pool_t *p,
                                                 apr_bucket_alloc_t *list);
 
+/** @see ap_bucket_type_request */
+typedef struct ap_bucket_request ap_bucket_request;
+
+/**
+ * @struct ap_bucket_request
+ * @brief  A bucket referring to a HTTP request
+ *
+ */
+struct ap_bucket_request {
+    /** Number of buckets using this memory */
+    apr_bucket_refcount refcount;
+    apr_pool_t *pool; /* pool that holds the contents, not for modification */
+    const char *method; /* request method */
+    const char *uri; /* request uri */
+    const char *protocol; /* request protocol */
+    apr_table_t *headers; /* request headers */
+};
+
+/** @see ap_bucket_type_request */
+AP_DECLARE_DATA extern const apr_bucket_type_t ap_bucket_type_request;
+
+/**
+ * Determine if a bucket is a request bucket
+ * @param e The bucket to inspect
+ * @return true or false
+ */
+#define AP_BUCKET_IS_REQUEST(e)         (e->type == &ap_bucket_type_request)
+
+/**
+ * Make the bucket passed in a request bucket
+ * Copies all parameters to the given pool.
+ * @param b The bucket to make into a request bucket
+ * @param method the HTTP method
+ * @param uri the uri requested
+ * @param protocol the protocol requested
+ * @param headers the table of request headers.
+ * @param p A pool to allocate out of.
+ * @return The new bucket, or NULL if allocation failed
+ */
+AP_DECLARE(apr_bucket *) ap_bucket_request_make(
+            apr_bucket *b,
+            const char *method,
+            const char *uri,
+            const char *protocol,
+            apr_table_t *headers,
+            apr_pool_t *p);
+
+/**
+ * Make the bucket passed in a request bucket
+ * Uses all parameters without copying.
+ * @param b The bucket to make into a request bucket
+ * @param method the HTTP method
+ * @param uri the uri requested
+ * @param protocol the protocol requested
+ * @param headers the table of request headers.
+ * @param p A pool to allocate out of.
+ * @return The new bucket, or NULL if allocation failed
+ */
+AP_DECLARE(apr_bucket *) ap_bucket_request_maken(
+            apr_bucket *b,
+            const char *method,
+            const char *uri,
+            const char *protocol,
+            apr_table_t *headers,
+            apr_pool_t *p);
+
+/**
+ * Create a bucket referring to a HTTP request.
+ * Copies all parameters to the given pool.
+ * @param method the HTTP method
+ * @param uri the uri requested
+ * @param protocol the protocol requested
+ * @param headers the table of request headers.
+ * @param p A pool to allocate out of.
+ * @param list The bucket allocator from which to allocate the bucket
+ * @return The new bucket, or NULL if allocation failed
+ */
+AP_DECLARE(apr_bucket *) ap_bucket_request_create(
+            const char *method,
+            const char *uri,
+            const char *protocol,
+            apr_table_t *headers,
+            apr_pool_t *p,
+            apr_bucket_alloc_t *list);
+
+/**
+ * Create a bucket referring to a HTTP request.
+ * Uses all parameters without copying.
+ * @param method the HTTP method
+ * @param uri the uri requested
+ * @param protocol the protocol requested
+ * @param headers the HTTP request headers.
+ * @param p A pool to allocate out of.
+ * @param list The bucket allocator from which to allocate the bucket
+ * @return The new bucket, or NULL if allocation failed
+ */
+AP_DECLARE(apr_bucket *) ap_bucket_request_createn(
+            const char *method,
+            const char *uri,
+            const char *protocol,
+            apr_table_t *headers,
+            apr_pool_t *p,
+            apr_bucket_alloc_t *list);
+
+/**
+ * Clone a request bucket into another pool/bucket_alloc that may
+ * have a separate lifetime than the source bucket/pool.
+ * @param source the request bucket to clone
+ * @param p A pool to allocate the data out of.
+ * @param list The bucket allocator from which to allocate the bucket
+ * @return The new bucket, or NULL if allocation failed
+ */
+AP_DECLARE(apr_bucket *) ap_bucket_request_clone(apr_bucket *source,
+                                                  apr_pool_t *p,
+                                                  apr_bucket_alloc_t *list);
+
+/** @see ap_bucket_type_response */
+typedef struct ap_bucket_response ap_bucket_response;
+
+/**
+ * @struct ap_bucket_response
+ * @brief  A bucket referring to a HTTP response
+ *
+ */
+struct ap_bucket_response {
+    /** Number of buckets using this memory */
+    apr_bucket_refcount refcount;
+    apr_pool_t *pool; /* pool that holds the contents, not for modification */
+    int status; /* The status code */
+    const char *reason; /* The optional HTTP reason for the status. */
+    apr_table_t *headers; /* The response headers */
+    apr_table_t *notes; /* internal notes about the response */
+};
+
+/** @see ap_bucket_type_response */
+AP_DECLARE_DATA extern const apr_bucket_type_t ap_bucket_type_response;
+
+/**
+ * Determine if a bucket is a response bucket
+ * @param e The bucket to inspect
+ * @return true or false
+ */
+#define AP_BUCKET_IS_RESPONSE(e)         (e->type == &ap_bucket_type_response)
+
+/**
+ * Make the bucket passed in a response bucket
+ * @param b The bucket to make into a response bucket
+ * @param status The HTTP status code of the response.
+ * @param reason textual description of status, can be NULL.
+ * @param headers the table of response headers.
+ * @param notes internal notes on the response
+ * @param p A pool to allocate out of.
+ * @return The new bucket, or NULL if allocation failed
+ */
+AP_DECLARE(apr_bucket *) ap_bucket_response_make(apr_bucket *b, int status,
+            const char *reason, apr_table_t *headers,
+            apr_table_t *notes, apr_pool_t *p);
+
+/**
+ * Create a bucket referring to a HTTP response.
+ * @param status The HTTP status code.
+ * @param reason textual description of status, can be NULL.
+ * @param headers the HTTP response headers.
+ * @param notes internal notes on the response
+ * @param p A pool to allocate out of.
+ * @param list The bucket allocator from which to allocate the bucket
+ * @return The new bucket, or NULL if allocation failed
+ */
+AP_DECLARE(apr_bucket *) ap_bucket_response_create(
+            int status, const char *reason,
+            apr_table_t *headers,
+            apr_table_t *notes,
+            apr_pool_t *p,
+            apr_bucket_alloc_t *list);
+
+/**
+ * Clone a RESPONSE bucket into another pool/bucket_alloc that may
+ * have a separate lifetime than the source bucket/pool.
+ * @param source the response bucket to clone
+ * @param p A pool to allocate the data out of.
+ * @param list The bucket allocator from which to allocate the bucket
+ * @return The new bucket, or NULL if allocation failed
+ */
+AP_DECLARE(apr_bucket *) ap_bucket_response_clone(apr_bucket *source,
+                                                  apr_pool_t *p,
+                                                  apr_bucket_alloc_t *list);
+
+/** @see ap_bucket_type_headers */
+typedef struct ap_bucket_headers ap_bucket_headers;
+
+/**
+ * @struct ap_bucket_headers
+ * @brief  A bucket referring to an HTTP header set
+ *
+ */
+struct ap_bucket_headers {
+    /** Number of buckets using this memory */
+    apr_bucket_refcount refcount;
+    apr_pool_t *pool; /* pool that holds the contents, not for modification */
+    apr_table_t *headers; /* The headers */
+
+};
+
+/** @see ap_bucket_type_headers */
+AP_DECLARE_DATA extern const apr_bucket_type_t ap_bucket_type_headers;
+
+/**
+ * Determine if a bucket is a headers bucket
+ * @param e The bucket to inspect
+ * @return true or false
+ */
+#define AP_BUCKET_IS_HEADERS(e)         (e->type == &ap_bucket_type_headers)
+
+/**
+ * Make the bucket passed in a headers bucket
+ * @param b The bucket to make into a headers bucket
+ * @param headers the table of headers.
+ * @param p A pool to allocate out of.
+ * @return The new bucket, or NULL if allocation failed
+ */
+AP_DECLARE(apr_bucket *) ap_bucket_headers_make(apr_bucket *b,
+                apr_table_t *headers, apr_pool_t *p);
+
+/**
+ * Create a bucket referring to a table of HTTP headers.
+ * @param headers the HTTP headers in the bucket.
+ * @param p A pool to allocate out of.
+ * @param list The bucket allocator from which to allocate the bucket
+ * @return The new bucket, or NULL if allocation failed
+ */
+AP_DECLARE(apr_bucket *) ap_bucket_headers_create(apr_table_t *headers,
+                                                  apr_pool_t *p,
+                                                  apr_bucket_alloc_t *list);
+
+/**
+ * Clone a HEADER bucket into another pool/bucket_alloc that may
+ * have a separate lifetime than the source bucket/pool.
+ * @param source the header bucket to clone
+ * @param p A pool to allocate the data out of.
+ * @param list The bucket allocator from which to allocate the bucket
+ * @return The new bucket, or NULL if allocation failed
+ */
+AP_DECLARE(apr_bucket *) ap_bucket_headers_clone(apr_bucket *source,
+                                                 apr_pool_t *p,
+                                                 apr_bucket_alloc_t *list);
+
 AP_DECLARE_NONSTD(apr_status_t) ap_byterange_filter(ap_filter_t *f, apr_bucket_brigade *b);
 AP_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, apr_bucket_brigade *b);
 AP_DECLARE_NONSTD(apr_status_t) ap_content_length_filter(ap_filter_t *,
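
On the request side, a hedged sketch of how a protocol module could hand a parsed request head to the core as a REQUEST meta bucket followed by EOS; the method, URI, protocol string and function name are invented:

    /* Sketch: wrap a parsed request head into a REQUEST meta bucket. */
    static apr_bucket_brigade *make_request_brigade(conn_rec *c, apr_pool_t *p,
                                                    apr_table_t *headers)
    {
        apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc);
        apr_bucket *b;

        /* the _create() variant copies its arguments, _createn() uses them
         * as-is (per the doc comments above) */
        b = ap_bucket_request_create("GET", "/index.html", "HTTP/2.0",
                                     headers, p, c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);
        /* body buckets, if any, would follow here */
        APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(c->bucket_alloc));
        return bb;
    }
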
@@ -1047,6 +1305,13 @@ AP_DECLARE(void) ap_set_sub_req_protocol(request_rec *rnew, const request_rec *r
  */
 AP_DECLARE(void) ap_finalize_sub_req_protocol(request_rec *sub_r);
 
+/**
+ * Set standard response headers, such as `Date` and `Server`
+ * in r->headers_out. Takes care of precedence of existing
+ * values from proxied requests.
+ */
+AP_DECLARE(void) ap_set_std_response_headers(request_rec *r);
+
 /**
  * Send an interim (HTTP 1xx) response immediately.
  * @param r The request
@@ -1054,6 +1319,33 @@ AP_DECLARE(void) ap_finalize_sub_req_protocol(request_rec *sub_r);
  */
 AP_DECLARE(void) ap_send_interim_response(request_rec *r, int send_headers);
 
+/**
+ * Append the headers in HTTP/1.1 format to the brigade.
+ * @param b the brigade to append to
+ * @param r the request this is done for (pool and logging)
+ * @param headers the headers to append
+ */
+AP_DECLARE(apr_status_t) ap_h1_append_headers(apr_bucket_brigade *b,
+                                              request_rec *r,
+                                              apr_table_t *headers);
+
+/**
+ * Append the HTTP/1.1 header termination (empty CRLF) to the brigade.
+ * @param b the brigade to append to
+ */
+AP_DECLARE(apr_status_t) ap_h1_terminate_header(apr_bucket_brigade *b);
+
+/**
+ * Insert/append the last chunk of an HTTP/1.1 chunked Transfer-Encoding body.
+ * @param b the brigade to add the chunk to
+ * @param eos the bucket to insert the chunk before, or NULL to append at the tail
+ * @param r the request handled
+ * @param trailers table of trailers or NULL
+ */
+AP_DECLARE(void) ap_h1_add_end_chunk(apr_bucket_brigade *b,
+                                     apr_bucket *eos,
+                                     request_rec *r,
+                                     apr_table_t *trailers);
 
 #ifdef __cplusplus
 }
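
The new HTTP/1.x helpers are intended for non-core callers such as mod_proxy. A minimal sketch of writing a response header block into a brigade, assuming the status line has already been written by the caller (the wrapper function is invented):

    /* Sketch: standard headers, then "Name: value CRLF" pairs, then the
     * empty CRLF line that terminates the header block. */
    static apr_status_t write_h1_response_head(apr_bucket_brigade *bb,
                                               request_rec *r)
    {
        apr_status_t rv;

        ap_set_std_response_headers(r);          /* Date, Server */
        rv = ap_h1_append_headers(bb, r, r->headers_out);
        if (rv == APR_SUCCESS) {
            rv = ap_h1_terminate_header(bb);
        }
        return rv;
    }
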
diff --git a/include/httpd.h b/include/httpd.h
index 41e570799ed4654ffb6882327163b0c7b2be6dfc..a4c32535c790301a7283d90d93368a1f0e944d73 100644 (file)
@@ -1144,6 +1144,14 @@ struct request_rec {
      * the elements of this field.
      */
     ap_request_bnotes_t bnotes;
+    /** Indicates that the request has a body of unknown length and
+     * protocol handlers need to read it, even if only to discard the
+     * data. In HTTP/1.1 this is set on chunked transfer encodings, but
+     * newer HTTP versions can transfer such bodies by other means. The
+     * absence of a "Transfer-Encoding" header is no longer sufficient
+     * to conclude that no body is there.
+     */
+    int body_indeterminate;
 };
 
 /**
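
A hedged sketch of how a handler could honour the new flag: read (or at least drain) a request body whenever a length is announced or body_indeterminate is set, instead of keying off Transfer-Encoding. The function name is invented; ap_setup_client_block() is adapted by this commit to perform exactly that check:

    /* Sketch: drain any request body, known length or not. */
    static int drain_request_body(request_rec *r)
    {
        char buf[HUGE_STRING_LEN];
        long n;
        int rc = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK);

        if (rc != OK) {
            return rc;
        }
        if (ap_should_client_block(r)) {
            while ((n = ap_get_client_block(r, buf, sizeof(buf))) > 0) {
                /* discard */
            }
            if (n < 0) {
                return HTTP_BAD_REQUEST;
            }
        }
        return OK;
    }
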
diff --git a/include/mod_core.h b/include/mod_core.h
index 8eab3e12c46196b453f0f1fcf473149fb9d41f11..4897fee6f5ce09507dcd3f0adbbd78fa1982a16a 100644 (file)
@@ -40,6 +40,7 @@ extern "C" {
 
 /* Handles for core filters */
 AP_DECLARE_DATA extern ap_filter_rec_t *ap_http_input_filter_handle;
+AP_DECLARE_DATA extern ap_filter_rec_t *ap_h1_body_in_filter_handle;
 AP_DECLARE_DATA extern ap_filter_rec_t *ap_http_header_filter_handle;
 AP_DECLARE_DATA extern ap_filter_rec_t *ap_chunk_filter_handle;
 AP_DECLARE_DATA extern ap_filter_rec_t *ap_http_outerror_filter_handle;
@@ -52,6 +53,10 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
                             ap_input_mode_t mode, apr_read_type_e block,
                             apr_off_t readbytes);
 
+apr_status_t ap_h1_body_in_filter(ap_filter_t *f, apr_bucket_brigade *b,
+                                     ap_input_mode_t mode, apr_read_type_e block,
+                                     apr_off_t readbytes);
+
 /* HTTP/1.1 chunked transfer encoding filter. */
 apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b);
 
diff --git a/libhttpd.dsp b/libhttpd.dsp
index f9a656ee3a7db6752e8406b3033b56836da01019..6467f8b84010a54592576e716df2f6af3ee70f1a 100644 (file)
@@ -525,6 +525,10 @@ SOURCE=.\server\error_bucket.c
 # End Source File
 # Begin Source File
 
+SOURCE=.\server\headers_bucket.c
+# End Source File
+# Begin Source File
+
 SOURCE=.\server\util.c
 # End Source File
 # Begin Source File
diff --git a/modules/http/chunk_filter.c b/modules/http/chunk_filter.c
index d18a84ec9b18681123d756c7e68ecdf615760b4a..f44543a7657fc2730aa98fa51deb5fb0d8604445 100644 (file)
@@ -19,6 +19,7 @@
  */
 
 #include "apr_strings.h"
+#include "apr_lib.h"
 #include "apr_thread_proc.h"    /* for RLIMIT stuff */
 
 #define APR_WANT_STRFUNC
@@ -28,6 +29,7 @@
 #include "http_config.h"
 #include "http_connection.h"
 #include "http_core.h"
+#include "http_log.h"
 #include "http_protocol.h"  /* For index_of_response().  Grump. */
 #include "http_request.h"
 
 
 #include "mod_core.h"
 
-/*
- * A pointer to this is used to memorize in the filter context that a bad
- * gateway error bucket had been seen. It is used as an invented unique pointer.
- */
-static char bad_gateway_seen;
+
+APLOG_USE_MODULE(http);
+
+
+typedef struct chunk_out_ctx {
+    int bad_gateway_seen;
+    apr_table_t *trailers;
+} chunk_out_ctx;
+
 
 apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
 {
     conn_rec *c = f->r->connection;
+    chunk_out_ctx *ctx = f->ctx;
     apr_bucket_brigade *more, *tmp;
     apr_bucket *e;
     apr_status_t rv;
 
+    if (!ctx) {
+        ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
+    }
+
     for (more = tmp = NULL; b; b = more, more = NULL) {
         apr_off_t bytes = 0;
         apr_bucket *eos = NULL;
@@ -65,28 +76,43 @@ apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
              e != APR_BRIGADE_SENTINEL(b);
              e = APR_BUCKET_NEXT(e))
         {
-            if (APR_BUCKET_IS_EOS(e)) {
-                /* there shouldn't be anything after the eos */
-                ap_remove_output_filter(f);
-                eos = e;
-                break;
-            }
-            if (AP_BUCKET_IS_ERROR(e) &&
-                (((ap_bucket_error *)(e->data))->status == HTTP_BAD_GATEWAY ||
-                 ((ap_bucket_error *)(e->data))->status == HTTP_GATEWAY_TIME_OUT)) {
-                /*
-                 * We had a broken backend. Memorize this in the filter
-                 * context.
-                 */
-                f->ctx = &bad_gateway_seen;
-                continue;
-            }
-            if (APR_BUCKET_IS_FLUSH(e)) {
-                flush = e;
-                if (e != APR_BRIGADE_LAST(b)) {
-                    more = apr_brigade_split_ex(b, APR_BUCKET_NEXT(e), tmp);
+            if (APR_BUCKET_IS_METADATA(e)) {
+                if (APR_BUCKET_IS_EOS(e)) {
+                    /* there shouldn't be anything after the eos */
+                    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r,
+                                  "ap_http_chunk_filter eos seen, removing filter");
+                    ap_remove_output_filter(f);
+                    eos = e;
+                    break;
+                }
+                if (AP_BUCKET_IS_ERROR(e) &&
+                    (((ap_bucket_error *)(e->data))->status == HTTP_BAD_GATEWAY ||
+                     ((ap_bucket_error *)(e->data))->status == HTTP_GATEWAY_TIME_OUT)) {
+                    /*
+                     * We had a broken backend. Memorize this in the filter
+                     * context.
+                     */
+                    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r,
+                                  "ap_http_chunk_filter bad gateway error, suppressing end chunk");
+                    ctx->bad_gateway_seen = 1;
+                    continue;
+                }
+                if (APR_BUCKET_IS_FLUSH(e)) {
+                    flush = e;
+                    if (e != APR_BRIGADE_LAST(b)) {
+                        more = apr_brigade_split_ex(b, APR_BUCKET_NEXT(e), tmp);
+                    }
+                    break;
+                }
+                if (AP_BUCKET_IS_HEADERS(e)) {
+                    ap_bucket_headers *hdrs = e->data;
+                    if (!apr_is_empty_table(hdrs->headers)) {
+                        if (!ctx->trailers) {
+                            ctx->trailers = apr_table_make(f->r->pool, 5);
+                        }
+                        apr_table_overlap(ctx->trailers, hdrs->headers, APR_OVERLAP_TABLES_MERGE);
+                    }
                 }
-                break;
             }
             else if (e->length == (apr_size_t)-1) {
                 /* unknown amount of data (e.g. a pipe) */
@@ -132,6 +158,9 @@ apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
              * Insert the chunk header, specifying the number of bytes in
              * the chunk.
              */
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r,
+                          "ap_http_chunk_filter sending chunk of %"
+                          APR_UINT64_T_HEX_FMT " bytes", (apr_uint64_t)bytes);
             hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
                                    "%" APR_UINT64_T_HEX_FMT CRLF, (apr_uint64_t)bytes);
             ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
@@ -175,12 +204,8 @@ apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
          * marker above, but this is a bit more straight-forward for
          * now.
          */
-        if (eos && !f->ctx) {
-            /* XXX: (2) trailers ... does not yet exist */
-            e = apr_bucket_immortal_create(ZERO_ASCII CRLF_ASCII
-                                           /* <trailers> */
-                                           CRLF_ASCII, 5, c->bucket_alloc);
-            APR_BUCKET_INSERT_BEFORE(eos, e);
+        if (eos && !ctx->bad_gateway_seen) {
+            ap_h1_add_end_chunk(b, eos, f->r, ctx->trailers);
         }
 
         /* pass the brigade to the next filter. */
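
With this change, anything upstream of the chunk filter can announce trailers simply by placing a HEADERS bucket in front of EOS; the filter collects them and ap_h1_add_end_chunk() writes them into the last-chunk. A hedged sketch of the producing side (function name and trailer field invented):

    /* Sketch: queue a trailer and let the chunk filter serialize it. */
    static apr_status_t send_trailer_and_eos(request_rec *r,
                                             apr_bucket_brigade *bb)
    {
        apr_bucket_alloc_t *ba = bb->bucket_alloc;
        apr_table_t *trailers = apr_table_make(r->pool, 1);
        apr_bucket *b;

        apr_table_setn(trailers, "Example-Checksum", "abc123");  /* invented */
        b = ap_bucket_headers_create(trailers, r->pool, ba);
        APR_BRIGADE_INSERT_TAIL(bb, b);
        APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(ba));
        return ap_pass_brigade(r->output_filters, bb);
    }
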
diff --git a/modules/http/http_core.c b/modules/http/http_core.c
index 9e875ce2cc56a3d348be8c82435f120e400be92b..ce87ef2045f235cffd933c7e849b2b8890f1d35f 100644 (file)
@@ -36,6 +36,7 @@
 
 /* Handles for core filters */
 AP_DECLARE_DATA ap_filter_rec_t *ap_http_input_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_h1_body_in_filter_handle;
 AP_DECLARE_DATA ap_filter_rec_t *ap_http_header_filter_handle;
 AP_DECLARE_DATA ap_filter_rec_t *ap_chunk_filter_handle;
 AP_DECLARE_DATA ap_filter_rec_t *ap_http_outerror_filter_handle;
@@ -301,6 +302,9 @@ static void register_hooks(apr_pool_t *p)
     ap_http_input_filter_handle =
         ap_register_input_filter("HTTP_IN", ap_http_filter,
                                  NULL, AP_FTYPE_PROTOCOL);
+    ap_h1_body_in_filter_handle =
+        ap_register_input_filter("HTTP1_BODY_IN", ap_h1_body_in_filter,
+                                 NULL, AP_FTYPE_TRANSCODE);
     ap_http_header_filter_handle =
         ap_register_output_filter("HTTP_HEADER", ap_http_header_filter,
                                   NULL, AP_FTYPE_PROTOCOL);
diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
index d41a519b500d639e8ed83fe240472ec3b6b389d5..9bcf2297d0b5c17b3ae94dad12c45978a518b71d 100644 (file)
@@ -254,21 +254,28 @@ static apr_status_t parse_chunk_size(http_ctx_t *ctx, const char *buffer,
 }
 
 static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f,
-                                          apr_bucket_brigade *b, int merge)
+                                          apr_bucket_brigade *b)
 {
     int rv;
     apr_bucket *e;
     request_rec *r = f->r;
+    apr_table_t *trailers;
     apr_table_t *saved_headers_in = r->headers_in;
     int saved_status = r->status;
 
+    trailers = apr_table_make(r->pool, 5);
     r->status = HTTP_OK;
-    r->headers_in = r->trailers_in;
-    apr_table_clear(r->headers_in);
+    r->headers_in = trailers;
     ap_get_mime_headers(r);
+    r->headers_in = saved_headers_in;
 
-    if(r->status == HTTP_OK) {
+    if (r->status == HTTP_OK) {
         r->status = saved_status;
+
+        if (!apr_is_empty_table(trailers)) {
+            e = ap_bucket_headers_create(trailers, r->pool, b->bucket_alloc);
+            APR_BRIGADE_INSERT_TAIL(b, e);
+        }
         e = apr_bucket_eos_create(f->c->bucket_alloc);
         APR_BRIGADE_INSERT_TAIL(b, e);
         ctx->at_eos = 1;
@@ -284,17 +291,16 @@ static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f,
         rv = APR_EINVAL;
     }
 
-    if(!merge) {
-        r->headers_in = saved_headers_in;
-    }
-    else {
-        r->headers_in = apr_table_overlay(r->pool, saved_headers_in,
-                r->trailers_in);
-    }
-
     return rv;
 }
 
+typedef struct h1_in_ctx_t
+{
+    unsigned int at_trailers:1;
+    unsigned int at_eos:1;
+    unsigned int seen_data:1;
+} h1_in_ctx_t;
+
 /* This is the HTTP_INPUT filter for HTTP requests and responses from
  * proxied servers (mod_proxy).  It handles chunked and content-length
  * bodies.  This can only be inserted/used after the headers
@@ -303,6 +309,122 @@ static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f,
 apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
                             ap_input_mode_t mode, apr_read_type_e block,
                             apr_off_t readbytes)
+{
+    apr_bucket *e, *next;
+    h1_in_ctx_t *ctx = f->ctx;
+    request_rec *r = f->r;
+    apr_status_t rv;
+
+    if (!ctx) {
+        f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
+    }
+
+    /* Since we're about to read data, send 100-Continue if needed.
+     * Only valid on chunked and C-L bodies where the C-L is > 0.
+     *
+     * If the read is to be nonblocking though, the caller may not want to
+     * handle this just now (e.g. mod_proxy_http), and is prepared to read
+     * nothing if the client really waits for 100 continue, so we don't
+     * send it now and wait for later blocking read.
+     *
+     * In any case, even if r->expecting remains set at the end of the
+     * request handling, ap_set_keepalive() will finally do the right
+     * thing (i.e. "Connection: close" the connection).
+     */
+    if (block == APR_BLOCK_READ
+            && r->expecting_100 && r->proto_num >= HTTP_VERSION(1,1)
+            && !(ctx->at_eos || r->eos_sent || r->bytes_sent)) {
+        if (!ap_is_HTTP_SUCCESS(r->status)) {
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+                          "ap_http_in_filter: status != OK, not sending 100-continue");
+            ctx->at_eos = 1; /* send EOS below */
+        }
+        else if (!ctx->seen_data) {
+            int saved_status = r->status;
+            const char *saved_status_line = r->status_line;
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+                          "ap_http_in_filter: sending 100-continue");
+            r->status = HTTP_CONTINUE;
+            r->status_line = NULL;
+            ap_send_interim_response(r, 0);
+            AP_DEBUG_ASSERT(!r->expecting_100);
+            r->status_line = saved_status_line;
+            r->status = saved_status;
+        }
+        else {
+            /* https://tools.ietf.org/html/rfc7231#section-5.1.1
+             *   A server MAY omit sending a 100 (Continue) response if it
+             *   has already received some or all of the message body for
+             *   the corresponding request [...]
+             */
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10260)
+                          "request body already/partly received while "
+                          "100-continue is expected, omit sending interim "
+                          "response");
+            r->expecting_100 = 0;
+        }
+    }
+
+    /* sanity check in case we're read twice */
+    if (ctx->at_eos) {
+        e = apr_bucket_eos_create(f->c->bucket_alloc);
+        APR_BRIGADE_INSERT_TAIL(b, e);
+        rv = APR_SUCCESS;
+        goto cleanup;
+    }
+
+    rv = ap_get_brigade(f->next, b, mode, block, readbytes);
+    if (APR_SUCCESS == rv) {
+        for (e = APR_BRIGADE_FIRST(b);
+             e != APR_BRIGADE_SENTINEL(b);
+             e = next)
+        {
+            next = APR_BUCKET_NEXT(e);
+            if (!APR_BUCKET_IS_METADATA(e)) {
+                if (e->length != 0) {
+                    ctx->seen_data = 1;
+                }
+                if (ctx->at_trailers) {
+                    /* DATA after trailers? Someone smuggling something? */
+                    rv = AP_FILTER_ERROR;
+                    goto cleanup;
+                }
+                continue;
+            }
+            if (AP_BUCKET_IS_HEADERS(e)) {
+                /* trailers */
+                ap_bucket_headers * hdrs = e->data;
+
+                /* Allow multiple HEADERS buckets carrying trailers here,
+                 * will not happen from HTTP/1.x and current H2 implementation,
+                 * but is an option. */
+                ctx->at_trailers = 1;
+                if (!apr_is_empty_table(hdrs->headers)) {
+                    r->trailers_in = apr_table_overlay(r->pool, r->trailers_in, hdrs->headers);
+                }
+                apr_bucket_delete(e);
+            }
+            if (APR_BUCKET_IS_EOS(e)) {
+                ctx->at_eos = 1;
+                if (!apr_is_empty_table(r->trailers_in)) {
+                    core_server_config *conf = ap_get_module_config(
+                        r->server->module_config, &core_module);
+                    if (conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) {
+                        r->headers_in = apr_table_overlay(r->pool, r->headers_in, r->trailers_in);
+                    }
+                }
+                goto cleanup;
+            }
+        }
+    }
+
+cleanup:
+    return rv;
+}
+
+apr_status_t ap_h1_body_in_filter(ap_filter_t *f, apr_bucket_brigade *b,
+                                     ap_input_mode_t mode, apr_read_type_e block,
+                                     apr_off_t readbytes)
 {
     core_server_config *conf =
         (core_server_config *) ap_get_module_config(f->r->server->module_config,
@@ -360,6 +482,7 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
                  */
                 ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01585)
                               "Unknown Transfer-Encoding: %s", tenc);
+                ap_die(HTTP_NOT_IMPLEMENTED, f->r);
                 return APR_EGENERAL;
             }
             lenp = NULL;
@@ -407,51 +530,6 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
         }
     }
 
-    /* Since we're about to read data, send 100-Continue if needed.
-     * Only valid on chunked and C-L bodies where the C-L is > 0.
-     *
-     * If the read is to be nonblocking though, the caller may not want to
-     * handle this just now (e.g. mod_proxy_http), and is prepared to read
-     * nothing if the client really waits for 100 continue, so we don't
-     * send it now and wait for later blocking read.
-     *
-     * In any case, even if r->expecting remains set at the end of the
-     * request handling, ap_set_keepalive() will finally do the right
-     * thing (i.e. "Connection: close" the connection).
-     */
-    if (block == APR_BLOCK_READ
-            && (ctx->state == BODY_CHUNK
-                || (ctx->state == BODY_LENGTH && ctx->remaining > 0))
-            && f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1)
-            && !(ctx->at_eos || f->r->eos_sent || f->r->bytes_sent)) {
-        if (!ap_is_HTTP_SUCCESS(f->r->status)) {
-            ctx->state = BODY_NONE;
-            ctx->at_eos = 1; /* send EOS below */
-        }
-        else if (!ctx->seen_data) {
-            int saved_status = f->r->status;
-            const char *saved_status_line = f->r->status_line;
-            f->r->status = HTTP_CONTINUE;
-            f->r->status_line = NULL;
-            ap_send_interim_response(f->r, 0);
-            AP_DEBUG_ASSERT(!f->r->expecting_100);
-            f->r->status_line = saved_status_line;
-            f->r->status = saved_status;
-        }
-        else {
-            /* https://tools.ietf.org/html/rfc7231#section-5.1.1
-             *   A server MAY omit sending a 100 (Continue) response if it
-             *   has already received some or all of the message body for
-             *   the corresponding request [...]
-             */
-            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, APLOGNO(10260)
-                          "request body already/partly received while "
-                          "100-continue is expected, omit sending interim "
-                          "response");
-            f->r->expecting_100 = 0;
-        }
-    }
-
     /* sanity check in case we're read twice */
     if (ctx->at_eos) {
         e = apr_bucket_eos_create(f->c->bucket_alloc);
@@ -519,8 +597,7 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
 
             if (ctx->state == BODY_CHUNK_TRAILER) {
                 /* Treat UNSET as DISABLE - trailers aren't merged by default */
-                return read_chunked_trailers(ctx, f, b,
-                            conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE);
+                return read_chunked_trailers(ctx, f, b);
             }
 
             break;
@@ -1636,25 +1713,22 @@ cleanup:
 
 AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy)
 {
-    const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
     const char *lenp = apr_table_get(r->headers_in, "Content-Length");
 
     r->read_body = read_policy;
     r->read_chunked = 0;
     r->remaining = 0;
 
-    if (tenc) {
-        if (ap_cstr_casecmp(tenc, "chunked")) {
-            ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01592)
-                          "Unknown Transfer-Encoding %s", tenc);
-            return HTTP_NOT_IMPLEMENTED;
-        }
+    if (r->body_indeterminate) {
+        /* Protocols like HTTP/2 can carry bodies without length and
+         * HTTP/1.1 has chunked encoding signalled via this flag.
+         */
         if (r->read_body == REQUEST_CHUNKED_ERROR) {
             ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01593)
-                          "chunked Transfer-Encoding forbidden: %s", r->uri);
+                          "indeterminate request body length forbidden: %s", r->uri);
+            r->read_chunked = 0;
             return (lenp) ? HTTP_BAD_REQUEST : HTTP_LENGTH_REQUIRED;
         }
-
         r->read_chunked = 1;
     }
     else if (lenp) {
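
On the input side, once the generic HTTP_IN filter has seen EOS, any received trailers are available in r->trailers_in (and are merged into r->headers_in only when MergeTrailers is enabled). A small sketch of a handler consuming them; the trailer name is invented:

    /* Sketch: inspect a trailer after the body has been read to EOS. */
    static void log_example_trailer(request_rec *r)
    {
        const char *v = apr_table_get(r->trailers_in, "Example-Trailer");
        if (v) {
            ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
                          "trailer Example-Trailer: %s", v);
        }
    }
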
diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c
index 9cae54aee7d6cf7880581bcf937f1442a0a1167a..59fef62756cfb265490c07d1866b5038606d84bf 100644 (file)
@@ -1485,3 +1485,108 @@ AP_DECLARE(void) ap_clear_method_list(ap_method_list_t *l)
     l->method_list->nelts = 0;
 }
 
+/* Send a request's HTTP response headers to the client.
+ */
+AP_DECLARE(apr_status_t) ap_h1_append_headers(apr_bucket_brigade *bb,
+                                              request_rec *r,
+                                              apr_table_t *headers)
+{
+    const apr_array_header_t *elts;
+    const apr_table_entry_t *t_elt;
+    const apr_table_entry_t *t_end;
+    struct iovec *vec;
+    struct iovec *vec_next;
+
+    elts = apr_table_elts(headers);
+    if (elts->nelts == 0) {
+        return APR_SUCCESS;
+    }
+    t_elt = (const apr_table_entry_t *)(elts->elts);
+    t_end = t_elt + elts->nelts;
+    vec = (struct iovec *)apr_palloc(r->pool, 4 * elts->nelts *
+                                     sizeof(struct iovec));
+    vec_next = vec;
+
+    /* For each field, generate
+     *    name ": " value CRLF
+     */
+    do {
+        if (t_elt->key && t_elt->val) {
+            vec_next->iov_base = (void*)(t_elt->key);
+            vec_next->iov_len = strlen(t_elt->key);
+            vec_next++;
+            vec_next->iov_base = ": ";
+            vec_next->iov_len = sizeof(": ") - 1;
+            vec_next++;
+            vec_next->iov_base = (void*)(t_elt->val);
+            vec_next->iov_len = strlen(t_elt->val);
+            vec_next++;
+            vec_next->iov_base = CRLF;
+            vec_next->iov_len = sizeof(CRLF) - 1;
+            vec_next++;
+        }
+        t_elt++;
+    } while (t_elt < t_end);
+
+    if (APLOGrtrace4(r)) {
+        t_elt = (const apr_table_entry_t *)(elts->elts);
+        do {
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, "  %s: %s",
+                          t_elt->key, t_elt->val);
+            t_elt++;
+        } while (t_elt < t_end);
+    }
+
+#if APR_CHARSET_EBCDIC
+    {
+        apr_size_t len;
+        char *tmp = apr_pstrcatv(r->pool, vec, vec_next - vec, &len);
+        ap_xlate_proto_to_ascii(tmp, len);
+        return apr_brigade_write(bb, NULL, NULL, tmp, len);
+    }
+#else
+    return apr_brigade_writev(bb, NULL, NULL, vec, vec_next - vec);
+#endif
+}
+
+AP_DECLARE(apr_status_t) ap_h1_terminate_header(apr_bucket_brigade *bb)
+{
+    char crlf[] = CRLF;
+    apr_size_t buflen;
+
+    buflen = strlen(crlf);
+    ap_xlate_proto_to_ascii(crlf, buflen);
+    return apr_brigade_write(bb, NULL, NULL, crlf, buflen);
+}
+
+AP_DECLARE(void) ap_h1_add_end_chunk(apr_bucket_brigade *b,
+                                     apr_bucket *eos,
+                                     request_rec *r,
+                                     apr_table_t *trailers)
+{
+    if (!trailers || apr_is_empty_table(trailers)) {
+        apr_bucket *e;
+
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+                      "append empty end chunk");
+        e = apr_bucket_immortal_create(ZERO_ASCII CRLF_ASCII
+                                       CRLF_ASCII, 5, b->bucket_alloc);
+        if (eos) {
+            APR_BUCKET_INSERT_BEFORE(eos, e);
+        }
+        else {
+            APR_BRIGADE_INSERT_TAIL(b, e);
+        }
+    }
+    else {
+        apr_bucket_brigade *tmp;
+
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+                      "append end chunk with trailers");
+        tmp = eos? apr_brigade_split_ex(b, eos, NULL) : NULL;
+        apr_brigade_write(b, NULL, NULL, ZERO_ASCII CRLF_ASCII, 3);
+        ap_h1_append_headers(b, r, trailers);
+        ap_h1_terminate_header(b);
+        if (tmp) APR_BRIGADE_CONCAT(b, tmp);
+    }
+}
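
For callers that serialize a chunked HTTP/1.1 body themselves (toward a backend, for instance), the end chunk plus optional trailers can now be produced with a single call; a minimal sketch:

    /* Sketch: append "0 CRLF [trailers] CRLF" at the brigade tail. */
    static void finish_chunked_body(apr_bucket_brigade *bb, request_rec *r,
                                    apr_table_t *trailers)
    {
        ap_h1_add_end_chunk(bb, NULL, r, trailers);  /* NULL eos: append at tail */
    }
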
diff --git a/modules/http2/h2_c2.c b/modules/http2/h2_c2.c
index 751a509ba269853bfc736905a57e730ed2ca977a..acb28ff3ded9c5306036ae5fb20b8630998f63ab 100644 (file)
@@ -478,6 +478,16 @@ static apr_status_t c2_process(h2_conn_ctx_t *conn_ctx, conn_rec *c)
 
     /* the request_rec->server carries the timeout value that applies */
     h2_conn_ctx_set_timeout(conn_ctx, r->server->timeout);
+    /* We only handle this one request on the connection and tell everyone
+     * that there is no need to keep it "clean" if something fails. Also,
+     * this prevents mod_reqtimeout from doing funny business with monitoring
+     * keepalive timeouts.
+     */
+    r->connection->keepalive = AP_CONN_CLOSE;
+
+    if (conn_ctx->beam_in && !apr_table_get(r->headers_in, "Content-Length")) {
+        r->body_indeterminate = 1;
+    }
 
     if (h2_config_sgeti(conn_ctx->server, H2_CONF_COPY_FILES)) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
diff --git a/modules/http2/h2_c2_filter.c b/modules/http2/h2_c2_filter.c
index 96e56cdd6f9f6d4522b8a470832f6e585a6ea1d3..dabfd420ea2e723b9d01e37813dd0b11cbdcc68a 100644 (file)
@@ -668,139 +668,6 @@ apr_status_t h2_c2_filter_response_out(ap_filter_t *f, apr_bucket_brigade *bb)
 }
 
 
-struct h2_chunk_filter_t {
-    const char *id;
-    int eos_chunk_added;
-    apr_bucket_brigade *bbchunk;
-    apr_off_t chunked_total;
-};
-typedef struct h2_chunk_filter_t h2_chunk_filter_t;
-
-
-static void make_chunk(conn_rec *c, h2_chunk_filter_t *fctx, apr_bucket_brigade *bb,
-                       apr_bucket *first, apr_off_t chunk_len, 
-                       apr_bucket *tail)
-{
-    /* Surround the buckets [first, tail[ with new buckets carrying the
-     * HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends
-     * to the end of the brigade. */
-    char buffer[128];
-    apr_bucket *b;
-    apr_size_t len;
-    
-    len = (apr_size_t)apr_snprintf(buffer, H2_ALEN(buffer), 
-                                   "%"APR_UINT64_T_HEX_FMT"\r\n", (apr_uint64_t)chunk_len);
-    b = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc);
-    APR_BUCKET_INSERT_BEFORE(first, b);
-    b = apr_bucket_immortal_create("\r\n", 2, bb->bucket_alloc);
-    if (tail) {
-        APR_BUCKET_INSERT_BEFORE(tail, b);
-    }
-    else {
-        APR_BRIGADE_INSERT_TAIL(bb, b);
-    }
-    fctx->chunked_total += chunk_len;
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
-                  "h2_c2(%s): added chunk %ld, total %ld",
-                  fctx->id, (long)chunk_len, (long)fctx->chunked_total);
-}
-
-static int ser_header(void *ctx, const char *name, const char *value) 
-{
-    apr_bucket_brigade *bb = ctx;
-    apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n", name, value);
-    return 1;
-}
-
-static apr_status_t read_and_chunk(ap_filter_t *f, h2_conn_ctx_t *conn_ctx,
-                                   apr_read_type_e block) {
-    h2_chunk_filter_t *fctx = f->ctx;
-    request_rec *r = f->r;
-    apr_status_t status = APR_SUCCESS;
-
-    if (!fctx->bbchunk) {
-        fctx->bbchunk = apr_brigade_create(r->pool, f->c->bucket_alloc);
-    }
-    
-    if (APR_BRIGADE_EMPTY(fctx->bbchunk)) {
-        apr_bucket *b, *next, *first_data = NULL;
-        apr_bucket_brigade *tmp;
-        apr_off_t bblen = 0;
-
-        /* get more data from the lower layer filters. Always do this
-         * in larger pieces, since we handle the read modes ourself. */
-        status = ap_get_brigade(f->next, fctx->bbchunk,
-                                AP_MODE_READBYTES, block, conn_ctx->mplx->stream_max_mem);
-        if (status != APR_SUCCESS) {
-            return status;
-        }
-
-        for (b = APR_BRIGADE_FIRST(fctx->bbchunk);
-             b != APR_BRIGADE_SENTINEL(fctx->bbchunk);
-             b = next) {
-            next = APR_BUCKET_NEXT(b);
-            if (APR_BUCKET_IS_METADATA(b)) {
-                if (first_data) {
-                    make_chunk(f->c, fctx, fctx->bbchunk, first_data, bblen, b);
-                    first_data = NULL;
-                }
-                
-                if (H2_BUCKET_IS_HEADERS(b)) {
-                    h2_headers *headers = h2_bucket_headers_get(b);
-                    
-                    ap_assert(headers);
-                    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-                                  "h2_c2(%s-%d): receiving trailers",
-                                  conn_ctx->id, conn_ctx->stream_id);
-                    tmp = apr_brigade_split_ex(fctx->bbchunk, b, NULL);
-                    if (!apr_is_empty_table(headers->headers)) {
-                        status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n");
-                        apr_table_do(ser_header, fctx->bbchunk, headers->headers, NULL);
-                        status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "\r\n");
-                    }
-                    else {
-                        status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n\r\n");
-                    }
-                    r->trailers_in = apr_table_clone(r->pool, headers->headers);
-                    APR_BUCKET_REMOVE(b);
-                    apr_bucket_destroy(b);
-                    APR_BRIGADE_CONCAT(fctx->bbchunk, tmp);
-                    apr_brigade_destroy(tmp);
-                    fctx->eos_chunk_added = 1;
-                }
-                else if (APR_BUCKET_IS_EOS(b)) {
-                    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-                                  "h2_c2(%s-%d): receiving eos",
-                                  conn_ctx->id, conn_ctx->stream_id);
-                    if (!fctx->eos_chunk_added) {
-                        tmp = apr_brigade_split_ex(fctx->bbchunk, b, NULL);
-                        status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n\r\n");
-                        APR_BRIGADE_CONCAT(fctx->bbchunk, tmp);
-                        apr_brigade_destroy(tmp);
-                    }
-                    fctx->eos_chunk_added = 0;
-                }
-            }
-            else if (b->length == 0) {
-                APR_BUCKET_REMOVE(b);
-                apr_bucket_destroy(b);
-            } 
-            else {
-                if (!first_data) {
-                    first_data = b;
-                    bblen = 0;
-                }
-                bblen += b->length;
-            }    
-        }
-        
-        if (first_data) {
-            make_chunk(f->c, fctx, fctx->bbchunk, first_data, bblen, NULL);
-        }            
-    }
-    return status;
-}
-
 apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
                                   apr_bucket_brigade* bb,
                                   ap_input_mode_t mode,
@@ -808,7 +675,6 @@ apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
                                   apr_off_t readbytes)
 {
     h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
-    h2_chunk_filter_t *fctx = f->ctx;
     request_rec *r = f->r;
     apr_status_t status = APR_SUCCESS;
     apr_bucket *b, *next;
@@ -817,89 +683,36 @@ apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
                                                     &core_module);
     ap_assert(conn_ctx);
 
-    if (!fctx) {
-        fctx = apr_pcalloc(r->pool, sizeof(*fctx));
-        fctx->id = apr_psprintf(r->pool, "%s-%d", conn_ctx->id, conn_ctx->stream_id);
-        f->ctx = fctx;
-    }
-
     ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r,
                   "h2_c2(%s-%d): request input, exp=%d",
                   conn_ctx->id, conn_ctx->stream_id, r->expecting_100);
-    if (!conn_ctx->request->chunked) {
-        status = ap_get_brigade(f->next, bb, mode, block, readbytes);
-        /* pipe data through, just take care of trailers */
-        for (b = APR_BRIGADE_FIRST(bb); 
-             b != APR_BRIGADE_SENTINEL(bb); b = next) {
-            next = APR_BUCKET_NEXT(b);
-            if (H2_BUCKET_IS_HEADERS(b)) {
-                h2_headers *headers = h2_bucket_headers_get(b);
-                ap_assert(headers);
-                ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-                              "h2_c2(%s-%d): receiving trailers",
-                              conn_ctx->id, conn_ctx->stream_id);
-                r->trailers_in = headers->headers;
-                if (conf && conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) {
-                    r->headers_in = apr_table_overlay(r->pool, r->headers_in,
-                                                      r->trailers_in);                    
-                }
-                APR_BUCKET_REMOVE(b);
-                apr_bucket_destroy(b);
-                ap_remove_input_filter(f);
-                
-                if (headers->raw_bytes && h2_c_logio_add_bytes_in) {
-                    h2_c_logio_add_bytes_in(f->c, headers->raw_bytes);
-                }
-                break;
+
+    status = ap_get_brigade(f->next, bb, mode, block, readbytes);
+    /* pipe data through, just take care of trailers */
+    for (b = APR_BRIGADE_FIRST(bb);
+         b != APR_BRIGADE_SENTINEL(bb); b = next) {
+        next = APR_BUCKET_NEXT(b);
+        if (H2_BUCKET_IS_HEADERS(b)) {
+            h2_headers *headers = h2_bucket_headers_get(b);
+            ap_assert(headers);
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+                          "h2_c2(%s-%d): receiving trailers",
+                          conn_ctx->id, conn_ctx->stream_id);
+            r->trailers_in = headers->headers;
+            if (conf && conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) {
+                r->headers_in = apr_table_overlay(r->pool, r->headers_in,
+                                                  r->trailers_in);
             }
-        }
-        return status;
-    }
+            APR_BUCKET_REMOVE(b);
+            apr_bucket_destroy(b);
+            ap_remove_input_filter(f);
 
-    /* Things are more complicated. The standard HTTP input filter, which
-     * does a lot what we do not want to duplicate, also cares about chunked
-     * transfer encoding and trailers.
-     * We need to simulate chunked encoding for it to be happy.
-     */
-    if ((status = read_and_chunk(f, conn_ctx, block)) != APR_SUCCESS) {
-        return status;
-    }
-    
-    if (mode == AP_MODE_EXHAUSTIVE) {
-        /* return all we have */
-        APR_BRIGADE_CONCAT(bb, fctx->bbchunk);
-    }
-    else if (mode == AP_MODE_READBYTES) {
-        status = h2_brigade_concat_length(bb, fctx->bbchunk, readbytes);
-    }
-    else if (mode == AP_MODE_SPECULATIVE) {
-        status = h2_brigade_copy_length(bb, fctx->bbchunk, readbytes);
-    }
-    else if (mode == AP_MODE_GETLINE) {
-        /* we are reading a single LF line, e.g. the HTTP headers. 
-         * this has the nasty side effect to split the bucket, even
-         * though it ends with CRLF and creates a 0 length bucket */
-        status = apr_brigade_split_line(bb, fctx->bbchunk, block, HUGE_STRING_LEN);
-        if (APLOGctrace1(f->c)) {
-            char buffer[1024];
-            apr_size_t len = sizeof(buffer)-1;
-            apr_brigade_flatten(bb, buffer, &len);
-            buffer[len] = 0;
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
-                          "h2_c2(%s-%d): getline: %s",
-                          conn_ctx->id, conn_ctx->stream_id, buffer);
+            if (headers->raw_bytes && h2_c_logio_add_bytes_in) {
+                h2_c_logio_add_bytes_in(f->c, headers->raw_bytes);
+            }
+            break;
         }
     }
-    else {
-        /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
-         * to support it. Seems to work. */
-        ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
-                      APLOGNO(02942) 
-                      "h2_c2, unsupported READ mode %d", mode);
-        status = APR_ENOTIMPL;
-    }
-    
-    h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE2, "returning input", bb);
     return status;
 }
 
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index 1557449785badafac15da9d16cc70f1fbad5e9ec..37a7a9339cf129208829c15ad4996a9e2b1c8159 100644 (file)
@@ -632,8 +632,8 @@ static apr_status_t c1_process_stream(h2_mplx *m,
     if (APLOGctrace1(m->c1)) {
         const h2_request *r = stream->request;
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
-                      H2_STRM_MSG(stream, "process %s %s://%s%s chunked=%d"),
-                      r->method, r->scheme, r->authority, r->path, r->chunked);
+                      H2_STRM_MSG(stream, "process %s %s://%s%s"),
+                      r->method, r->scheme, r->authority, r->path);
     }
 
     rv = h2_stream_setup_input(stream);
diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
index c65f88d3343492f51c960f08e5d6a91d5ac1bae6..aa5f764a989178f9e8658c617ef2c6ff38f9a3c0 100644 (file)
@@ -211,15 +211,7 @@ apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos,
          * internal request processing is used to HTTP/1.1, so we
          * need to either add a Content-Length or a Transfer-Encoding
          * if any content can be expected. */
-        if (!eos) {
-            /* We have not seen a content-length and have no eos,
-             * simulate a chunked encoding for our HTTP/1.1 infrastructure,
-             * in case we have "H2SerializeHeaders on" here
-             */
-            req->chunked = 1;
-            apr_table_mergen(req->headers, "Transfer-Encoding", "chunked");
-        }
-        else if (apr_table_get(req->headers, "Content-Type")) {
+        if (eos && apr_table_get(req->headers, "Content-Type")) {
             /* If we have a content-type, but already seen eos, no more
              * data will come. Signal a zero content length explicitly.
              */
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 1cc275e18c90aeca0c58ca584bf0a4c81562ca8e..d8739e8a7fa5e57381495dc9016c649d53832ce6 100644 (file)
@@ -815,7 +815,8 @@ PROXY_DECLARE(int) ap_proxy_checkproxyblock(request_rec *r, proxy_server_conf *c
 PROXY_DECLARE(int) ap_proxy_pre_http_request(conn_rec *c, request_rec *r)
 {
     ap_add_input_filter("HTTP_IN", NULL, r, c);
-    return OK;
+    ap_add_input_filter("HTTP1_BODY_IN", NULL, r, c);
+    return OK;
 }
 
 PROXY_DECLARE(const char *) ap_proxy_location_reverse_map(request_rec *r,
diff --git a/server/Makefile.in b/server/Makefile.in
index ea1154663265ac9800740e6b8d79ba81f6ea6666..3b25ebcafdaef4ae9e1a7303adfa49acb5761372 100644 (file)
@@ -14,7 +14,7 @@ LTLIBRARY_SOURCES = \
        util_charset.c util_cookies.c util_debug.c util_xml.c \
        util_filter.c util_pcre.c util_regex.c $(EXPORTS_DOT_C) \
        scoreboard.c error_bucket.c protocol.c core.c request.c ssl.c provider.c \
-       eoc_bucket.c eor_bucket.c core_filters.c \
+       eoc_bucket.c eor_bucket.c headers_bucket.c core_filters.c \
        util_expr_parse.c util_expr_scan.c util_expr_eval.c \
        apreq_cookie.c apreq_error.c apreq_module.c \
        apreq_module_cgi.c apreq_module_custom.c apreq_param.c \
diff --git a/server/headers_bucket.c b/server/headers_bucket.c
new file mode 100644 (file)
index 0000000..1fa782d
--- /dev/null
+++ b/server/headers_bucket.c
@@ -0,0 +1,269 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "http_protocol.h"
+#include "apr_buckets.h"
+#include "apr_strings.h"
+#if APR_HAVE_STRINGS_H
+#include <strings.h>
+#endif
+
+static apr_status_t dummy_read(apr_bucket *b, const char **str,
+                               apr_size_t *len, apr_read_type_e block)
+{
+    *str = NULL;
+    *len = 0;
+    return APR_SUCCESS;
+}
+
+static void request_bucket_destroy(void *data)
+{
+    ap_bucket_request *h = data;
+
+    if (apr_bucket_shared_destroy(h)) {
+        apr_bucket_free(h);
+    }
+}
+
+AP_DECLARE(apr_bucket *) ap_bucket_request_make(
+            apr_bucket *b,
+            const char *method,
+            const char *uri,
+            const char *protocol,
+            apr_table_t *headers,
+            apr_pool_t *p)
+{
+    return ap_bucket_request_maken(b, apr_pstrdup(p, method),
+                                   apr_pstrdup(p, uri), protocol,
+                                   headers? apr_table_clone(p, headers) : NULL,
+                                   p);
+}
+
+AP_DECLARE(apr_bucket *) ap_bucket_request_maken(
+            apr_bucket *b,
+            const char *method,
+            const char *uri,
+            const char *protocol,
+            apr_table_t *headers,
+            apr_pool_t *p)
+{
+    ap_bucket_request *h;
+
+    h = apr_bucket_alloc(sizeof(*h), b->list);
+    h->pool = p;
+    h->method = method;
+    h->uri = uri;
+    h->protocol = protocol;
+    h->headers = headers;
+
+    b = apr_bucket_shared_make(b, h, 0, 0);
+    b->type = &ap_bucket_type_request;
+    return b;
+}
+
+AP_DECLARE(apr_bucket *) ap_bucket_request_create(
+            const char *method,
+            const char *uri,
+            const char *protocol,
+            apr_table_t *headers,
+            apr_pool_t *p,
+            apr_bucket_alloc_t *list)
+{
+    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+    APR_BUCKET_INIT(b);
+    b->free = apr_bucket_free;
+    b->list = list;
+    return ap_bucket_request_make(b, method, uri, protocol, headers, p);
+}
+
+AP_DECLARE(apr_bucket *) ap_bucket_request_createn(
+            const char *method,
+            const char *uri,
+            const char *protocol,
+            apr_table_t *headers,
+            apr_pool_t *p,
+            apr_bucket_alloc_t *list)
+{
+    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+    APR_BUCKET_INIT(b);
+    b->free = apr_bucket_free;
+    b->list = list;
+    return ap_bucket_request_maken(b, method, uri, protocol, headers, p);
+}
+
+AP_DECLARE_DATA const apr_bucket_type_t ap_bucket_type_request = {
+    "REQUEST", 5, APR_BUCKET_METADATA,
+    request_bucket_destroy,
+    dummy_read,
+    apr_bucket_setaside_notimpl,
+    apr_bucket_split_notimpl,
+    apr_bucket_shared_copy
+};
+
+AP_DECLARE(apr_bucket *) ap_bucket_request_clone(
+        apr_bucket *source,
+        apr_pool_t *p,
+        apr_bucket_alloc_t *list)
+{
+    ap_bucket_request *sreq = source->data;
+
+    AP_DEBUG_ASSERT(AP_BUCKET_IS_REQUEST(source));
+    return ap_bucket_request_create(sreq->method, sreq->uri,
+                                    sreq->protocol, sreq->headers, p, list);
+}
+
+static void response_bucket_destroy(void *data)
+{
+    ap_bucket_response *h = data;
+
+    if (apr_bucket_shared_destroy(h)) {
+        apr_bucket_free(h);
+    }
+}
+
+AP_DECLARE(apr_bucket *) ap_bucket_response_make(apr_bucket *b, int status,
+                                                 const char *reason,
+                                                 apr_table_t *headers,
+                                                 apr_table_t *notes,
+                                                 apr_pool_t *p)
+{
+    ap_bucket_response *h;
+
+    h = apr_bucket_alloc(sizeof(*h), b->list);
+    h->pool = p;
+    h->status = status;
+    h->reason = reason? apr_pstrdup(p, reason) : NULL;
+    h->headers = headers? apr_table_copy(p, headers) : apr_table_make(p, 5);
+    h->notes = notes? apr_table_copy(p, notes) : apr_table_make(p, 5);
+
+    b = apr_bucket_shared_make(b, h, 0, 0);
+    b->type = &ap_bucket_type_response;
+    return b;
+}
+
+AP_DECLARE(apr_bucket *) ap_bucket_response_create(int status, const char *reason,
+                                                   apr_table_t *headers,
+                                                   apr_table_t *notes,
+                                                   apr_pool_t *p,
+                                                   apr_bucket_alloc_t *list)
+{
+    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+    APR_BUCKET_INIT(b);
+    b->free = apr_bucket_free;
+    b->list = list;
+    return ap_bucket_response_make(b, status, reason, headers, notes, p);
+}
+
+AP_DECLARE_DATA const apr_bucket_type_t ap_bucket_type_response = {
+    "RESPONSE", 5, APR_BUCKET_METADATA,
+    response_bucket_destroy,
+    dummy_read,
+    apr_bucket_setaside_notimpl,
+    apr_bucket_split_notimpl,
+    apr_bucket_shared_copy
+};
+
+AP_DECLARE(apr_bucket *) ap_bucket_response_clone(apr_bucket *source,
+                                                  apr_pool_t *p,
+                                                  apr_bucket_alloc_t *list)
+{
+    ap_bucket_response *sresp = source->data;
+    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+    ap_bucket_response *h;
+
+    AP_DEBUG_ASSERT(AP_BUCKET_IS_RESPONSE(source));
+    APR_BUCKET_INIT(b);
+    b->free = apr_bucket_free;
+    b->list = list;
+    h = apr_bucket_alloc(sizeof(*h), b->list);
+    h->pool = p;
+    h->status = sresp->status;
+    h->reason = sresp->reason? apr_pstrdup(p, sresp->reason) : NULL;
+    h->headers = apr_table_clone(p, sresp->headers);
+    h->notes = apr_table_clone(p, sresp->notes);
+
+    b = apr_bucket_shared_make(b, h, 0, 0);
+    b->type = &ap_bucket_type_response;
+    return b;
+}
+
+static void headers_bucket_destroy(void *data)
+{
+    ap_bucket_headers *h = data;
+
+    if (apr_bucket_shared_destroy(h)) {
+        apr_bucket_free(h);
+    }
+}
+
+AP_DECLARE(apr_bucket *) ap_bucket_headers_make(apr_bucket *b,
+                                                apr_table_t *headers,
+                                                apr_pool_t *p)
+{
+    ap_bucket_headers *h;
+
+    h = apr_bucket_alloc(sizeof(*h), b->list);
+    h->pool = p;
+    h->headers = headers? apr_table_copy(p, headers) : apr_table_make(p, 5);
+
+    b = apr_bucket_shared_make(b, h, 0, 0);
+    b->type = &ap_bucket_type_headers;
+    return b;
+}
+
+AP_DECLARE(apr_bucket *) ap_bucket_headers_create(apr_table_t *headers,
+                                                  apr_pool_t *p,
+                                                  apr_bucket_alloc_t *list)
+{
+    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+    APR_BUCKET_INIT(b);
+    b->free = apr_bucket_free;
+    b->list = list;
+    return ap_bucket_headers_make(b, headers, p);
+}
+
+AP_DECLARE_DATA const apr_bucket_type_t ap_bucket_type_headers = {
+    "HEADERS", 5, APR_BUCKET_METADATA,
+    headers_bucket_destroy,
+    dummy_read,
+    apr_bucket_setaside_notimpl,
+    apr_bucket_split_notimpl,
+    apr_bucket_shared_copy
+};
+
+AP_DECLARE(apr_bucket *) ap_bucket_headers_clone(apr_bucket *source,
+                                                 apr_pool_t *p,
+                                                 apr_bucket_alloc_t *list)
+{
+    ap_bucket_headers *shdrs = source->data;
+    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+    ap_bucket_headers *h;
+
+    AP_DEBUG_ASSERT(AP_BUCKET_IS_HEADERS(source));
+    APR_BUCKET_INIT(b);
+    b->free = apr_bucket_free;
+    b->list = list;
+    h = apr_bucket_alloc(sizeof(*h), b->list);
+    h->pool = p;
+    h->headers = apr_table_clone(p, shdrs->headers);
+
+    b = apr_bucket_shared_make(b, h, 0, 0);
+    b->type = &ap_bucket_type_headers;
+    return b;
+}
+
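For orientation, a minimal sketch of how a caller might build one of the new RESPONSE meta buckets and queue it for the output filters. This is not part of the commit; the helper name queue_response_head, the sample header value and the surrounding setup are assumptions for illustration only.

#include "httpd.h"
#include "http_protocol.h"
#include "apr_buckets.h"

/* Sketch: carry the response head as a RESPONSE meta bucket instead of
 * serializing HTTP/1.x status line and header text into the brigade. */
static apr_status_t queue_response_head(conn_rec *c, apr_bucket_brigade *bb,
                                        apr_pool_t *pool)
{
    apr_table_t *headers = apr_table_make(pool, 4);
    apr_bucket *b;

    apr_table_setn(headers, "Content-Type", "text/plain");
    b = ap_bucket_response_create(200, "OK", headers, NULL,
                                  pool, c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);
    return APR_SUCCESS;
}

An HTTP/1.x connection can later serialize such a bucket into a status line and header fields, while an HTTP/2 connection can map it onto its own framing.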
index c5570f6d5b17839b4ea1b590cd9ca45914534fd8..ee2bfd4cb272dc0e50883bcdff6bd669a3caf89f 100644 (file)
@@ -47,6 +47,7 @@
 #include "mod_core.h"
 #include "util_charset.h"
 #include "util_ebcdic.h"
+#include "util_time.h"
 #include "scoreboard.h"
 
 #if APR_HAVE_STDARG_H
@@ -1465,6 +1466,18 @@ static void apply_server_config(request_rec *r)
     r->per_dir_config = r->server->lookup_defaults;
 }
 
+AP_DECLARE(int) ap_assign_request(request_rec *r,
+                                  const char *method, const char *uri,
+                                  const char *protocol)
+{
+    /* dummy, for now */
+    (void)r;
+    (void)method;
+    (void)uri;
+    (void)protocol;
+    return 0;
+}
+
 request_rec *ap_read_request(conn_rec *conn)
 {
     int access_status;
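ap_assign_request() is only stubbed out above. As a rough sketch of the intended call pattern for a non-HTTP/1.x frontend (an assumption, not code from this commit; setup_secondary_request and its error handling are placeholders):

#include "httpd.h"
#include "http_protocol.h"

/* Sketch: populate a freshly created request_rec from a REQUEST meta
 * bucket produced elsewhere, e.g. by an HTTP/2 session. */
static apr_status_t setup_secondary_request(conn_rec *c, ap_bucket_request *req,
                                            request_rec **pr)
{
    request_rec *r = ap_create_request(c);

    if (!ap_assign_request(r, req->method, req->uri, req->protocol)) {
        return APR_EINVAL;
    }
    if (req->headers) {
        r->headers_in = apr_table_clone(r->pool, req->headers);
    }
    *pr = r;
    return APR_SUCCESS;
}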
@@ -1583,6 +1596,14 @@ request_rec *ap_read_request(conn_rec *conn)
      */
     ap_add_input_filter_handle(ap_http_input_filter_handle,
                                NULL, r, r->connection);
+    if (r->proto_num <= HTTP_VERSION(1,1)) {
+        ap_add_input_filter_handle(ap_h1_body_in_filter_handle,
+                                   NULL, r, r->connection);
+        if (r->proto_num >= HTTP_VERSION(1,0)
+            && apr_table_get(r->headers_in, "Transfer-Encoding")) {
+            r->body_indeterminate = 1;
+        }
+    }
 
     /* Validate Host/Expect headers and select vhost. */
     if (!ap_check_request_header(r)) {
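The hunk above adds the HTTP/1.x body filter only for requests with protocol version <= 1.1 and raises body_indeterminate when a Transfer-Encoding header is present. As a rough illustration of what the flag means for consumers (an assumption, not code from this commit; request_may_have_body is a placeholder name):

/* Sketch: with body_indeterminate, the absence of Content-Length and
 * Transfer-Encoding no longer proves that a request carries no body. */
static int request_may_have_body(const request_rec *r)
{
    return r->body_indeterminate
           || apr_table_get(r->headers_in, "Content-Length") != NULL
           || apr_table_get(r->headers_in, "Transfer-Encoding") != NULL;
}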
@@ -2385,6 +2406,38 @@ static int send_header(void *data, const char *key, const char *val)
  }
 #endif
 
+AP_DECLARE(void) ap_set_std_response_headers(request_rec *r)
+{
+    const char *server = NULL, *date;
+    char *s;
+
+    /* Before generating a response, we make sure that `Date` and `Server`
+     * headers are present. When proxying requests, we preserve any existing
+     * values; otherwise we replace them.
+     */
+    if (r->proxyreq != PROXYREQ_NONE) {
+        date = apr_table_get(r->headers_out, "Date");
+        if (!date) {
+            s = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+            ap_recent_rfc822_date(s, r->request_time);
+            date = s;
+        }
+        server = apr_table_get(r->headers_out, "Server");
+    }
+    else {
+        s = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+        ap_recent_rfc822_date(s, r->request_time);
+        date = s;
+    }
+
+    apr_table_setn(r->headers_out, "Date", date);
+
+    if (!server)
+        server = ap_get_server_banner();
+    if (server && *server)
+        apr_table_setn(r->headers_out, "Server", server);
+}
+
 AP_DECLARE(void) ap_send_interim_response(request_rec *r, int send_headers)
 {
     hdr_ptr x;