--- /dev/null
+ *) mod_http2: v2.0.15 with the following fixes and improvements
+ - New directive 'H2EarlyHint name value' to add headers to a response,
+ already picked up when a "103 Early Hints" response is sent. 'name' and
+ 'value' must comply with the HTTP field restrictions.
+ This directive can be repeated several times, and header fields with
+ the same name accumulate. Sending a 'Link' header with a 'preload'
+ relation will also trigger an HTTP/2 PUSH if enabled and supported
+ by the client.
+ - Fixed an issue where requests were not logged and accounted for in a
+ timely fashion when the connection returns to "keepalive" handling,
+ e.g. when the request served was the last outstanding one.
+ This caused requests to appear late in access logs, with wrong
+ duration times reported.
+ - Accurately report the bytes sent for a request in the '%O' log format.
+ This addresses #203, a long-standing issue where mod_h2 reported
+ numbers over-eagerly from its internal buffering instead of what had
+ actually been placed on the connection.
+ The numbers are now the same with and without H2CopyFiles enabled.
+ [Stefan Eissing]
</usage>
</directivesynopsis>
+ <directivesynopsis>
+ <name>H2EarlyHint</name>
+ <description>Add a response header to be sent in "103 Early Hints" responses</description>
+ <syntax>H2EarlyHint <em>name</em> <em>value</em></syntax>
+ <contextlist>
+ <context>server config</context>
+ <context>virtual host</context>
+ <context>directory</context>
+ <context>.htaccess</context>
+ </contextlist>
+ <compatibility>Available in version 2.5.1 and later.</compatibility>
+
+ <usage>
+ <p>
+ <directive>H2EarlyHint</directive> adds a response header
+ before the actual request processing starts. Such headers are
+ picked up for "103 Early Hints" intermediate responses. The main
+ purpose is to send "preload" information to client browsers.
+ </p><p>
+ <em>name</em> and <em>value</em> must form a valid HTTP header field,
+ otherwise the response will fail. <directive>H2EarlyHints</directive>
+ must still be enabled to allow 103 intermediate responses to be sent.
+ This directive can be repeated several times, and header fields with
+ the same name accumulate.
+ </p>
+ <example><title>Example</title>
+ <highlight language="config">
+H2EarlyHint Link "</my.css>;rel=preload;as=style"
+ </highlight>
+ </example>
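+ <p>
+ As a sketch (the resource paths below are placeholders), the
+ directive can be repeated to announce several resources in the
+ same 103 response:
+ </p>
+ <example><title>Example with repeated hints</title>
+ <highlight language="config">
+H2EarlyHints on
+H2EarlyHint Link "</my.css>;rel=preload;as=style"
+H2EarlyHint Link "</my.js>;rel=preload;as=script"
+ </highlight>
+ </example>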
+ </usage>
+ </directivesynopsis>
+
</modulesynopsis>
#include <httpd.h>
#include <http_protocol.h>
+#include <http_request.h>
#include <http_log.h>
#include "h2_private.h"
* from sender thread only */
while (!H2_BLIST_EMPTY(&beam->buckets_consumed)) {
b = H2_BLIST_FIRST(&beam->buckets_consumed);
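+ /* EOR buckets are set aside here instead of being deleted right
+ * away: destroying an EOR bucket finalizes the request and its
+ * pool, which must not happen while the beam may still hold other
+ * buckets living in that pool. They are destroyed last, when the
+ * beam's own pool is cleared (see purge_eor_buckets below). */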
+ if(AP_BUCKET_IS_EOR(b)) {
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->buckets_eor, b);
+ }
+ else {
+ apr_bucket_delete(b);
+ }
+ }
+}
+
+static void purge_eor_buckets(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ /* delete all EOR buckets set aside earlier, needs to be called
+ * from sender thread only */
+ while (!H2_BLIST_EMPTY(&beam->buckets_eor)) {
+ b = H2_BLIST_FIRST(&beam->buckets_eor);
apr_bucket_delete(b);
}
}
{
h2_bucket_beam *beam = data;
beam_shutdown(beam, APR_SHUTDOWN_READWRITE);
+ purge_eor_buckets(beam);
beam->pool = NULL; /* the pool is clearing now */
return APR_SUCCESS;
}
H2_BLIST_INIT(&beam->buckets_to_send);
H2_BLIST_INIT(&beam->buckets_consumed);
+ H2_BLIST_INIT(&beam->buckets_eor);
beam->tx_mem_limits = 1;
beam->max_buf_size = max_buf_size;
beam->timeout = timeout;
apr_pool_t *pool;
h2_blist buckets_to_send;
h2_blist buckets_consumed;
+ h2_blist buckets_eor;
apr_size_t max_buf_size;
apr_interval_time_t timeout;
APR_PROTO_TCP, pool);
}
+static void h2_c2_log_io(conn_rec *c2, apr_off_t bytes_sent)
+{
+ if (bytes_sent && h2_c_logio_add_bytes_out) {
+ h2_c_logio_add_bytes_out(c2, bytes_sent);
+ }
+}
+
void h2_c2_destroy(conn_rec *c2)
{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2);
+
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c2,
"h2_c2(%s): destroy", c2->log_id);
+ if(!c2->aborted && conn_ctx && conn_ctx->bytes_sent) {
+ h2_c2_log_io(c2, conn_ctx->bytes_sent);
+ }
apr_pool_destroy(c2->pool);
}
AP_DEBUG_ASSERT(conn_ctx);
AP_DEBUG_ASSERT(conn_ctx->stream_id);
+ if(!c2->aborted && conn_ctx->bytes_sent) {
+ h2_c2_log_io(c2, conn_ctx->bytes_sent);
+ }
+
if (conn_ctx->beam_in) {
h2_beam_abort(conn_ctx->beam_in, from);
}
static apr_status_t beam_out(conn_rec *c2, h2_conn_ctx_t *conn_ctx, apr_bucket_brigade* bb)
{
- apr_off_t written, header_len = 0;
+ apr_off_t written = 0;
apr_status_t rv;
- if (h2_c_logio_add_bytes_out) {
- /* mod_logio wants to report the number of bytes written in a
- * response, including header and footer fields. Since h2 converts
- * those during c1 processing into the HPACKed h2 HEADER frames,
- * we need to give mod_logio something here and count just the
- * raw lengths of all headers in the buckets. */
- apr_bucket *b;
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b)) {
-#if AP_HAS_RESPONSE_BUCKETS
- if (AP_BUCKET_IS_RESPONSE(b)) {
- header_len += (apr_off_t)response_length_estimate(b->data);
- }
- if (AP_BUCKET_IS_HEADERS(b)) {
- header_len += (apr_off_t)headers_length_estimate(b->data);
- }
-#else
- if (H2_BUCKET_IS_HEADERS(b)) {
- header_len += (apr_off_t)h2_bucket_headers_headers_length(b);
- }
-#endif /* AP_HAS_RESPONSE_BUCKETS */
- }
- }
-
rv = h2_beam_send(conn_ctx->beam_out, c2, bb, APR_BLOCK_READ, &written);
-
if (APR_STATUS_IS_EAGAIN(rv)) {
rv = APR_SUCCESS;
}
- if (written && h2_c_logio_add_bytes_out) {
- h2_c_logio_add_bytes_out(c2, written + header_len);
- }
return rv;
}
return rv;
}
-static void check_push(request_rec *r, const char *tag)
+static int addn_headers(void *udata, const char *name, const char *value)
{
- apr_array_header_t *push_list = h2_config_push_list(r);
+ apr_table_t *dest = udata;
+ apr_table_addn(dest, name, value);
+ return 1;
+}
- if (!r->expecting_100 && push_list && push_list->nelts > 0) {
- int i, old_status;
- const char *old_line;
+static void check_early_hints(request_rec *r, const char *tag)
+{
+ apr_array_header_t *push_list = h2_config_push_list(r);
+ apr_table_t *early_headers = h2_config_early_headers(r);
+
+ if (!r->expecting_100 &&
+ ((push_list && push_list->nelts > 0) ||
+ (early_headers && !apr_is_empty_table(early_headers)))) {
+ int have_hints = 0, i;
+
+ if (push_list && push_list->nelts > 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "%s, early announcing %d resources for push",
+ tag, push_list->nelts);
+ for (i = 0; i < push_list->nelts; ++i) {
+ h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
+ apr_table_add(r->headers_out, "Link",
+ apr_psprintf(r->pool, "<%s>; rel=preload%s",
+ push->uri_ref, push->critical? "; critical" : ""));
+ }
+ have_hints = 1;
+ }
+ if (early_headers && !apr_is_empty_table(early_headers)) {
+ apr_table_do(addn_headers, r->headers_out, early_headers, NULL);
+ have_hints = 1;
+ }
- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
- "%s, early announcing %d resources for push",
- tag, push_list->nelts);
- for (i = 0; i < push_list->nelts; ++i) {
- h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
- apr_table_add(r->headers_out, "Link",
- apr_psprintf(r->pool, "<%s>; rel=preload%s",
- push->uri_ref, push->critical? "; critical" : ""));
+ if (have_hints) {
+ int old_status;
+ const char *old_line;
+
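+ /* H2Push is off for this request, but not for the whole
+ * connection: note it on the connection, so the announced
+ * resources do not trigger PUSHes. */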
+ if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 &&
+ h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) {
+ apr_table_setn(r->connection->notes, H2_PUSH_MODE_NOTE, "0");
+ }
+ old_status = r->status;
+ old_line = r->status_line;
+ r->status = 103;
+ r->status_line = "103 Early Hints";
+ ap_send_interim_response(r, 1);
+ r->status = old_status;
+ r->status_line = old_line;
}
- old_status = r->status;
- old_line = r->status_line;
- r->status = 103;
- r->status_line = "103 Early Hints";
- ap_send_interim_response(r, 1);
- r->status = old_status;
- r->status_line = old_line;
}
}
return DECLINED;
}
- check_push(r, "late_fixup");
+ check_early_hints(r, "late_fixup");
return DECLINED;
}
{
apr_bucket *b;
apr_status_t status;
-
h2_headers *response = h2_headers_create(parser->http_status,
make_table(parser),
- NULL, 0, parser->pool);
+ parser->c->notes,
+ 0, parser->pool);
apr_brigade_cleanup(parser->tmp);
b = h2_bucket_headers_create(parser->c->bucket_alloc, response);
APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
int push_diary_size; /* # of entries in push diary */
int copy_files; /* if files shall be copied vs setaside on output */
apr_array_header_t *push_list; /* list of h2_push_res configurations */
+ apr_table_t *early_headers; /* HTTP headers for a 103 response */
int early_hints; /* support status code 103 */
int padding_bits;
int padding_always;
int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
int h2_push; /* if HTTP/2 server push is enabled */
apr_array_header_t *push_list; /* list of h2_push_res configurations */
+ apr_table_t *early_headers; /* HTTP headers for a 103 response */
int early_hints; /* support status code 103 */
apr_interval_time_t stream_timeout;/* beam timeout */
} h2_dir_config;
256, /* push diary size */
0, /* copy files across threads */
NULL, /* push list */
+ NULL, /* early headers */
0, /* early hints, http status 103 */
0, /* padding bits */
1, /* padding always */
-1, /* HTTP/1 Upgrade support */
-1, /* HTTP/2 server push enabled */
NULL, /* push list */
+ NULL, /* early headers */
-1, /* early hints, http status 103 */
-1, /* beam timeout */
};
conf->push_diary_size = DEF_VAL;
conf->copy_files = DEF_VAL;
conf->push_list = NULL;
+ conf->early_headers = NULL;
conf->early_hints = DEF_VAL;
conf->padding_bits = DEF_VAL;
conf->padding_always = DEF_VAL;
else {
n->push_list = add->push_list? add->push_list : base->push_list;
}
+ if (add->early_headers && base->early_headers) {
+ n->early_headers = apr_table_overlay(pool, add->early_headers, base->early_headers);
+ }
+ else {
+ n->early_headers = add->early_headers? add->early_headers : base->early_headers;
+ }
n->early_hints = H2_CONFIG_GET(add, base, early_hints);
n->padding_bits = H2_CONFIG_GET(add, base, padding_bits);
n->padding_always = H2_CONFIG_GET(add, base, padding_always);
else {
n->push_list = add->push_list? add->push_list : base->push_list;
}
+ if (add->early_headers && base->early_headers) {
+ n->early_headers = apr_table_overlay(pool, add->early_headers, base->early_headers);
+ }
+ else {
+ n->early_headers = add->early_headers? add->early_headers : base->early_headers;
+ }
n->early_hints = H2_CONFIG_GET(add, base, early_hints);
n->stream_timeout = H2_CONFIG_GET(add, base, stream_timeout);
return n;
return sconf? sconf->push_list : NULL;
}
+apr_table_t *h2_config_early_headers(request_rec *r)
+{
+ const h2_config *sconf;
+ const h2_dir_config *conf = h2_config_rget(r);
+
+ if (conf && conf->early_headers) {
+ return conf->early_headers;
+ }
+ sconf = h2_config_sget(r->server);
+ return sconf? sconf->early_headers : NULL;
+}
+
const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type)
{
const h2_config *conf = h2_config_get(c);
return NULL;
}
+static const char *h2_conf_add_early_hint(cmd_parms *cmd, void *dirconf,
+ const char *name, const char *value)
+{
+ apr_table_t *hds, **phds;
+
+ if(!name || !*name)
+ return "Early Hint header name must not be empty";
+ if(!value)
+ return "Early Hint header value must not be empty";
+ while (apr_isspace(*value))
+ ++value;
+ if(!*value)
+ return "Early Hint header value must not be empty/only space";
+ if (*ap_scan_http_field_content(value))
+ return "Early Hint header value contains invalid characters";
+
+ if (cmd->path) {
+ phds = &((h2_dir_config*)dirconf)->early_headers;
+ }
+ else {
+ phds = &(h2_config_sget(cmd->server))->early_headers;
+ }
+ hds = *phds;
+ if (!hds) {
+ *phds = hds = apr_table_make(cmd->pool, 10);
+ }
+ apr_table_add(hds, name, value);
+
+ return NULL;
+}
+
static const char *h2_conf_set_early_hints(cmd_parms *cmd,
void *dirconf, const char *value)
{
RSRC_CONF, "set stream timeout"),
AP_INIT_TAKE1("H2MaxDataFrameLen", h2_conf_set_max_data_frame_len, NULL,
RSRC_CONF, "maximum number of bytes in a single HTTP/2 DATA frame"),
+ AP_INIT_TAKE2("H2EarlyHint", h2_conf_add_early_hint, NULL,
+ OR_FILEINFO|OR_AUTHCFG, "add a a 'Link:' header for a 103 Early Hints response."),
AP_END_CMD
};
apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var);
apr_array_header_t *h2_config_push_list(request_rec *r);
+apr_table_t *h2_config_early_headers(request_rec *r);
void h2_get_workers_config(server_rec *s, int *pminw, int *pmaxw,
int has_final_response; /* final HTTP response passed on out */
apr_status_t last_err; /* APR_SUCCESS or last error encountered in filters */
+ apr_off_t bytes_sent; /* c2: bytes actually sent via c1 */
/* atomic */ apr_uint32_t started; /* c2: processing was started */
apr_time_t started_at; /* c2: when processing started */
/* atomic */ apr_uint32_t done; /* c2: processing has finished */
const apr_table_t *header, apr_pool_t *pool)
{
h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, headers->status, r,
+ "h2_headers_rcreate(%ld): status=%d",
+ (long)r->connection->id, status);
if (headers->status == HTTP_FORBIDDEN) {
request_rec *r_prev;
for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) {
return 0;
}
+static void c1_purge_streams(h2_mplx *m);
+
void h2_mplx_c1_destroy(h2_mplx *m)
{
apr_status_t status;
h2_ihash_count(m->shold));
h2_ihash_iter(m->shold, m_unexpected_stream_iter, m);
}
-
+
+ c1_purge_streams(m);
+
m->c1->aborted = old_aborted;
H2_MPLX_LEAVE(m);
apr_array_clear(m->spurge);
}
+void h2_mplx_c1_going_keepalive(h2_mplx *m)
+{
+ H2_MPLX_ENTER_ALWAYS(m);
+ if (m->spurge->nelts) {
+ c1_purge_streams(m);
+ }
+ H2_MPLX_LEAVE(m);
+}
+
apr_status_t h2_mplx_c1_poll(h2_mplx *m, apr_interval_time_t timeout,
stream_ev_callback *on_stream_input,
stream_ev_callback *on_stream_output,
*/
apr_status_t h2_mplx_worker_pop_c2(h2_mplx *m, conn_rec **out_c2);
+
+/**
+ * Session processing is entering KEEPALIVE, e.g. giving control
+ * to the MPM for monitoring incoming socket events only.
+ * Last chance for maintenance work before losing control.
+ */
+void h2_mplx_c1_going_keepalive(h2_mplx *m);
+
#define H2_MPLX_MSG(m, msg) \
"h2_mplx(%d-%lu): "msg, m->child_num, (unsigned long)m->id
const char *cl;
r->headers_in = apr_table_clone(r->pool, req->headers);
+
if (req->authority) {
/* for internal handling, we have to simulate that :authority
* came in as Host:, RFC 9113 ch. says that mismatches between
/* Time to populate r with the data we have. */
r->request_time = req->request_time;
AP_DEBUG_ASSERT(req->authority);
- if (req->scheme && (ap_cstr_casecmp(req->scheme,
- ap_ssl_conn_is_ssl(c->master? c->master : c)? "https" : "http")
- || !ap_cstr_casecmp("CONNECT", req->method))) {
- /* Client sent a non-matching ':scheme' pseudo header. Forward this
- * via an absolute URI in the request line.
- */
+ if (!apr_strnatcasecmp("CONNECT", req->method)) {
+ /* CONNECT MUST NOT have scheme or path */
+ r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0",
+ req->method, req->authority);
+ }
+ else if (req->scheme && ap_cstr_casecmp(req->scheme, "http")
+ && ap_cstr_casecmp(req->scheme, "https")) {
+ /* Client sent a ':scheme' pseudo header for something else
+ * than what we handle by default. Make an absolute URI. */
r->the_request = apr_psprintf(r->pool, "%s %s://%s%s HTTP/2.0",
req->method, req->scheme, req->authority,
req->path ? req->path : "");
r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0",
req->method, req->path);
}
- else if (!apr_strnatcasecmp("CONNECT", req->method)) {
- /* CONNECT MUST NOT have scheme or path */
- r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0",
- req->method, req->authority);
- }
else {
/* We should only come here on a request that is errored already.
* create a request line that passes parsing, we'll die anyway.
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
H2_SSSN_MSG(session, "process returns"));
}
-
+ h2_mplx_c1_going_keepalive(session->mplx);
+
if (session->state == H2_SESSION_ST_DONE) {
if (session->local.error) {
char buffer[128];
++stream->out_frames;
stream->out_frame_octets += frame_len;
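+ /* mirror the octets actually placed on the c1 connection into the
+ * c2 connection context, so mod_logio can report them accurately */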
+ if(stream->c2) {
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(stream->c2);
+ if(conn_ctx)
+ conn_ctx->bytes_sent = stream->out_frame_octets;
+ }
+
switch (ftype) {
case NGHTTP2_DATA:
eos = (flags & NGHTTP2_FLAG_END_STREAM);
* @macro
* Version number of the http2 module as c string
*/
-#define MOD_HTTP2_VERSION "2.0.13"
+#define MOD_HTTP2_VERSION "2.0.15"
/**
* @macro
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_HTTP2_VERSION_NUM 0x02000d
+#define MOD_HTTP2_VERSION_NUM 0x02000f
#endif /* mod_h2_h2_version_h */
def __init__(self, env: 'HttpdTestEnv'):
super().__init__(env=env)
self.add_source_dir(os.path.dirname(inspect.getfile(H2TestSetup)))
- self.add_modules(["http2", "proxy_http2", "cgid", "autoindex", "ssl"])
+ self.add_modules(["http2", "proxy_http2", "cgid", "autoindex", "ssl", "include"])
def make(self):
super().make()
--- /dev/null
+Hello include<br>
--- /dev/null
+<!doctype html>
+<html>
+<head><meta charset="UTF-8"></head>
+<body>
+ test<br>
+ <!--#include virtual="./include.inc"-->
+ hello<br>
+</body>
+</html>
self.curl_upload_and_verify(env, "data-1k", ["-v", "--http1.1", "-H", "Expect: 100-continue"])
self.curl_upload_and_verify(env, "data-1k", ["-v", "--http2", "-H", "Expect: 100-continue"])
- @pytest.mark.skipif(True, reason="python3 regresses in chunked inputs to cgi")
def test_h2_004_06(self, env):
- self.curl_upload_and_verify(env, "data-1k", ["--http1.1", "-H", "Content-Length: "])
- self.curl_upload_and_verify(env, "data-1k", ["--http2", "-H", "Content-Length: "])
+ self.curl_upload_and_verify(env, "data-1k", [
+ "--http1.1", "-H", "Content-Length:", "-H", "Transfer-Encoding: chunked"
+ ])
+ self.curl_upload_and_verify(env, "data-1k", ["--http2", "-H", "Content-Length:"])
@pytest.mark.parametrize("name, value", [
("HTTP2", "on"),
def test_h2_004_25(self, env, name, repeat):
self.nghttp_upload_and_verify(env, name, ["--no-content-length"])
- def test_h2_004_30(self, env):
- # issue: #203
- resource = "data-1k"
- full_length = 1000
- chunk = 200
- self.curl_upload_and_verify(env, resource, ["-v", "--http2"])
- logfile = os.path.join(env.server_logs_dir, "test_004_30")
- if os.path.isfile(logfile):
- os.remove(logfile)
- H2Conf(env).add("""
-LogFormat "{ \\"request\\": \\"%r\\", \\"status\\": %>s, \\"bytes_resp_B\\": %B, \\"bytes_tx_O\\": %O, \\"bytes_rx_I\\": %I, \\"bytes_rx_tx_S\\": %S }" issue_203
-CustomLog logs/test_004_30 issue_203
- """).add_vhost_cgi().install()
- assert env.apache_restart() == 0
- url = env.mkurl("https", "cgi", "/files/{0}".format(resource))
- r = env.curl_get(url, 5, options=["--http2"])
- assert r.response["status"] == 200
- r = env.curl_get(url, 5, options=["--http1.1", "-H", "Range: bytes=0-{0}".format(chunk-1)])
- assert 206 == r.response["status"]
- assert chunk == len(r.response["body"].decode('utf-8'))
- r = env.curl_get(url, 5, options=["--http2", "-H", "Range: bytes=0-{0}".format(chunk-1)])
- assert 206 == r.response["status"]
- assert chunk == len(r.response["body"].decode('utf-8'))
- # Wait for log completeness
- time.sleep(1)
- # now check what response lengths have actually been reported
- lines = open(logfile).readlines()
- log_h2_full = json.loads(lines[-3])
- log_h1 = json.loads(lines[-2])
- log_h2 = json.loads(lines[-1])
- assert log_h2_full['bytes_rx_I'] > 0
- assert log_h2_full['bytes_resp_B'] == full_length
- assert log_h2_full['bytes_tx_O'] > full_length
- assert log_h1['bytes_rx_I'] > 0 # input bytes received
- assert log_h1['bytes_resp_B'] == chunk # response bytes sent (payload)
- assert log_h1['bytes_tx_O'] > chunk # output bytes sent
- assert log_h2['bytes_rx_I'] > 0
- assert log_h2['bytes_resp_B'] == chunk
- assert log_h2['bytes_tx_O'] > chunk
-
def test_h2_004_40(self, env):
# echo content using h2test_module "echo" handler
def post_and_verify(fname, options=None):
--- /dev/null
+import re
+import pytest
+
+from .env import H2Conf, H2TestEnv
+
+
+@pytest.mark.skipif(condition=H2TestEnv.is_unsupported, reason="mod_http2 not supported here")
+class TestSSI:
+
+ @pytest.fixture(autouse=True, scope='class')
+ def _class_scope(self, env):
+ conf = H2Conf(env, extras={
+ f'cgi.{env.http_tld}': [
+ 'AddOutputFilter INCLUDES .html',
+ '<Location "/ssi">',
+ ' Options +Includes',
+ '</Location>',
+ ],
+ })
+ conf.add_vhost_cgi(
+ proxy_self=True, h2proxy_self=True
+ ).add_vhost_test1(
+ proxy_self=True, h2proxy_self=True
+ ).install()
+ assert env.apache_restart() == 0
+
+ # SSI test from https://bz.apache.org/bugzilla/show_bug.cgi?id=66483
+ def test_h2_007_01(self, env):
+ url = env.mkurl("https", "cgi", "/ssi/test.html")
+ r = env.curl_get(url, 5)
+ assert r.response["status"] == 200
+ assert r.stdout == '''<!doctype html>
+<html>
+<head><meta charset="UTF-8"></head>
+<body>
+ test<br>
+ Hello include<br>
+
+ hello<br>
+</body>
+</html>
+''', f'{r}'
+
--- /dev/null
+import inspect
+import json
+import os
+import pytest
+
+from .env import H2Conf, H2TestEnv
+
+
+@pytest.mark.skipif(condition=H2TestEnv.is_unsupported, reason="mod_http2 not supported here")
+class TestRanges:
+
+ LOGFILE = ""
+
+ @pytest.fixture(autouse=True, scope='class')
+ def _class_scope(self, env):
+ TestRanges.LOGFILE = os.path.join(env.server_logs_dir, "test_008")
+ TestRanges.SRCDIR = os.path.dirname(inspect.getfile(TestRanges))
+ if os.path.isfile(TestRanges.LOGFILE):
+ os.remove(TestRanges.LOGFILE)
+ destdir = os.path.join(env.gen_dir, 'apache/htdocs/test1')
+ env.make_data_file(indir=destdir, fname="data-100m", fsize=100*1024*1024)
+ conf = H2Conf(env=env)
+ conf.add([
+ "CustomLog logs/test_008 combined"
+ ])
+ conf.add_vhost_cgi()
+ conf.add_vhost_test1()
+ conf.install()
+ assert env.apache_restart() == 0
+
+ def test_h2_008_01(self, env):
+ # issue: #203
+ resource = "data-1k"
+ full_length = 1000
+ chunk = 200
+ self.curl_upload_and_verify(env, resource, ["-v", "--http2"])
+ assert env.apache_restart() == 0
+ url = env.mkurl("https", "cgi", f"/files/{resource}?01full")
+ r = env.curl_get(url, 5, options=["--http2"])
+ assert r.response["status"] == 200
+ url = env.mkurl("https", "cgi", f"/files/{resource}?01range")
+ r = env.curl_get(url, 5, options=["--http1.1", "-H", "Range: bytes=0-{0}".format(chunk-1)])
+ assert 206 == r.response["status"]
+ assert chunk == len(r.response["body"].decode('utf-8'))
+ r = env.curl_get(url, 5, options=["--http2", "-H", "Range: bytes=0-{0}".format(chunk-1)])
+ assert 206 == r.response["status"]
+ assert chunk == len(r.response["body"].decode('utf-8'))
+ # Restart for logs to be flushed out
+ assert env.apache_restart() == 0
+ # now check what response lengths have actually been reported
+ detected = {}
+ for line in open(TestRanges.LOGFILE).readlines():
+ e = json.loads(line)
+ if e['request'] == f'GET /files/{resource}?01full HTTP/2.0':
+ assert e['bytes_rx_I'] > 0
+ assert e['bytes_resp_B'] == full_length
+ assert e['bytes_tx_O'] > full_length
+ detected['h2full'] = 1
+ elif e['request'] == f'GET /files/{resource}?01range HTTP/2.0':
+ assert e['bytes_rx_I'] > 0
+ assert e['bytes_resp_B'] == chunk
+ assert e['bytes_tx_O'] > chunk
+ assert e['bytes_tx_O'] < chunk + 256 # response + frame stuff
+ detected['h2range'] = 1
+ elif e['request'] == f'GET /files/{resource}?01range HTTP/1.1':
+ assert e['bytes_rx_I'] > 0 # input bytes received
+ assert e['bytes_resp_B'] == chunk # response bytes sent (payload)
+ assert e['bytes_tx_O'] > chunk # output bytes sent
+ detected['h1range'] = 1
+ assert 'h1range' in detected, f'HTTP/1.1 range request not found in {TestRanges.LOGFILE}'
+ assert 'h2range' in detected, f'HTTP/2 range request not found in {TestRanges.LOGFILE}'
+ assert 'h2full' in detected, f'HTTP/2 full request not found in {TestRanges.LOGFILE}'
+
+ def test_h2_008_02(self, env, repeat):
+ path = '/002.jpg'
+ res_len = 90364
+ url = env.mkurl("https", "test1", f'{path}?02full')
+ r = env.curl_get(url, 5)
+ assert r.response["status"] == 200
+ assert "HTTP/2" == r.response["protocol"]
+ h = r.response["header"]
+ assert "accept-ranges" in h
+ assert "bytes" == h["accept-ranges"]
+ assert "content-length" in h
+ clen = h["content-length"]
+ assert int(clen) == res_len
+ # get the first 1024 bytes of the resource, 206 status, but content-length as original
+ url = env.mkurl("https", "test1", f'{path}?02range')
+ r = env.curl_get(url, 5, options=["-H", "range: bytes=0-1023"])
+ assert 206 == r.response["status"]
+ assert "HTTP/2" == r.response["protocol"]
+ assert 1024 == len(r.response["body"])
+ assert "content-length" in h
+ assert clen == h["content-length"]
+ # Restart for logs to be flushed out
+ assert env.apache_restart() == 0
+ # now check what response lengths have actually been reported
+ found = False
+ for line in open(TestRanges.LOGFILE).readlines():
+ e = json.loads(line)
+ if e['request'] == f'GET {path}?02range HTTP/2.0':
+ assert e['bytes_rx_I'] > 0
+ assert e['bytes_resp_B'] == 1024
+ assert e['bytes_tx_O'] > 1024
+ assert e['bytes_tx_O'] < 1024 + 256 # response and frame stuff
+ found = True
+ break
+ assert found, f'request not found in {self.LOGFILE}'
+
+ # send a paced curl download that aborts in the middle of the transfer
+ def test_h2_008_03(self, env, repeat):
+ path = '/data-100m'
+ url = env.mkurl("https", "test1", f'{path}?03broken')
+ r = env.curl_get(url, 5, options=[
+ '--limit-rate', '2k', '-m', '2'
+ ])
+ assert r.exit_code != 0, f'{r}'
+ found = False
+ for line in open(TestRanges.LOGFILE).readlines():
+ e = json.loads(line)
+ if e['request'] == f'GET {path}?03broken HTTP/2.0':
+ assert e['bytes_rx_I'] > 0
+ assert e['bytes_resp_B'] == 100*1024*1024
+ assert e['bytes_tx_O'] > 1024
+ assert e['bytes_tx_O'] < 5*1024*1024 # curl buffers, but not that much
+ found = True
+ break
+ assert found, f'request not found in {self.LOGFILE}'
+
+ # upload and GET again using curl, compare to original content
+ def curl_upload_and_verify(self, env, fname, options=None):
+ url = env.mkurl("https", "cgi", "/upload.py")
+ fpath = os.path.join(env.gen_dir, fname)
+ r = env.curl_upload(url, fpath, options=options)
+ assert r.exit_code == 0, f"{r}"
+ assert 200 <= r.response["status"] < 300
+
+ r2 = env.curl_get(r.response["header"]["location"])
+ assert r2.exit_code == 0
+ assert r2.response["status"] == 200
+ with open(os.path.join(TestRanges.SRCDIR, fpath), mode='rb') as file:
+ src = file.read()
+ assert src == r2.response["body"]
+
--- /dev/null
+import inspect
+import json
+import os
+import pytest
+
+from .env import H2Conf, H2TestEnv
+
+
+@pytest.mark.skipif(condition=H2TestEnv.is_unsupported, reason="mod_http2 not supported here")
+class TestTiming:
+
+ LOGFILE = ""
+
+ @pytest.fixture(autouse=True, scope='class')
+ def _class_scope(self, env):
+ TestTiming.LOGFILE = os.path.join(env.server_logs_dir, "test_009")
+ if os.path.isfile(TestTiming.LOGFILE):
+ os.remove(TestTiming.LOGFILE)
+ conf = H2Conf(env=env)
+ conf.add([
+ "CustomLog logs/test_009 combined"
+ ])
+ conf.add_vhost_cgi()
+ conf.add_vhost_test1()
+ conf.install()
+ assert env.apache_restart() == 0
+
+ # check that we get a positive time_taken reported on a simple GET
+ def test_h2_009_01(self, env):
+ path = '/002.jpg'
+ url = env.mkurl("https", "test1", f'{path}?01')
+ args = [
+ env.h2load, "-n", "1", "-c", "1", "-m", "1",
+ f"--connect-to=localhost:{env.https_port}",
+ f"--base-uri={url}", url
+ ]
+ r = env.run(args)
+ # Restart for logs to be flushed out
+ assert env.apache_restart() == 0
+ found = False
+ for line in open(TestTiming.LOGFILE).readlines():
+ e = json.loads(line)
+ if e['request'] == f'GET {path}?01 HTTP/2.0':
+ assert e['time_taken'] > 0
+ found = True
+ assert found, f'request not found in {TestTiming.LOGFILE}'
+
+ # test issue #253, where time_taken in a keepalive situation is not
+ # reported until the next request arrives
+ def test_h2_009_02(self, env):
+ baseurl = env.mkurl("https", "test1", '/')
+ tscript = os.path.join(env.gen_dir, 'h2load-timing-009_02')
+ with open(tscript, 'w') as fd:
+ fd.write('\n'.join([
+ f'0.0\t/002.jpg?02a', # 1st request right away
+ f'1000.0\t/002.jpg?02b', # 2nd a second later
+ ]))
+ args = [
+ env.h2load,
+ f'--timing-script-file={tscript}',
+ f"--connect-to=localhost:{env.https_port}",
+ f"--base-uri={baseurl}"
+ ]
+ r = env.run(args)
+ # Restart for logs to be flushed out
+ assert env.apache_restart() == 0
+ found = False
+ for line in open(TestTiming.LOGFILE).readlines():
+ e = json.loads(line)
+ if e['request'] == f'GET /002.jpg?02a HTTP/2.0':
+ assert e['time_taken'] > 0
+ assert e['time_taken'] < 500 * 1000, f'time for 1st request not reported correctly'
+ found = True
+ assert found, f'request not found in {TestTiming.LOGFILE}'
r = env.nghttp().get(url, options=opt)
assert r.exit_code == 0, r
assert r.response is None
- if r.response is not None:
- assert r.response["status"] == 400
url = env.mkurl("https", "cgi", "/proxy/hello.py")
r = env.nghttp().get(url, options=opt)
assert r.exit_code == 0, r
assert env.apache_restart() == 0
url = env.mkurl("https", "test1", "/index.html")
r = env.curl_get(url, options=['--http2'])
+ if status == 500 and r.exit_code != 0:
+ # in 2.4.x we fail late on control chars in a response
+ # and RST_STREAM. That's also ok
+ return
assert r.response["status"] == status
if int(status) < 400:
assert r.response["header"][hname] == expvalue
<Location /006-nohints.html>
Header add Link "</006/006.css>;rel=preload"
</Location>
+ <Location /006-early.html>
+ H2EarlyHint Link "</006/006.css>;rel=preload;as=style"
+ </Location>
+ <Location /006-early-no-push.html>
+ H2Push off
+ H2EarlyHint Link "</006/006.css>;rel=preload;as=style"
+ </Location>
""").end_vhost(
).install()
assert env.apache_restart() == 0
promises = r.results["streams"][r.response["id"]]["promises"]
assert 1 == len(promises)
assert "previous" not in r.response
+
+ # H2EarlyHints enabled in general, check that it works for H2EarlyHint
+ def test_h2_401_33(self, env, repeat):
+ url = env.mkurl("https", "hints", "/006-early.html")
+ r = env.nghttp().get(url)
+ assert r.response["status"] == 200
+ promises = r.results["streams"][r.response["id"]]["promises"]
+ assert 1 == len(promises)
+ early = r.response["previous"]
+ assert early
+ assert 103 == int(early["header"][":status"])
+ assert early["header"]["link"] == '</006/006.css>;rel=preload;as=style'
+
+ # H2EarlyHints enabled, no PUSH, check that it works for H2EarlyHint
+ def test_h2_401_34(self, env, repeat):
+ url = env.mkurl("https", "hints", "/006-early-no-push.html")
+ r = env.nghttp().get(url)
+ assert r.response["status"] == 200
+ promises = r.results["streams"][r.response["id"]]["promises"]
+ assert 0 == len(promises)
+ early = r.response["previous"]
+ assert early
+ assert 103 == int(early["header"][":status"])
+ assert early["header"]["link"] == '</006/006.css>;rel=preload;as=style'
+
self.curl_upload_and_verify(env, "data-100k", ["--http2"])
self.curl_upload_and_verify(env, "data-1m", ["--http2"])
+ def test_h2_500_11(self, env):
+ self.curl_upload_and_verify(env, "data-1k", [
+ "--http1.1", "-H", "Content-Length:", "-H", "Transfer-Encoding: chunked"
+ ])
+ self.curl_upload_and_verify(env, "data-1k", ["--http2", "-H", "Content-Length:"])
+
# POST some data using nghttp and see it echo'ed properly back
def nghttp_post_and_verify(self, env, fname, options=None):
url = env.mkurl("https", "cgi", "/proxy/echo.py")
ServerName localhost
ServerRoot "${server_dir}"
+DefaultRuntimeDir logs
+PidFile httpd.pid
+
Include "conf/modules.conf"
DocumentRoot "${server_dir}/htdocs"
<IfModule log_config_module>
- LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\" %k" combined
+ LogFormat "{ \"request\": \"%r\", \"status\": %>s, \"bytes_resp_B\": %B, \"bytes_tx_O\": %O, \"bytes_rx_I\": %I, \"bytes_rx_tx_S\": %S, \"time_taken\": %D }" combined
LogFormat "%h %l %u %t \"%r\" %>s %b" common
CustomLog "logs/access_log" combined
if HttpdTestEnv.LIBEXEC_DIR is None:
HttpdTestEnv.LIBEXEC_DIR = self._libexec_dir = self.get_apxs_var('LIBEXECDIR')
self._curl = self.config.get('global', 'curl_bin')
+ if 'CURL' in os.environ:
+ self._curl = os.environ['CURL']
self._nghttp = self.config.get('global', 'nghttp')
if self._nghttp is None:
self._nghttp = 'nghttp'
}
run.add_results({"h2load": stats})
return run
+
+ def make_data_file(self, indir: str, fname: str, fsize: int) -> str:
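+ # Write a file of exactly `fsize` bytes: each 1024-byte line
+ # starts with its 9-digit index, making byte offsets easy to
+ # check in range responses.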
+ fpath = os.path.join(indir, fname)
+ s10 = "0123456789"
+ s = (101 * s10) + s10[0:3]
+ with open(fpath, 'w') as fd:
+ for i in range(int(fsize / 1024)):
+ fd.write(f"{i:09d}-{s}\n")
+ remain = int(fsize % 1024)
+ if remain != 0:
+ i = int(fsize / 1024) + 1
+ s = f"{i:09d}-{s}\n"
+ fd.write(s[0:remain])
+ return fpath
+