modules/http2/h2_alt_svc.c modules/http2/h2_bucket_eoc.c
modules/http2/h2_bucket_eos.c modules/http2/h2_config.c
modules/http2/h2_conn.c modules/http2/h2_conn_io.c
- modules/http2/h2_ctx.c modules/http2/h2_from_h1.c
- modules/http2/h2_h2.c modules/http2/h2_io.c
- modules/http2/h2_io_set.c modules/http2/h2_mplx.c
- modules/http2/h2_push.c modules/http2/h2_request.c
- modules/http2/h2_response.c modules/http2/h2_session.c
- modules/http2/h2_stream.c modules/http2/h2_stream_set.c
- modules/http2/h2_switch.c modules/http2/h2_task.c
- modules/http2/h2_task_input.c modules/http2/h2_task_output.c
- modules/http2/h2_task_queue.c modules/http2/h2_util.c
- modules/http2/h2_worker.c modules/http2/h2_workers.c
+ modules/http2/h2_ctx.c modules/http2/h2_filter.c
+ modules/http2/h2_from_h1.c modules/http2/h2_h2.c
+ modules/http2/h2_io.c modules/http2/h2_io_set.c
+ modules/http2/h2_mplx.c modules/http2/h2_push.c
+ modules/http2/h2_request.c modules/http2/h2_response.c
+ modules/http2/h2_session.c modules/http2/h2_stream.c
+ modules/http2/h2_stream_set.c modules/http2/h2_switch.c
+ modules/http2/h2_task.c modules/http2/h2_task_input.c
+ modules/http2/h2_task_output.c modules/http2/h2_task_queue.c
+ modules/http2/h2_util.c modules/http2/h2_worker.c
+ modules/http2/h2_workers.c
)
SET(mod_ldap_extra_defines LDAP_DECLARE_EXPORT)
SET(mod_ldap_extra_libs wldap32)
</summary>
+ <section id="envvars"><title>Environment Variables</title>
+
+ <p>This module can be configured to provide HTTP/2 related information
+ as additional environment variables to the SSI and CGI namespace.
+ </p>
+
+ <table border="1">
+ <columnspec><column width=".3"/><column width=".2"/><column width=".5"/>
+ </columnspec>
+ <tr>
+ <th><a name="table3">Variable Name:</a></th>
+ <th>Value Type:</th>
+ <th>Description:</th>
+ </tr>
+ <tr><td><code>HTTP2</code></td> <td>flag</td> <td>HTTP/2 is being used.</td></tr>
+ <tr><td><code>H2PUSH</code></td> <td>flag</td> <td>HTTP/2 Server Push is enabled for this request and also supported by the client.</td></tr>
+ </table>
+
+ </section>
+
<directivesynopsis>
<name>H2Direct</name>
<description>H2 Direct Protocol Switch</description>
</usage>
</directivesynopsis>
+ <directivesynopsis>
+ <name>H2PushDiarySize</name>
+ <description>H2 Server Push Diary Size</description>
+ <syntax>H2PushDiarySize n</syntax>
+ <default>H2PushDiarySize 256</default>
+ <contextlist>
+ <context>server config</context>
+ <context>virtual host</context>
+ </contextlist>
+ <compatibility>Available in version 2.4.19 and later.</compatibility>
+
+ <usage>
+ <p>
+ This directive sets the maximum number of HTTP/2 server pushes
+ that are remembered per HTTP/2 connection. This can be used inside the
+ <directive module="core" type="section">VirtualHost</directive>
+ section to influence the number for all connections to that virtual host.
+ </p>
+ <p>
+ The push diary records a digest (currently using a 64 bit number) of pushed
+ resources (their URL) to avoid duplicate pushes on the same connection.
+ These values are not persisted, so clients opening a new connection
+ will experience known pushes again. There is ongoing work to enable
+ a client to disclose a digest of the resources it already has, so
+ the diary may be initialized by the client on each connection setup.
+ </p>
+ <p>
+ If the maximum size is reached, newer entries replace the oldest
+ ones. A diary entry uses 8 bytes, letting a
+ default diary with 256 entries consume around 2 KB of memory.
+ </p>
+ <p>
+ A size of 0 will effectively disable the push diary.
+ </p>
+ </usage>
+ </directivesynopsis>
+
<directivesynopsis>
<name>H2PushPriority</name>
<description>H2 Server Push Priority</description>
<name>H2WindowSize</name>
<description>Size of Stream Window for upstream data.</description>
<syntax>H2WindowSize <em>bytes</em></syntax>
- <default>H2WindowSize 65536</default>
+ <default>H2WindowSize 65535</default>
<contextlist>
<context>server config</context>
<context>virtual host</context>
<name>H2SessionExtraFiles</name>
<description>Number of Extra File Handles</description>
<syntax>H2SessionExtraFiles <em>n</em></syntax>
- <default>H2SessionExtraFiles 5</default>
<contextlist>
<context>server config</context>
<context>virtual host</context>
H2SessionExtraFiles 10
</highlight>
</example>
+ <p>
+ If nothing is configured, the module tries to make a conservative
+ guess how many files are safe to use. This depends largely on the
+ MPM chosen.
+ </p>
</usage>
</directivesynopsis>
</usage>
</directivesynopsis>
+ <directivesynopsis>
+ <name>H2Timeout</name>
+ <description>Timeout (in seconds) for HTTP/2 connections</description>
+ <syntax>H2Timeout seconds</syntax>
+ <default>H2Timeout 5</default>
+ <contextlist>
+ <context>server config</context>
+ <context>virtual host</context>
+ </contextlist>
+ <compatibility>Available in version 2.4.19 and later.</compatibility>
+
+ <usage>
+ <p>
+ This directive sets the timeout for read/write operations on
+ connections where HTTP/2 is negotiated. This can be used server wide or for specific
+ <directive module="core" type="section">VirtualHost</directive>s.
+ </p>
+ <p>
+ This directive is similar to the
+ <directive module="core">Timeout</directive> directive, but
+ applies only to HTTP/2 connections.
+ </p>
+ <p>
+ A value of 0 enforces no timeout.
+ </p>
+ </usage>
+ </directivesynopsis>
+
+ <directivesynopsis>
+ <name>H2KeepAliveTimeout</name>
+ <description>Timeout (in seconds) for idle HTTP/2 connections</description>
+ <syntax>H2KeepAliveTimeout seconds</syntax>
+ <contextlist>
+ <context>server config</context>
+ <context>virtual host</context>
+ </contextlist>
+ <compatibility>Available in version 2.4.19 and later.</compatibility>
+
+ <usage>
+ <p>
+ This directive sets the timeout for read/write operations on
+ idle connections where HTTP/2 is negotiated. This can be used server wide or for specific
+ <directive module="core" type="section">VirtualHost</directive>s.
+ </p>
+ <p>
+ This directive is similar to the
+ <directive module="core">KeepAliveTimeout</directive> directive, but
+ applies only to HTTP/2 connections. An HTTP/2 connection is considered
+ idle when no streams are open, i.e. no requests are ongoing.
+ </p>
+ <p>
+ By default, for non-async MPMs (prefork, worker) the keepalive timeout
+ will be the same as H2Timeout. For async MPMs, the keepalive handling for
+ HTTP/1 connections applies as no special action is taken.
+ </p>
+ </usage>
+ </directivesynopsis>
+
+ <directivesynopsis>
+ <name>H2StreamTimeout</name>
+ <description>Timeout (in seconds) for HTTP/2 streams</description>
+ <syntax>H2StreamTimeout seconds</syntax>
+ <default>H2StreamTimeout 0</default>
+ <contextlist>
+ <context>server config</context>
+ <context>virtual host</context>
+ </contextlist>
+ <compatibility>Available in version 2.4.19 and later.</compatibility>
+
+ <usage>
+ <p>
+ This directive sets the timeout for read/write operations on
+ HTTP/2 streams, e.g. individual requests. This can be used server wide or for specific
+ <directive module="core" type="section">VirtualHost</directive>s.
+ </p>
+ <p>
+ Due to the nature of HTTP/2, which sends multiple requests over a single
+ connection and has priority scheduling, individual streams might not
+ see input for much longer times than HTTP/1.1 requests would.
+ </p>
+ <p>
+ A value of 0 enforces no timeout, so streams may wait indefinitely
+ for a chance to receive input or write data. This exposes a server to
+ risks of thread exhaustion.
+ </p>
+ <p>
+ Depending on your handling of pushed streams,
+ priorities and general responsiveness, a site might need to increase
+ this value. For example, if you PUSH a large resource <em>before</em>
+ the requested one, the initial stream will not write until the
+ pushed resource is fully sent.
+ </p>
+ </usage>
+ </directivesynopsis>
+
</modulesynopsis>
h2_conn.lo dnl
h2_conn_io.lo dnl
h2_ctx.lo dnl
+h2_filter.lo dnl
h2_from_h1.lo dnl
h2_h2.lo dnl
h2_io.lo dnl
handling. Implemented by mod_http2. This module requires a libnghttp2 installation.
See --with-nghttp2 on how to manage non-standard locations. This module
is usually linked shared and requires loading. ], $http2_objs, , most, [
+ APACHE_CHECK_OPENSSL
+ if test "$ac_cv_openssl" = "yes" ; then
+ APR_ADDTO(MOD_CPPFLAGS, ["-DH2_OPENSSL"])
+ fi
+
APACHE_CHECK_NGHTTP2
if test "$ac_cv_nghttp2" = "yes" ; then
if test "x$enable_http2" = "xshared"; then
fi
])
+# Ensure that other modules can pick up mod_http2.h
+APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current])
+
dnl # end of module specific part
APACHE_MODPATH_FINISH
* - do not percent encode token values
* - do not use quotation marks
*/
-h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool) {
+h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool)
+{
const char *sep = ap_strchr_c(s, '=');
if (sep) {
- const char *alpn = apr_pstrndup(pool, s, sep - s);
+ const char *alpn = apr_pstrmemdup(pool, s, sep - s);
const char *host = NULL;
int port = 0;
s = sep + 1;
sep = ap_strchr_c(s, ':'); /* mandatory : */
if (sep) {
if (sep != s) { /* optional host */
- host = apr_pstrndup(pool, s, sep - s);
+ host = apr_pstrmemdup(pool, s, sep - s);
}
s = sep + 1;
if (*s) { /* must be a port number */
static int h2_alt_svc_handler(request_rec *r)
{
- h2_ctx *ctx;
const h2_config *cfg;
int i;
return DECLINED;
}
- ctx = h2_ctx_rget(r);
- if (h2_ctx_is_active(ctx) || h2_ctx_is_task(ctx)) {
+ if (h2_ctx_rget(r)) {
return DECLINED;
}
}
}
if (*alt_svc) {
- apr_table_set(r->headers_out, "Alt-Svc", alt_svc);
+ apr_table_setn(r->headers_out, "Alt-Svc", alt_svc);
}
}
}
return DECLINED;
}
-
1, /* TLS cooldown secs */
1, /* HTTP/2 server push enabled */
NULL, /* map of content-type to priorities */
+ -1, /* connection timeout */
+ -1, /* keepalive timeout */
+ 0, /* stream timeout */
+ 256, /* push diary size */
+
};
-static int files_per_session = 0;
-
-void h2_config_init(apr_pool_t *pool) {
- /* Determine a good default for this platform and mpm?
- * TODO: not sure how APR wants to hand out this piece of
- * information.
- */
- int max_files = 256;
- int conn_threads = 1;
- int tx_files = max_files / 4;
-
+void h2_config_init(apr_pool_t *pool)
+{
(void)pool;
- ap_mpm_query(AP_MPMQ_MAX_THREADS, &conn_threads);
- switch (h2_conn_mpm_type()) {
- case H2_MPM_PREFORK:
- case H2_MPM_WORKER:
- case H2_MPM_EVENT:
- /* allow that many transfer open files per mplx */
- files_per_session = (tx_files / conn_threads);
- break;
- default:
- /* don't know anything about it, stay safe */
- break;
- }
}
static void *h2_config_create(apr_pool_t *pool,
h2_config *conf = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
const char *s = x? x : "unknown";
- char *name = apr_pcalloc(pool, strlen(prefix) + strlen(s) + 20);
- strcpy(name, prefix);
- strcat(name, "[");
- strcat(name, s);
- strcat(name, "]");
+ char *name = apr_pstrcat(pool, prefix, "[", s, "]", NULL);
conf->name = name;
conf->h2_max_streams = DEF_VAL;
conf->tls_cooldown_secs = DEF_VAL;
conf->h2_push = DEF_VAL;
conf->priorities = NULL;
+ conf->h2_timeout = DEF_VAL;
+ conf->h2_keepalive = DEF_VAL;
+ conf->h2_stream_timeout = DEF_VAL;
+ conf->push_diary_size = DEF_VAL;
return conf;
}
h2_config *add = (h2_config *)addv;
h2_config *n = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
- char *name = apr_pcalloc(pool, 20 + strlen(add->name) + strlen(base->name));
- strcpy(name, "merged[");
- strcat(name, add->name);
- strcat(name, ", ");
- strcat(name, base->name);
- strcat(name, "]");
+ char *name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL);
n->name = name;
n->h2_max_streams = H2_CONFIG_GET(add, base, h2_max_streams);
else {
n->priorities = add->priorities? add->priorities : base->priorities;
}
+ n->h2_timeout = H2_CONFIG_GET(add, base, h2_timeout);
+ n->h2_keepalive = H2_CONFIG_GET(add, base, h2_keepalive);
+ n->h2_stream_timeout = H2_CONFIG_GET(add, base, h2_stream_timeout);
+ n->push_diary_size = H2_CONFIG_GET(add, base, push_diary_size);
return n;
}
apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var)
{
- int n;
switch(var) {
case H2_CONF_MAX_STREAMS:
return H2_CONFIG_GET(conf, &defconf, h2_max_streams);
case H2_CONF_DIRECT:
return H2_CONFIG_GET(conf, &defconf, h2_direct);
case H2_CONF_SESSION_FILES:
- n = H2_CONFIG_GET(conf, &defconf, session_extra_files);
- if (n < 0) {
- n = files_per_session;
- }
- return n;
+ return H2_CONFIG_GET(conf, &defconf, session_extra_files);
case H2_CONF_TLS_WARMUP_SIZE:
return H2_CONFIG_GET(conf, &defconf, tls_warmup_size);
case H2_CONF_TLS_COOLDOWN_SECS:
return H2_CONFIG_GET(conf, &defconf, tls_cooldown_secs);
case H2_CONF_PUSH:
return H2_CONFIG_GET(conf, &defconf, h2_push);
+ case H2_CONF_TIMEOUT_SECS:
+ return H2_CONFIG_GET(conf, &defconf, h2_timeout);
+ case H2_CONF_KEEPALIVE_SECS:
+ return H2_CONFIG_GET(conf, &defconf, h2_keepalive);
+ case H2_CONF_STREAM_TIMEOUT_SECS:
+ return H2_CONFIG_GET(conf, &defconf, h2_stream_timeout);
+ case H2_CONF_PUSH_DIARY_SIZE:
+ return H2_CONFIG_GET(conf, &defconf, push_diary_size);
default:
return DEF_VAL;
}
cfg->h2_window_size = (int)apr_atoi64(value);
(void)arg;
if (cfg->h2_window_size < 1024) {
- return "value must be > 1k";
+ return "value must be >= 1024";
}
return NULL;
}
cfg->min_workers = (int)apr_atoi64(value);
(void)arg;
if (cfg->min_workers < 1) {
- return "value must be > 1";
+ return "value must be > 0";
}
return NULL;
}
cfg->max_workers = (int)apr_atoi64(value);
(void)arg;
if (cfg->max_workers < 1) {
- return "value must be > 1";
+ return "value must be > 0";
}
return NULL;
}
cfg->max_worker_idle_secs = (int)apr_atoi64(value);
(void)arg;
if (cfg->max_worker_idle_secs < 1) {
- return "value must be > 1";
+ return "value must be > 0";
}
return NULL;
}
cfg->stream_max_mem_size = (int)apr_atoi64(value);
(void)arg;
if (cfg->stream_max_mem_size < 1024) {
- return "value must be > 1k";
+ return "value must be >= 1024";
}
return NULL;
}
return NULL;
}
+static const char *h2_conf_set_timeout(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ (void)arg;
+ cfg->h2_timeout = (int)apr_atoi64(value);
+ if (cfg->h2_timeout < 0) {
+ return "value must be >= 0";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_keepalive(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ (void)arg;
+ cfg->h2_keepalive = (int)apr_atoi64(value);
+ if (cfg->h2_keepalive < 0) {
+ return "value must be >= 0";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_stream_timeout(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ (void)arg;
+ cfg->h2_stream_timeout = (int)apr_atoi64(value);
+ if (cfg->h2_stream_timeout < 0) {
+ return "value must be >= 0";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_push_diary_size(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ (void)arg;
+ cfg->push_diary_size = (int)apr_atoi64(value);
+ if (cfg->push_diary_size < 0) {
+ return "value must be >= 0";
+ }
+ if (cfg->push_diary_size > 0 && (cfg->push_diary_size & (cfg->push_diary_size-1))) {
+ return "value must a power of 2";
+ }
+ if (cfg->push_diary_size > (1 << 15)) {
+ return "value must <= 65536";
+ }
+ return NULL;
+}
#define AP_END_CMD AP_INIT_TAKE1(NULL, NULL, NULL, RSRC_CONF, NULL)
RSRC_CONF, "off to disable HTTP/2 server push"),
AP_INIT_TAKE23("H2PushPriority", h2_conf_add_push_priority, NULL,
RSRC_CONF, "define priority of PUSHed resources per content type"),
+ AP_INIT_TAKE1("H2Timeout", h2_conf_set_timeout, NULL,
+ RSRC_CONF, "read/write timeout (seconds) for HTTP/2 connections"),
+ AP_INIT_TAKE1("H2KeepAliveTimeout", h2_conf_set_keepalive, NULL,
+ RSRC_CONF, "timeout (seconds) for idle HTTP/2 connections, no streams open"),
+ AP_INIT_TAKE1("H2StreamTimeout", h2_conf_set_stream_timeout, NULL,
+ RSRC_CONF, "read/write timeout (seconds) for HTTP/2 streams"),
+ AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL,
+ RSRC_CONF, "size of push diary"),
AP_END_CMD
};
const h2_config *h2_config_get(conn_rec *c)
{
- h2_ctx *ctx = h2_ctx_get(c);
+ h2_ctx *ctx = h2_ctx_get(c, 0);
- if (ctx->config) {
- return ctx->config;
- }
- else if (ctx->server) {
- ctx->config = h2_config_sget(ctx->server);
- return ctx->config;
+ if (ctx) {
+ if (ctx->config) {
+ return ctx->config;
+ }
+ else if (ctx->server) {
+ ctx->config = h2_config_sget(ctx->server);
+ return ctx->config;
+ }
}
return h2_config_sget(c->base_server);
}
-
H2_CONF_TLS_WARMUP_SIZE,
H2_CONF_TLS_COOLDOWN_SECS,
H2_CONF_PUSH,
+ H2_CONF_TIMEOUT_SECS,
+ H2_CONF_KEEPALIVE_SECS,
+ H2_CONF_STREAM_TIMEOUT_SECS,
+ H2_CONF_PUSH_DIARY_SIZE,
} h2_config_var_t;
struct apr_hash_t;
int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
int h2_push; /* if HTTP/2 server push is enabled */
struct apr_hash_t *priorities;/* map of content-type to h2_priority records */
+
+ int h2_timeout; /* timeout for http/2 connections */
+ int h2_keepalive; /* timeout for idle connections, no streams */
+ int h2_stream_timeout; /* timeout for http/2 streams, slave connections */
+ int push_diary_size; /* # of entries in push diary */
} h2_config;
const struct h2_priority *h2_config_get_priority(const h2_config *conf,
const char *content_type);
-
+
#endif /* __mod_h2__h2_config_h__ */
#include "h2_private.h"
#include "h2_config.h"
#include "h2_ctx.h"
+#include "h2_filter.h"
#include "h2_mplx.h"
#include "h2_session.h"
#include "h2_stream.h"
#include "h2_worker.h"
#include "h2_workers.h"
#include "h2_conn.h"
+#include "h2_version.h"
static struct h2_workers *workers;
static h2_mpm_type_t mpm_type = H2_MPM_UNKNOWN;
static module *mpm_module;
-static int checked;
+static int async_mpm;
-static void check_modules(void)
+static void check_modules(int force)
{
+ static int checked = 0;
int i;
- if (!checked) {
+
+ if (force || !checked) {
for (i = 0; ap_loaded_modules[i]; ++i) {
module *m = ap_loaded_modules[i];
+
if (!strcmp("event.c", m->name)) {
mpm_type = H2_MPM_EVENT;
mpm_module = m;
+ break;
}
else if (!strcmp("worker.c", m->name)) {
mpm_type = H2_MPM_WORKER;
mpm_module = m;
+ break;
}
else if (!strcmp("prefork.c", m->name)) {
mpm_type = H2_MPM_PREFORK;
mpm_module = m;
+ break;
}
}
checked = 1;
{
const h2_config *config = h2_config_sget(s);
apr_status_t status = APR_SUCCESS;
- int minw = h2_config_geti(config, H2_CONF_MIN_WORKERS);
- int maxw = h2_config_geti(config, H2_CONF_MAX_WORKERS);
-
+ int minw, maxw, max_tx_handles, n;
int max_threads_per_child = 0;
- int threads_limit = 0;
int idle_secs = 0;
- int i;
- h2_config_init(pool);
+ check_modules(1);
ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads_per_child);
- ap_mpm_query(AP_MPMQ_HARD_LIMIT_THREADS, &threads_limit);
- for (i = 0; ap_loaded_modules[i]; ++i) {
- module *m = ap_loaded_modules[i];
- if (!strcmp("event.c", m->name)) {
- mpm_type = H2_MPM_EVENT;
- mpm_module = m;
- }
- else if (!strcmp("worker.c", m->name)) {
- mpm_type = H2_MPM_WORKER;
- mpm_module = m;
- }
- else if (!strcmp("prefork.c", m->name)) {
- mpm_type = H2_MPM_PREFORK;
- mpm_module = m;
- }
+ status = ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm);
+ if (status != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, status, s, "querying MPM for async");
+ /* some MPMs do not implement this */
+ async_mpm = 0;
+ status = APR_SUCCESS;
}
+
+ h2_config_init(pool);
+ minw = h2_config_geti(config, H2_CONF_MIN_WORKERS);
+ maxw = h2_config_geti(config, H2_CONF_MAX_WORKERS);
if (minw <= 0) {
minw = max_threads_per_child;
}
if (maxw <= 0) {
- maxw = threads_limit;
- if (maxw < minw) {
- maxw = minw;
- }
+ maxw = minw;
}
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
- "h2_workers: min=%d max=%d, mthrpchild=%d, thr_limit=%d",
- minw, maxw, max_threads_per_child, threads_limit);
+ /* How many file handles is it safe to use for transfer
+ * to the master connection to be streamed out?
+ * Is there a portable APR rlimit on NOFILES? Have not
+ * found it. And if, how many of those would we set aside?
+ * This leads all into a process wide handle allocation strategy
+ * which ultimately would limit the number of accepted connections
+ * with the assumption of implicitly reserving n handles for every
+ * connection and requiring modules with excessive needs to allocate
+ * from a central pool.
+ */
+ n = h2_config_geti(config, H2_CONF_SESSION_FILES);
+ if (n < 0) {
+ max_tx_handles = maxw * 2;
+ }
+ else {
+ max_tx_handles = maxw * n;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "h2_workers: min=%d max=%d, mthrpchild=%d, tx_files=%d",
+ minw, maxw, max_threads_per_child, max_tx_handles);
+ workers = h2_workers_create(s, pool, minw, maxw, max_tx_handles);
- workers = h2_workers_create(s, pool, minw, maxw);
idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS);
h2_workers_set_max_idle_secs(workers, idle_secs);
-
+
+ ap_register_input_filter("H2_IN", h2_filter_core_input,
+ NULL, AP_FTYPE_CONNECTION);
+
return status;
}
-h2_mpm_type_t h2_conn_mpm_type(void) {
- check_modules();
+h2_mpm_type_t h2_conn_mpm_type(void)
+{
+ check_modules(0);
return mpm_type;
}
-static module *h2_conn_mpm_module(void) {
- check_modules();
+static module *h2_conn_mpm_module(void)
+{
+ check_modules(0);
return mpm_module;
}
-apr_status_t h2_conn_process(conn_rec *c, request_rec *r, server_rec *s)
+apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r)
{
- apr_status_t status;
h2_session *session;
- const h2_config *config;
- int rv;
if (!workers) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02911)
return APR_EGENERAL;
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, "h2_conn_process start");
-
- if (!s && r) {
- s = r->server;
- }
-
- config = s? h2_config_sget(s) : h2_config_get(c);
if (r) {
- session = h2_session_rcreate(r, config, workers);
+ session = h2_session_rcreate(r, ctx, workers);
}
else {
- session = h2_session_create(c, config, workers);
+ session = h2_session_create(c, ctx, workers);
}
-
- if (!h2_is_acceptable_connection(c, 1)) {
- nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, 0,
- NGHTTP2_INADEQUATE_SECURITY, NULL, 0);
- }
- ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
- status = h2_session_start(session, &rv);
+ h2_ctx_session_set(ctx, session);
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c)
+{
+ apr_status_t status;
+ int mpm_state = 0;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
- "h2_session(%ld): starting on %s:%d", session->id,
- session->c->base_server->server_hostname,
- session->c->local_addr->port);
- if (status != APR_SUCCESS) {
- h2_session_abort(session, status, rv);
- h2_session_eoc_callback(session);
- return status;
- }
+ do {
+ if (c->cs) {
+ c->cs->sense = CONN_SENSE_DEFAULT;
+ }
+ status = h2_session_process(h2_ctx_session_get(ctx), async_mpm);
+
+ if (c->cs) {
+ c->cs->state = CONN_STATE_WRITE_COMPLETION;
+ }
+ if (APR_STATUS_IS_EOF(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ "h2_session(%ld): process, closing conn", c->id);
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ c->keepalive = AP_CONN_KEEPALIVE;
+ }
+
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ break;
+ }
+ } while (!async_mpm
+ && c->keepalive == AP_CONN_KEEPALIVE
+ && mpm_state != AP_MPMQ_STOPPING);
- status = h2_session_process(session);
-
- ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, session->c,
- "h2_session(%ld): done", session->id);
- /* Make sure this connection gets closed properly. */
- ap_update_child_status_from_conn(c->sbh, SERVER_CLOSING, c);
- c->keepalive = AP_CONN_CLOSE;
- if (c->cs) {
- c->cs->state = CONN_STATE_WRITE_COMPLETION;
- }
-
- h2_session_close(session);
- /* hereafter session will be gone */
- return status;
+ return DONE;
}
static void fix_event_conn(conn_rec *c, conn_rec *master);
-conn_rec *h2_conn_create(conn_rec *master, apr_pool_t *pool)
+conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *p,
+ apr_thread_t *thread, apr_socket_t *socket)
{
conn_rec *c;
AP_DEBUG_ASSERT(master);
-
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
+ "h2_conn(%ld): created from master", master->id);
+
/* This is like the slave connection creation from 2.5-DEV. A
* very efficient way - not sure how compatible this is, since
* the core hooks are no longer run.
* But maybe it's is better this way, not sure yet.
*/
- c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
+ c = (conn_rec *) apr_palloc(p, sizeof(conn_rec));
if (c == NULL) {
- ap_log_perror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, pool,
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master,
APLOGNO(02913) "h2_task: creating conn");
return NULL;
}
memcpy(c, master, sizeof(conn_rec));
- c->id = (master->id & (long)pool);
- c->master = master;
- c->input_filters = NULL;
- c->output_filters = NULL;
- c->pool = pool;
- return c;
-}
-
-apr_status_t h2_conn_setup(h2_task *task, apr_bucket_alloc_t *bucket_alloc,
- apr_thread_t *thread, apr_socket_t *socket)
-{
- conn_rec *master = task->mplx->c;
-
- ap_log_perror(APLOG_MARK, APLOG_TRACE3, 0, task->pool,
- "h2_conn(%ld): created from master", master->id);
-
- /* Ok, we are just about to start processing the connection and
- * the worker is calling us to setup all necessary resources.
- * We can borrow some from the worker itself and some we do as
- * sub-resources from it, so that we get a nice reuse of
- * pools.
- */
- task->c->pool = task->pool;
- task->c->current_thread = thread;
- task->c->bucket_alloc = bucket_alloc;
+
+ /* Replace these */
+ c->id = (master->id & (long)p);
+ c->master = master;
+ c->pool = p;
+ c->current_thread = thread;
+ c->conn_config = ap_create_conn_config(p);
+ c->notes = apr_table_make(p, 5);
+ c->input_filters = NULL;
+ c->output_filters = NULL;
+ c->bucket_alloc = apr_bucket_alloc_create(p);
+ c->cs = NULL;
+ c->data_in_input_filters = 0;
+ c->data_in_output_filters = 0;
+ c->clogging_input_filters = 1;
+ c->log = NULL;
+ c->log_id = NULL;
- task->c->conn_config = ap_create_conn_config(task->pool);
- task->c->notes = apr_table_make(task->pool, 5);
+ /* TODO: these should be unique to this thread */
+ c->sbh = master->sbh;
- /* In order to do this in 2.4.x, we need to add a member to conn_rec */
- task->c->master = master;
+ /* Simulate that we had already a request on this connection. */
+ c->keepalives = 1;
- ap_set_module_config(task->c->conn_config, &core_module, socket);
+ ap_set_module_config(c->conn_config, &core_module, socket);
/* This works for mpm_worker so far. Other mpm modules have
* different needs, unfortunately. The most interesting one
/* all fine */
break;
case H2_MPM_EVENT:
- fix_event_conn(task->c, master);
+ fix_event_conn(c, master);
break;
default:
/* fingers crossed */
break;
}
- /* TODO: we simulate that we had already a request on this connection.
- * This keeps the mod_ssl SNI vs. Host name matcher from answering
- * 400 Bad Request
- * when names do not match. We prefer a predictable 421 status.
- */
- task->c->keepalives = 1;
-
- return APR_SUCCESS;
+ return c;
}
/* This is an internal mpm event.c struct which is disguised
#ifndef __mod_h2__h2_conn__
#define __mod_h2__h2_conn__
+struct h2_ctx;
struct h2_task;
/**
- * Process the connection that is now starting the HTTP/2
- * conversation. Return when the HTTP/2 session is done
- * and the connection will close.
+ * Setup the connection and our context for HTTP/2 processing
*
+ * @param ctx the http2 context to setup
* @param c the connection HTTP/2 is starting on
* @param r the upgrade request that still awaits an answer, optional
- * @param s the server selected by request or, if NULL, connection
*/
-apr_status_t h2_conn_process(conn_rec *c, request_rec *r, server_rec *s);
+apr_status_t h2_conn_setup(struct h2_ctx *ctx, conn_rec *c, request_rec *r);
+
+/**
+ * Run the HTTP/2 connection in synchronous fashion.
+ * Return when the HTTP/2 session is done
+ * and the connection will close or a fatal error occured.
+ *
+ * @param ctx the http2 context to run
+ * @return APR_SUCCESS when session is done.
+ */
+apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c);
/* Initialize this child process for h2 connection work,
* to be called once during child init before multi processing
h2_mpm_type_t h2_conn_mpm_type(void);
-conn_rec *h2_conn_create(conn_rec *master, apr_pool_t *stream_pool);
-
-apr_status_t h2_conn_setup(struct h2_task *task, apr_bucket_alloc_t *bucket_alloc,
- apr_thread_t *thread, apr_socket_t *socket);
+conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *p,
+ apr_thread_t *thread, apr_socket_t *socket);
#endif /* defined(__mod_h2__h2_conn__) */
#include "h2_config.h"
#include "h2_conn_io.h"
#include "h2_h2.h"
+#include "h2_session.h"
#include "h2_util.h"
#define TLS_DATA_MAX (16*1024)
apr_pool_t *pool)
{
io->connection = c;
- io->input = apr_brigade_create(pool, c->bucket_alloc);
io->output = apr_brigade_create(pool, c->bucket_alloc);
io->buflen = 0;
io->is_tls = h2_h2_is_tls(c);
}
if (APLOGctrace1(c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, io->connection,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection,
"h2_conn_io(%ld): init, buffering=%d, warmup_size=%ld, cd_secs=%f",
io->connection->id, io->buffer_output, (long)io->warmup_size,
((float)io->cooldown_usecs/APR_USEC_PER_SEC));
return io->bufsize > 0;
}
-static apr_status_t h2_conn_io_bucket_read(h2_conn_io *io,
- apr_read_type_e block,
- h2_conn_io_on_read_cb on_read_cb,
- void *puser, int *pdone)
-{
- apr_status_t status = APR_SUCCESS;
- apr_size_t readlen = 0;
- *pdone = 0;
-
- while (status == APR_SUCCESS && !*pdone
- && !APR_BRIGADE_EMPTY(io->input)) {
-
- apr_bucket* bucket = APR_BRIGADE_FIRST(io->input);
- if (APR_BUCKET_IS_METADATA(bucket)) {
- /* we do nothing regarding any meta here */
- }
- else {
- const char *bucket_data = NULL;
- apr_size_t bucket_length = 0;
- status = apr_bucket_read(bucket, &bucket_data,
- &bucket_length, block);
-
- if (status == APR_SUCCESS && bucket_length > 0) {
- apr_size_t consumed = 0;
-
- if (APLOGctrace2(io->connection)) {
- char buffer[32];
- h2_util_hex_dump(buffer, sizeof(buffer)/sizeof(buffer[0]),
- bucket_data, bucket_length);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection,
- "h2_conn_io(%ld): read %d bytes: %s",
- io->connection->id, (int)bucket_length, buffer);
- }
-
- status = on_read_cb(bucket_data, bucket_length, &consumed,
- pdone, puser);
- if (status == APR_SUCCESS && bucket_length > consumed) {
- /* We have data left in the bucket. Split it. */
- status = apr_bucket_split(bucket, consumed);
- }
- readlen += consumed;
- }
- }
- apr_bucket_delete(bucket);
- }
-
- if (readlen == 0 && status == APR_SUCCESS && block == APR_NONBLOCK_READ) {
- return APR_EAGAIN;
- }
- return status;
-}
-
-apr_status_t h2_conn_io_read(h2_conn_io *io,
- apr_read_type_e block,
- h2_conn_io_on_read_cb on_read_cb,
- void *puser)
-{
- apr_status_t status;
- int done = 0;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection,
- "h2_conn_io: try read, block=%d", block);
-
- if (!APR_BRIGADE_EMPTY(io->input)) {
- /* Seems something is left from a previous read, lets
- * satisfy our caller with the data we already have. */
- status = h2_conn_io_bucket_read(io, block, on_read_cb, puser, &done);
- apr_brigade_cleanup(io->input);
- if (status != APR_SUCCESS || done) {
- return status;
- }
- }
-
- /* We only do a blocking read when we have no streams to process. So,
- * in httpd scoreboard lingo, we are in a KEEPALIVE connection state.
- * When reading non-blocking, we do have streams to process and update
- * child with NULL request. That way, any current request information
- * in the scoreboard is preserved.
- */
- if (block == APR_BLOCK_READ) {
- ap_update_child_status_from_conn(io->connection->sbh,
- SERVER_BUSY_KEEPALIVE,
- io->connection);
- }
- else {
- ap_update_child_status(io->connection->sbh, SERVER_BUSY_READ, NULL);
- }
-
- /* TODO: replace this with a connection filter itself, so that we
- * no longer need to transfer incoming buckets to our own brigade.
- */
- status = ap_get_brigade(io->connection->input_filters,
- io->input, AP_MODE_READBYTES,
- block, 64 * 4096);
- switch (status) {
- case APR_SUCCESS:
- return h2_conn_io_bucket_read(io, block, on_read_cb, puser, &done);
- case APR_EOF:
- case APR_EAGAIN:
- break;
- default:
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, io->connection,
- "h2_conn_io: error reading");
- break;
- }
- return status;
-}
+typedef struct {
+ conn_rec *c;
+ h2_conn_io *io;
+} pass_out_ctx;
static apr_status_t pass_out(apr_bucket_brigade *bb, void *ctx)
{
- h2_conn_io *io = (h2_conn_io*)ctx;
+ pass_out_ctx *pctx = ctx;
+ conn_rec *c = pctx->c;
apr_status_t status;
apr_off_t bblen;
return APR_SUCCESS;
}
- ap_update_child_status(io->connection->sbh, SERVER_BUSY_WRITE, NULL);
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL);
status = apr_brigade_length(bb, 0, &bblen);
if (status == APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, io->connection,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
"h2_conn_io(%ld): pass_out brigade %ld bytes",
- io->connection->id, (long)bblen);
- status = ap_pass_brigade(io->connection->output_filters, bb);
- if (status == APR_SUCCESS) {
- io->bytes_written += (apr_size_t)bblen;
- io->last_write = apr_time_now();
+ c->id, (long)bblen);
+ status = ap_pass_brigade(c->output_filters, bb);
+ if (status == APR_SUCCESS && pctx->io) {
+ pctx->io->bytes_written += (apr_size_t)bblen;
+ pctx->io->last_write = apr_time_now();
}
- apr_brigade_cleanup(bb);
}
+ apr_brigade_cleanup(bb);
return status;
}
/* Bring the current buffer content into the output brigade, appropriately
* chunked.
*/
-static apr_status_t bucketeer_buffer(h2_conn_io *io) {
+static apr_status_t bucketeer_buffer(h2_conn_io *io)
+{
const char *data = io->buffer;
apr_size_t remaining = io->buflen;
apr_bucket *b;
/* long time not written, reset write size */
io->write_size = WRITE_SIZE_INITIAL;
io->bytes_written = 0;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection,
"h2_conn_io(%ld): timeout write size reset to %ld",
(long)io->connection->id, (long)io->write_size);
}
&& io->bytes_written >= io->warmup_size) {
/* connection is hot, use max size */
io->write_size = WRITE_SIZE_MAX;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection,
"h2_conn_io(%ld): threshold reached, write size now %ld",
(long)io->connection->id, (long)io->write_size);
}
const char *buf, size_t length)
{
apr_status_t status = APR_SUCCESS;
+ pass_out_ctx ctx;
+ ctx.c = io->connection;
+ ctx.io = io;
io->unflushed = 1;
if (io->bufsize > 0) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, io->connection,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection,
"h2_conn_io: buffering %ld bytes", (long)length);
if (!APR_BRIGADE_EMPTY(io->output)) {
while (length > 0 && (status == APR_SUCCESS)) {
apr_size_t avail = io->bufsize - io->buflen;
if (avail <= 0) {
+
bucketeer_buffer(io);
- status = pass_out(io->output, io);
+ status = pass_out(io->output, &ctx);
io->buflen = 0;
}
else if (length > avail) {
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, io->connection,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, status, io->connection,
"h2_conn_io: writing %ld bytes to brigade", (long)length);
- status = apr_brigade_write(io->output, pass_out, io, buf, length);
+ status = apr_brigade_write(io->output, pass_out, &ctx, buf, length);
}
return status;
return status;
}
-static apr_status_t h2_conn_io_flush_int(h2_conn_io *io, int force)
+static apr_status_t h2_conn_io_flush_int(h2_conn_io *io, int force, int eoc)
{
if (io->unflushed || force) {
+ pass_out_ctx ctx;
+
if (io->buflen > 0) {
/* something in the buffer, put it in the output brigade */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, io->connection,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection,
"h2_conn_io: flush, flushing %ld bytes", (long)io->buflen);
bucketeer_buffer(io);
io->buflen = 0;
apr_bucket_flush_create(io->output->bucket_alloc));
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection,
"h2_conn_io: flush");
/* Send it out */
io->unflushed = 0;
- return pass_out(io->output, io);
+
+ ctx.c = io->connection;
+ ctx.io = eoc? NULL : io;
+ return pass_out(io->output, &ctx);
/* no more access after this, as we might have flushed an EOC bucket
* that de-allocated us all. */
}
return APR_SUCCESS;
}
-apr_status_t h2_conn_io_flush(h2_conn_io *io)
+apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, apr_bucket *b)
{
- return h2_conn_io_flush_int(io, 1);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ return h2_conn_io_flush_int(io, 1, 1);
}
-apr_status_t h2_conn_io_pass(h2_conn_io *io)
+apr_status_t h2_conn_io_flush(h2_conn_io *io)
{
- return h2_conn_io_flush_int(io, 0);
+ return h2_conn_io_flush_int(io, 1, 0);
}
-apr_status_t h2_conn_io_close(h2_conn_io *io, void *session)
+apr_status_t h2_conn_io_pass(h2_conn_io *io)
{
- apr_bucket *b;
+ return h2_conn_io_flush_int(io, 0, 0);
+}
- /* Send out anything in our buffers */
- h2_conn_io_flush_int(io, 0);
-
- b = h2_bucket_eoc_create(io->connection->bucket_alloc, session);
- APR_BRIGADE_INSERT_TAIL(io->output, b);
- b = apr_bucket_flush_create(io->connection->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(io->output, b);
- return ap_pass_brigade(io->connection->output_filters, io->output);
- /* and all is gone */
-}
\ No newline at end of file
#define __mod_h2__h2_conn_io__
struct h2_config;
+struct h2_session;
/* h2_io is the basic handler of a httpd connection. It keeps two brigades,
* one for input, one for output and works with the installed connection
*/
typedef struct {
conn_rec *connection;
- apr_bucket_brigade *input;
apr_bucket_brigade *output;
int is_tls;
apr_size_t write_size;
apr_time_t last_write;
+ apr_int64_t bytes_read;
apr_int64_t bytes_written;
int buffer_output;
int h2_conn_io_is_buffered(h2_conn_io *io);
-typedef apr_status_t (*h2_conn_io_on_read_cb)(const char *data, apr_size_t len,
- apr_size_t *readlen, int *done,
- void *puser);
-
-apr_status_t h2_conn_io_read(h2_conn_io *io,
- apr_read_type_e block,
- h2_conn_io_on_read_cb on_read_cb,
- void *puser);
-
apr_status_t h2_conn_io_write(h2_conn_io *io,
const char *buf,
size_t length);
apr_status_t h2_conn_io_pass(h2_conn_io *io);
apr_status_t h2_conn_io_flush(h2_conn_io *io);
-apr_status_t h2_conn_io_close(h2_conn_io *io, void *session);
+apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, apr_bucket *b);
#endif /* defined(__mod_h2__h2_conn_io__) */
#include <http_config.h>
#include "h2_private.h"
+#include "h2_session.h"
#include "h2_task.h"
#include "h2_ctx.h"
#include "h2_private.h"
h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx));
AP_DEBUG_ASSERT(ctx);
ap_set_module_config(c->conn_config, &http2_module, ctx);
+ h2_ctx_server_set(ctx, c->base_server);
return ctx;
}
+/* Remove the h2 context record from the connection's module config.
+ * After this call, h2_ctx_get(c, 0) returns NULL for this connection. */
+void h2_ctx_clear(const conn_rec *c)
+{
+    AP_DEBUG_ASSERT(c);
+    ap_set_module_config(c->conn_config, &http2_module, NULL);
+}
+
h2_ctx *h2_ctx_create_for(const conn_rec *c, h2_task *task)
{
h2_ctx *ctx = h2_ctx_create(c);
return ctx;
}
-h2_ctx *h2_ctx_get(const conn_rec *c)
+h2_ctx *h2_ctx_get(const conn_rec *c, int create)
{
h2_ctx *ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module);
- if (ctx == NULL) {
+ if (ctx == NULL && create) {
ctx = h2_ctx_create(c);
}
return ctx;
h2_ctx *h2_ctx_rget(const request_rec *r)
{
- return h2_ctx_get(r->connection);
+ return h2_ctx_get(r->connection, 0);
}
const char *h2_ctx_protocol_get(const conn_rec *c)
h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto)
{
ctx->protocol = proto;
- ctx->is_h2 = (proto != NULL);
return ctx;
}
+/* NULL-tolerant accessor: a missing context means no session established. */
+h2_session *h2_ctx_session_get(h2_ctx *ctx)
+{
+    return ctx? ctx->session : NULL;
+}
+
+/* Record the session on the context; caller must pass a non-NULL ctx. */
+void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session)
+{
+    ctx->session = session;
+}
+
+/* NULL-tolerant accessor for the server_rec selected for this context. */
+server_rec *h2_ctx_server_get(h2_ctx *ctx)
+{
+    return ctx? ctx->server : NULL;
+}
+
h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s)
{
ctx->server = s;
int h2_ctx_is_task(h2_ctx *ctx)
{
- return ctx && !!ctx->task;
-}
-
-int h2_ctx_is_active(h2_ctx *ctx)
-{
- return ctx && ctx->is_h2;
+ return ctx && ctx->task;
}
struct h2_task *h2_ctx_get_task(h2_ctx *ctx)
{
- return ctx->task;
+ return ctx? ctx->task : NULL;
}
#ifndef __mod_h2__h2_ctx__
#define __mod_h2__h2_ctx__
+struct h2_session;
struct h2_task;
struct h2_config;
* - those created by ourself to perform work on HTTP/2 streams
*/
typedef struct h2_ctx {
- int is_h2; /* h2 engine is used */
const char *protocol; /* the protocol negotiated */
+ struct h2_session *session; /* the session established */
struct h2_task *task; /* the h2_task executing or NULL */
const char *hostname; /* hostname negotiated via SNI, optional */
server_rec *server; /* httpd server config selected. */
const struct h2_config *config; /* effective config in this context */
} h2_ctx;
-h2_ctx *h2_ctx_get(const conn_rec *c);
+/**
+ * Get (or create) a h2 context record for this connection.
+ * @param c the connection to look at
+ * @param create != 0 iff missing context shall be created
+ * @return h2 context of this connection
+ */
+h2_ctx *h2_ctx_get(const conn_rec *c, int create);
+void h2_ctx_clear(const conn_rec *c);
+
h2_ctx *h2_ctx_rget(const request_rec *r);
h2_ctx *h2_ctx_create_for(const conn_rec *c, struct h2_task *task);
/* Set the server_rec relevant for this context.
*/
h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s);
+server_rec *h2_ctx_server_get(h2_ctx *ctx);
+
+struct h2_session *h2_ctx_session_get(h2_ctx *ctx);
+void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session);
/**
* Get the h2 protocol negotiated for this connection, or NULL.
const char *h2_ctx_protocol_get(const conn_rec *c);
int h2_ctx_is_task(h2_ctx *ctx);
-int h2_ctx_is_active(h2_ctx *ctx);
struct h2_task *h2_ctx_get_task(h2_ctx *ctx);
--- /dev/null
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <scoreboard.h>
+
+#include "h2_private.h"
+#include "h2_conn_io.h"
+#include "h2_ctx.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_task.h"
+#include "h2_stream.h"
+#include "h2_stream_set.h"
+#include "h2_request.h"
+#include "h2_response.h"
+#include "h2_session.h"
+#include "h2_util.h"
+#include "h2_version.h"
+
+#include "h2_filter.h"
+
+#define UNSET -1
+#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
+
+/* Feed every DATA bucket of bb to the cin callback, deleting buckets as
+ * they are (fully or partially) consumed. Metadata buckets are dropped
+ * without inspection. If the callback leaves part of a bucket unread,
+ * the bucket is split so the remainder stays at the brigade head for the
+ * next call. Returns APR_EAGAIN when a non-blocking pass consumed no
+ * data, otherwise the first non-success status encountered. */
+static apr_status_t consume_brigade(h2_filter_cin *cin,
+                                    apr_bucket_brigade *bb,
+                                    apr_read_type_e block)
+{
+    apr_status_t status = APR_SUCCESS;
+    apr_size_t readlen = 0;
+
+    while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
+
+        apr_bucket* bucket = APR_BRIGADE_FIRST(bb);
+        if (APR_BUCKET_IS_METADATA(bucket)) {
+            /* we do nothing regarding any meta here */
+        }
+        else {
+            const char *bucket_data = NULL;
+            apr_size_t bucket_length = 0;
+            status = apr_bucket_read(bucket, &bucket_data,
+                                     &bucket_length, block);
+
+            if (status == APR_SUCCESS && bucket_length > 0) {
+                apr_size_t consumed = 0;
+
+                status = cin->cb(cin->cb_ctx, bucket_data, bucket_length, &consumed);
+                if (status == APR_SUCCESS && bucket_length > consumed) {
+                    /* We have data left in the bucket. Split it. */
+                    status = apr_bucket_split(bucket, consumed);
+                }
+                readlen += consumed;
+                /* progress was made: restart the read timestamp used for
+                 * the filter's timeout accounting */
+                cin->start_read = apr_time_now();
+            }
+        }
+        /* delete the (possibly split-off, now fully consumed) head bucket */
+        apr_bucket_delete(bucket);
+    }
+
+    if (readlen == 0 && status == APR_SUCCESS && block == APR_NONBLOCK_READ) {
+        return APR_EAGAIN;
+    }
+    return status;
+}
+
+/* Allocate a core-input filter context from pool p. The brigade and
+ * socket members stay NULL and are created lazily on first read in
+ * h2_filter_core_input. start_read is UNSET (-1) until data flows. */
+h2_filter_cin *h2_filter_cin_create(apr_pool_t *p, h2_filter_cin_cb *cb, void *ctx)
+{
+    h2_filter_cin *cin;
+
+    cin = apr_pcalloc(p, sizeof(*cin));
+    cin->pool      = p;
+    cin->cb        = cb;
+    cin->cb_ctx    = ctx;
+    cin->start_read = UNSET;
+    return cin;
+}
+
+/* Set the socket timeout (seconds) applied during blocking reads;
+ * <= 0 leaves the connection's existing socket timeout untouched. */
+void h2_filter_cin_timeout_set(h2_filter_cin *cin, int timeout_secs)
+{
+    cin->timeout_secs = timeout_secs;
+}
+
+/* Connection input filter for h2 master connections.
+ * - AP_MODE_INIT / AP_MODE_SPECULATIVE: pass straight through to f->next.
+ * - other non-READBYTES modes are not supported: report success (blocking)
+ *   or EAGAIN (non-blocking) without reading.
+ * - AP_MODE_READBYTES: pull data from the core into cin->bb (applying the
+ *   configured socket timeout for blocking reads) and hand it to the cin
+ *   callback via consume_brigade(). */
+apr_status_t h2_filter_core_input(ap_filter_t* f,
+                                  apr_bucket_brigade* brigade,
+                                  ap_input_mode_t mode,
+                                  apr_read_type_e block,
+                                  apr_off_t readbytes)
+{
+    h2_filter_cin *cin = f->ctx;
+    apr_status_t status = APR_SUCCESS;
+    apr_time_t saved_timeout = UNSET;
+
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+                  "core_input(%ld): read, %s, mode=%d, readbytes=%ld, timeout=%d",
+                  (long)f->c->id, (block == APR_BLOCK_READ)? "BLOCK_READ" : "NONBLOCK_READ",
+                  mode, (long)readbytes, cin->timeout_secs);
+
+    if (mode == AP_MODE_INIT || mode == AP_MODE_SPECULATIVE) {
+        return ap_get_brigade(f->next, brigade, mode, block, readbytes);
+    }
+
+    if (mode != AP_MODE_READBYTES) {
+        return (block == APR_BLOCK_READ)? APR_SUCCESS : APR_EAGAIN;
+    }
+
+    /* lazy init: brigade and socket are created/fetched on first use */
+    if (!cin->bb) {
+        cin->bb = apr_brigade_create(cin->pool, f->c->bucket_alloc);
+    }
+
+    if (!cin->socket) {
+        cin->socket = ap_get_conn_socket(f->c);
+    }
+
+    cin->start_read = apr_time_now();
+    if (APR_BRIGADE_EMPTY(cin->bb)) {
+        /* We only do a blocking read when we have no streams to process. So,
+         * in httpd scoreboard lingo, we are in a KEEPALIVE connection state.
+         * When reading non-blocking, we do have streams to process and update
+         * child with NULL request. That way, any current request information
+         * in the scoreboard is preserved.
+         */
+        if (block == APR_BLOCK_READ) {
+            if (cin->timeout_secs > 0) {
+                /* temporarily narrow the socket timeout; the previous
+                 * value is restored right after the read below */
+                apr_time_t t = apr_time_from_sec(cin->timeout_secs);
+                apr_socket_timeout_get(cin->socket, &saved_timeout);
+                apr_socket_timeout_set(cin->socket, t);
+            }
+        }
+        status = ap_get_brigade(f->next, cin->bb, AP_MODE_READBYTES,
+                                block, readbytes);
+        if (saved_timeout != UNSET) {
+            apr_socket_timeout_set(cin->socket, saved_timeout);
+        }
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+                      "core_input(%ld): got_brigade", (long)f->c->id);
+    }
+
+    /* NOTE(review): when cin->bb already held leftover data, status is still
+     * the initial APR_SUCCESS here, so we consume the buffered data without
+     * performing a new read — presumably intended; confirm. */
+    switch (status) {
+        case APR_SUCCESS:
+            status = consume_brigade(cin, cin->bb, block);
+            break;
+        case APR_EOF:
+        case APR_EAGAIN:
+        case APR_TIMEUP:
+            /* handed back to the caller unchanged */
+            break;
+        default:
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, f->c,
+                          "h2_conn_io: error reading");
+            break;
+    }
+    return status;
+}
+
+/*******************************************************************************
+ * http2 connection status handler + stream out source
+ ******************************************************************************/
+
+static const char *H2_SOS_H2_STATUS = "http2-status";
+
+/* Request handler for the "http2-status" handler name. For GET requests on
+ * an h2 slave connection (i.e. an h2_task is present) it does not produce
+ * the body itself: it marks the request with H2_RESP_SOS_NOTE so the main
+ * session thread generates the JSON status via the h2_sos chain, and
+ * returns DONE with a 200/application/json response pre-set. */
+int h2_filter_h2_status_handler(request_rec *r)
+{
+    h2_ctx *ctx = h2_ctx_rget(r);
+    h2_task *task;
+
+    if (strcmp(r->handler, "http2-status")) {
+        return DECLINED;
+    }
+    if (r->method_number != M_GET) {
+        return DECLINED;
+    }
+
+    task = ctx? h2_ctx_get_task(ctx) : NULL;
+    if (task) {
+        /* We need to handle the actual output on the main thread, as
+         * we need to access h2_session information. */
+        apr_table_setn(r->notes, H2_RESP_SOS_NOTE, H2_SOS_H2_STATUS);
+        apr_table_setn(r->headers_out, "Content-Type", "application/json");
+        r->status = 200;
+        return DONE;
+    }
+    return DECLINED;
+}
+
+#define bbout(...) apr_brigade_printf(bb, NULL, NULL, __VA_ARGS__)
+/* h2_sos "buffer" stage for the http2-status handler: renders session and
+ * stream counters as a JSON object into bb (creating the brigade when the
+ * caller passed NULL), then delegates to the previous sos in the chain. */
+static apr_status_t h2_sos_h2_status_buffer(h2_sos *sos, apr_bucket_brigade *bb)
+{
+    h2_stream *stream = sos->stream;
+    h2_session *session = stream->session;
+    h2_mplx *mplx = session->mplx;
+    h2_push_diary *diary;
+    apr_status_t status;
+
+    if (!bb) {
+        bb = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+    }
+
+    bbout("{\n");
+    bbout("  \"HTTP2\": \"on\",\n");
+    bbout("  \"H2PUSH\": \"%s\",\n", h2_session_push_enabled(session)? "on" : "off");
+    bbout("  \"mod_http2_version\": \"%s\",\n", MOD_HTTP2_VERSION);
+    bbout("  \"session_id\": %ld,\n", (long)session->id);
+    bbout("  \"streams_max\": %d,\n", (int)session->max_stream_count);
+    bbout("  \"this_stream\": %d,\n", stream->id);
+    bbout("  \"streams_open\": %d,\n", (int)h2_stream_set_size(session->streams));
+    bbout("  \"max_stream_started\": %d,\n", mplx->max_stream_started);
+    bbout("  \"requests_received\": %d,\n", session->requests_received);
+    bbout("  \"responses_submitted\": %d,\n", session->responses_submitted);
+    bbout("  \"streams_reset\": %d, \n", session->streams_reset);
+    bbout("  \"pushes_promised\": %d,\n", session->pushes_promised);
+    bbout("  \"pushes_submitted\": %d,\n", session->pushes_submitted);
+    bbout("  \"pushes_reset\": %d,\n", session->pushes_reset);
+
+    diary = session->push_diary;
+    if (diary) {
+        const char *data;
+        const char *base64_digest;
+        apr_size_t len;
+
+        status = h2_push_diary_digest_get(diary, stream->pool, 256,
+                                          stream->request->authority, &data, &len);
+        if (status == APR_SUCCESS) {
+            base64_digest = h2_util_base64url_encode(data, len, stream->pool);
+            bbout("  \"cache_digest\": \"%s\",\n", base64_digest);
+        }
+
+        /* try the reverse for testing purposes */
+        /* NOTE(review): if the digest_get above failed, data/len reach
+         * digest_set uninitialized here — verify this path upstream. */
+        status = h2_push_diary_digest_set(diary, stream->request->authority, data, len);
+        if (status == APR_SUCCESS) {
+            status = h2_push_diary_digest_get(diary, stream->pool, 256,
+                                              stream->request->authority, &data, &len);
+            if (status == APR_SUCCESS) {
+                base64_digest = h2_util_base64url_encode(data, len, stream->pool);
+                bbout("  \"cache_digest^2\": \"%s\",\n", base64_digest);
+            }
+        }
+    }
+    bbout("  \"frames_received\": %ld,\n", (long)session->frames_received);
+    bbout("  \"frames_sent\": %ld,\n", (long)session->frames_sent);
+    /* NOTE(review): io.bytes_read/bytes_written are declared apr_int64_t
+     * but printed with APR_UINT64_T_FMT — confirm the intended format. */
+    bbout("  \"bytes_received\": %"APR_UINT64_T_FMT",\n", session->io.bytes_read);
+    bbout("  \"bytes_sent\": %"APR_UINT64_T_FMT"\n", session->io.bytes_written);
+    bbout("}\n");
+
+    return sos->prev->buffer(sos->prev, bb);
+}
+
+/* The remaining h2_sos operations of the status source add nothing of
+ * their own; each simply delegates to the previous sos in the chain. */
+static apr_status_t h2_sos_h2_status_read_to(h2_sos *sos, apr_bucket_brigade *bb,
+                                             apr_off_t *plen, int *peos)
+{
+    return sos->prev->read_to(sos->prev, bb, plen, peos);
+}
+
+static apr_status_t h2_sos_h2_status_prep_read(h2_sos *sos, apr_off_t *plen, int *peos)
+{
+    return sos->prev->prep_read(sos->prev, plen, peos);
+}
+
+static apr_status_t h2_sos_h2_status_readx(h2_sos *sos, h2_io_data_cb *cb, void *ctx,
+                                           apr_off_t *plen, int *peos)
+{
+    return sos->prev->readx(sos->prev, cb, ctx, plen, peos);
+}
+
+static apr_table_t *h2_sos_h2_status_get_trailers(h2_sos *sos)
+{
+    return sos->prev->get_trailers(sos->prev);
+}
+
+/* Build the http2-status sos stage on top of prev. Since the JSON body is
+ * generated on the fly, any Content-Length from the underlying response is
+ * removed and the length marked unknown (-1). The new sos shares prev's
+ * stream/response and is allocated from the stream pool. */
+static h2_sos *h2_sos_h2_status_create(h2_sos *prev)
+{
+    h2_sos *sos;
+    h2_response *response = prev->response;
+
+    apr_table_unset(response->headers, "Content-Length");
+    response->content_length = -1;
+
+    sos = apr_pcalloc(prev->stream->pool, sizeof(*sos));
+    sos->prev         = prev;
+    sos->response     = response;
+    sos->stream       = prev->stream;
+    sos->buffer       = h2_sos_h2_status_buffer;
+    sos->prep_read    = h2_sos_h2_status_prep_read;
+    sos->readx        = h2_sos_h2_status_readx;
+    sos->read_to      = h2_sos_h2_status_read_to;
+    sos->get_trailers = h2_sos_h2_status_get_trailers;
+
+    return sos;
+}
+
+/* Factory keyed by the H2_RESP_SOS_NOTE value: currently only the
+ * "http2-status" source is known; unknown names return prev unchanged. */
+h2_sos *h2_filter_sos_create(const char *name, struct h2_sos *prev)
+{
+    if (!strcmp(H2_SOS_H2_STATUS, name)) {
+        return h2_sos_h2_status_create(prev);
+    }
+    return prev;
+}
+
--- /dev/null
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_filter__
+#define __mod_h2__h2_filter__
+
+struct h2_stream;
+struct h2_session;
+
+typedef apr_status_t h2_filter_cin_cb(void *ctx,
+ const char *data, apr_size_t len,
+ apr_size_t *readlen);
+
+typedef struct h2_filter_cin {
+ apr_pool_t *pool;
+ apr_bucket_brigade *bb;
+ h2_filter_cin_cb *cb;
+ void *cb_ctx;
+ apr_socket_t *socket;
+ int timeout_secs;
+ apr_time_t start_read;
+} h2_filter_cin;
+
+h2_filter_cin *h2_filter_cin_create(apr_pool_t *p, h2_filter_cin_cb *cb, void *ctx);
+
+void h2_filter_cin_timeout_set(h2_filter_cin *cin, int timeout_secs);
+
+apr_status_t h2_filter_core_input(ap_filter_t* filter,
+ apr_bucket_brigade* brigade,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes);
+
+typedef struct h2_sos h2_sos;
+typedef apr_status_t h2_sos_data_cb(void *ctx, const char *data, apr_off_t len);
+
+typedef apr_status_t h2_sos_buffer(h2_sos *sos, apr_bucket_brigade *bb);
+typedef apr_status_t h2_sos_prep_read(h2_sos *sos, apr_off_t *plen, int *peos);
+typedef apr_status_t h2_sos_readx(h2_sos *sos, h2_sos_data_cb *cb,
+ void *ctx, apr_off_t *plen, int *peos);
+typedef apr_status_t h2_sos_read_to(h2_sos *sos, apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos);
+typedef apr_table_t *h2_sos_get_trailers(h2_sos *sos);
+
+
+#define H2_RESP_SOS_NOTE "h2-sos-filter"
+
+struct h2_sos {
+ struct h2_stream *stream;
+ h2_sos *prev;
+ struct h2_response *response;
+ void *ctx;
+ h2_sos_buffer *buffer;
+ h2_sos_prep_read *prep_read;
+ h2_sos_readx *readx;
+ h2_sos_read_to *read_to;
+ h2_sos_get_trailers *get_trailers;
+};
+
+h2_sos *h2_filter_sos_create(const char *name, struct h2_sos *prev);
+
+int h2_filter_h2_status_handler(request_rec *r);
+
+
+#endif /* __mod_h2__h2_filter__ */
static apr_status_t make_h2_headers(h2_from_h1 *from_h1, request_rec *r)
{
from_h1->response = h2_response_create(from_h1->stream_id, 0,
- from_h1->http_status, from_h1->hlines,
+ from_h1->http_status,
+ from_h1->hlines,
+ r->notes,
from_h1->pool);
from_h1->content_length = from_h1->response->content_length;
from_h1->chunked = r->chunked;
*/
for (i = 0, strpp = (char **) values->elts; i < values->nelts;
++i, ++strpp) {
- if (*strpp && strcasecmp(*strpp, start) == 0) {
+ if (*strpp && apr_strnatcasecmp(*strpp, start) == 0) {
break;
}
}
while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) {
for (i = 0; i < r->content_languages->nelts; ++i) {
- if (!strcasecmp(token, languages[i]))
+ if (!apr_strnatcasecmp(token, languages[i]))
break;
}
if (i == r->content_languages->nelts) {
*/
if (AP_BUCKET_IS_EOC(b)) {
ap_remove_output_filter(f);
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, f->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
"h2_from_h1(%d): eoc bucket passed",
from_h1->stream_id);
return ap_pass_brigade(f->next, bb);
from_h1->response = create_response(from_h1, r);
if (from_h1->response == NULL) {
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, f->c,
+ ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c,
"h2_from_h1(%d): unable to create response",
from_h1->stream_id);
return APR_ENOMEM;
#include <http_config.h>
#include <http_connection.h>
#include <http_protocol.h>
+#include <http_request.h>
#include <http_log.h>
+#include "mod_http2.h"
#include "h2_private.h"
#include "h2_stream.h"
#include "h2_config.h"
#include "h2_ctx.h"
#include "h2_conn.h"
+#include "h2_request.h"
+#include "h2_session.h"
+#include "h2_util.h"
#include "h2_h2.h"
+#include "mod_http2.h"
const char *h2_tls_protos[] = {
"h2", NULL
/*******************************************************************************
* Hooks for processing incoming connections:
- * - pre_conn_before_tls switches SSL off for stream connections
* - process_conn take over connection in case of h2
*/
static int h2_h2_process_conn(conn_rec* c);
-static int h2_h2_remove_timeout(conn_rec* c);
static int h2_h2_post_read_req(request_rec *r);
-
/*******************************************************************************
* Once per lifetime init, retrieve optional functions
*/
apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s)
{
(void)pool;
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "h2_h2, child_init");
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_h2, child_init");
opt_ssl_engine_disable = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_disable);
opt_ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https);
opt_ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup);
int h2_allows_h2_direct(conn_rec *c)
{
const h2_config *cfg = h2_config_get(c);
+ int is_tls = h2_h2_is_tls(c);
+ const char *needed_protocol = is_tls? "h2" : "h2c";
int h2_direct = h2_config_geti(cfg, H2_CONF_DIRECT);
if (h2_direct < 0) {
- if (h2_h2_is_tls(c)) {
- /* disabled by default on TLS */
- h2_direct = 0;
- }
- else {
- /* enabled if "Protocols h2c" is configured */
- h2_direct = ap_is_allowed_protocol(c, NULL, NULL, "h2c");
- }
+ h2_direct = is_tls? 0 : 1;
}
- return !!h2_direct;
+ return (h2_direct
+ && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
}
int h2_allows_h2_upgrade(conn_rec *c)
/*******************************************************************************
* Register various hooks
*/
-static const char *const mod_reqtimeout[] = { "reqtimeout.c", NULL};
-static const char* const mod_ssl[] = {"mod_ssl.c", NULL};
+static const char* const mod_ssl[] = { "mod_ssl.c", NULL};
+static const char* const mod_reqtimeout[] = { "mod_reqtimeout.c", NULL};
void h2_h2_register_hooks(void)
{
- /* When the connection processing actually starts, we might to
- * take over, if h2* was selected as protocol.
+ /* Our main processing needs to run quite late. Definitely after mod_ssl,
+ * as we need its connection filters, but also before reqtimeout as its
+ * method of timeouts is specific to HTTP/1.1 (as of now).
+ * The core HTTP/1 processing run as REALLY_LAST, so we will have
+ * a chance to take over before it.
*/
ap_hook_process_connection(h2_h2_process_conn,
- mod_ssl, NULL, APR_HOOK_MIDDLE);
+ mod_ssl, mod_reqtimeout, APR_HOOK_LAST);
- /* Perform connection cleanup before the actual processing happens.
- */
- ap_hook_process_connection(h2_h2_remove_timeout,
- mod_reqtimeout, NULL, APR_HOOK_LAST);
-
/* With "H2SerializeHeaders On", we install the filter in this hook
* that parses the response. This needs to happen before any other post
* read function terminates the request with an error. Otherwise we will
ap_hook_post_read_request(h2_h2_post_read_req, NULL, NULL, APR_HOOK_REALLY_FIRST);
}
-static int h2_h2_remove_timeout(conn_rec* c)
+int h2_h2_process_conn(conn_rec* c)
{
- h2_ctx *ctx = h2_ctx_get(c);
+ apr_status_t status;
+ h2_ctx *ctx;
- if (h2_ctx_is_active(ctx) && !h2_ctx_is_task(ctx)) {
- /* cleanup on master h2 connections */
- ap_remove_input_filter_byhandle(c->input_filters, "reqtimeout");
+ if (c->master) {
+ return DECLINED;
}
- return DECLINED;
-}
-
-int h2_h2_process_conn(conn_rec* c)
-{
- h2_ctx *ctx = h2_ctx_get(c);
-
+ ctx = h2_ctx_get(c, 0);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn");
if (h2_ctx_is_task(ctx)) {
/* our stream pseudo connection */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "h2_h2, task, declined");
return DECLINED;
}
-
- if (h2_ctx_protocol_get(c)) {
- /* Something has been negotiated */
- }
- else if (!strcmp(AP_PROTOCOL_HTTP1, ap_get_protocol(c))
- && h2_allows_h2_direct(c)
- && h2_is_acceptable_connection(c, 1)) {
- /* connection still is on http/1.1 and H2Direct is enabled.
- * Otherwise connection is in a fully acceptable state.
- * -> peek at the first 24 incoming bytes
- */
- apr_bucket_brigade *temp;
- apr_status_t status;
- char *s = NULL;
- apr_size_t slen;
-
- temp = apr_brigade_create(c->pool, c->bucket_alloc);
- status = ap_get_brigade(c->input_filters, temp,
- AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24);
+
+ if (!ctx && c->keepalives == 0) {
+ const char *proto = ap_get_protocol(c);
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
- "h2_h2, error reading 24 bytes speculative");
- apr_brigade_destroy(temp);
- return DECLINED;
+ if (APLOGctrace1(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, "
+ "new connection using protocol '%s', direct=%d, "
+ "tls acceptable=%d", proto, h2_allows_h2_direct(c),
+ h2_is_acceptable_connection(c, 1));
}
- apr_brigade_pflatten(temp, &s, &slen, c->pool);
- if ((slen >= 24) && !memcmp(H2_MAGIC_TOKEN, s, 24)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_h2, direct mode detected");
- h2_ctx_protocol_set(ctx, h2_h2_is_tls(c)? "h2" : "h2c");
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
- "h2_h2, not detected in %d bytes: %s",
- (int)slen, s);
+ if (!strcmp(AP_PROTOCOL_HTTP1, proto)
+ && h2_allows_h2_direct(c)
+ && h2_is_acceptable_connection(c, 1)) {
+ /* Fresh connection still is on http/1.1 and H2Direct is enabled.
+ * Otherwise connection is in a fully acceptable state.
+ * -> peek at the first 24 incoming bytes
+ */
+ apr_bucket_brigade *temp;
+ char *s = NULL;
+ apr_size_t slen;
+
+ temp = apr_brigade_create(c->pool, c->bucket_alloc);
+ status = ap_get_brigade(c->input_filters, temp,
+ AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24);
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ "h2_h2, error reading 24 bytes speculative");
+ apr_brigade_destroy(temp);
+ return DECLINED;
+ }
+
+ apr_brigade_pflatten(temp, &s, &slen, c->pool);
+ if ((slen >= 24) && !memcmp(H2_MAGIC_TOKEN, s, 24)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, direct mode detected");
+ if (!ctx) {
+ ctx = h2_ctx_get(c, 1);
+ }
+ h2_ctx_protocol_set(ctx, h2_h2_is_tls(c)? "h2" : "h2c");
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_h2, not detected in %d bytes: %s",
+ (int)slen, s);
+ }
+
+ apr_brigade_destroy(temp);
}
-
- apr_brigade_destroy(temp);
- }
- else {
- /* the connection is not HTTP/1.1 or not for us, don't touch it */
- return DECLINED;
}
- /* If "h2" was selected as protocol (by whatever mechanism), take over
- * the connection.
- */
- if (h2_ctx_is_active(ctx)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_h2, connection, h2 active");
-
- return h2_conn_process(c, NULL, ctx->server);
+ if (ctx) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn");
+ if (!h2_ctx_session_get(ctx)) {
+ status = h2_conn_setup(ctx, c, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup");
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ return h2_conn_run(ctx, c);
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, declined");
static int h2_h2_post_read_req(request_rec *r)
{
- h2_ctx *ctx = h2_ctx_rget(r);
- struct h2_task *task = h2_ctx_get_task(ctx);
- if (task) {
- /* FIXME: sometimes, this hook gets called twice for a single request.
- * This should not be, right? */
- /* h2_task connection for a stream, not for h2c */
- ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
- "adding h1_to_h2_resp output filter");
- if (task->serialize_headers) {
- ap_remove_output_filter_byhandle(r->output_filters, "H1_TO_H2_RESP");
- ap_add_output_filter("H1_TO_H2_RESP", task, r, r->connection);
- }
- else {
- /* replace the core http filter that formats response headers
- * in HTTP/1 with our own that collects status and headers */
- ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
- ap_remove_output_filter_byhandle(r->output_filters, "H2_RESPONSE");
- ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
+ /* slave connection? */
+ if (r->connection->master) {
+ h2_ctx *ctx = h2_ctx_rget(r);
+ struct h2_task *task = h2_ctx_get_task(ctx);
+ /* This hook will get called twice on internal redirects. Take care
+ * that we manipulate filters only once. */
+ /* our slave connection? */
+ if (task && !task->filters_set) {
+ ap_filter_t *f;
+
+ /* setup the correct output filters to process the response
+ * on the proper mod_http2 way. */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "adding task output filter");
+ if (task->ser_headers) {
+ ap_add_output_filter("H1_TO_H2_RESP", task, r, r->connection);
+ }
+ else {
+ /* replace the core http filter that formats response headers
+ * in HTTP/1 with our own that collects status and headers */
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+ ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
+ }
+
+ /* trailers processing. Incoming trailers are added to this
+ * request via our h2 input filter, outgoing trailers
+ * in a special h2 out filter. */
+ for (f = r->input_filters; f; f = f->next) {
+ if (!strcmp("H2_TO_H1", f->frec->name)) {
+ f->r = r;
+ break;
+ }
+ }
+ ap_add_output_filter("H2_TRAILERS", task, r, r->connection);
+ task->filters_set = 1;
}
- ap_add_output_filter("H2_TRAILERS", task, r, r->connection);
}
return DECLINED;
}
-
#include <assert.h>
+#include <apr_pools.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
#include <httpd.h>
#include <http_core.h>
#include <http_log.h>
#include <http_connection.h>
#include "h2_private.h"
+#include "h2_h2.h"
#include "h2_io.h"
+#include "h2_mplx.h"
#include "h2_response.h"
+#include "h2_request.h"
#include "h2_task.h"
#include "h2_util.h"
-h2_io *h2_io_create(int id, apr_pool_t *pool, apr_bucket_alloc_t *bucket_alloc)
+/* Create the h2_io for stream `id`, allocated from the stream pool.
+ * The io now owns its bucket allocator (tied to `pool`) instead of
+ * sharing one supplied by the caller. */
+h2_io *h2_io_create(int id, apr_pool_t *pool)
{
h2_io *io = apr_pcalloc(pool, sizeof(*io));
if (io) {
io->id = id;
io->pool = pool;
-        io->bucket_alloc = bucket_alloc;
-        io->bbin = NULL;
-        io->bbout = NULL;
+        /* bbin/bbout/tmp stay NULL: apr_pcalloc zeroes the struct */
+        io->bucket_alloc = apr_bucket_alloc_create(pool);
}
return io;
}
return h2_io_in_close(io);
}
+
+/* Arm a timed wait on this io: remember which operation `op` the
+ * waiter blocks on and the condition to signal. timeout_secs > 0
+ * sets an absolute deadline; otherwise waits have no deadline. */
+void h2_io_signal_init(h2_io *io, h2_io_op op, int timeout_secs, apr_thread_cond_t *cond)
+{
+    io->timed_op = op;
+    io->timed_cond = cond;
+    if (timeout_secs > 0) {
+        /* absolute time at which the wait should expire */
+        io->timeout_at = apr_time_now() + apr_time_from_sec(timeout_secs);
+    }
+    else {
+        io->timeout_at = 0;
+    }
+}
+
+/* Disarm the wait state set up by h2_io_signal_init(): forget the
+ * condition and clear any deadline. */
+void h2_io_signal_exit(h2_io *io)
+{
+    io->timed_cond = NULL;
+    io->timeout_at = 0;
+}
+
+/* Block on io->timed_cond under m->lock until signalled or, when a
+ * deadline was armed via h2_io_signal_init(), until it expires.
+ * A timeout RSTs the stream with H2_ERR_CANCEL. Returns
+ * APR_ECONNABORTED when the io was orphaned while waiting. */
+apr_status_t h2_io_signal_wait(h2_mplx *m, h2_io *io)
+{
+    apr_status_t status;
+
+    if (io->timeout_at != 0) {
+        /* NOTE(review): io->timeout_at is an absolute apr_time_t, but
+         * apr_thread_cond_timedwait() takes a relative
+         * apr_interval_time_t in microseconds — verify; expected
+         * something like (io->timeout_at - apr_time_now()). */
+        status = apr_thread_cond_timedwait(io->timed_cond, m->lock, io->timeout_at);
+        if (APR_STATUS_IS_TIMEUP(status)) {
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c,
+                          "h2_mplx(%ld-%d): stream timeout expired: %s",
+                          m->id, io->id,
+                          (io->timed_op == H2_IO_READ)? "read" : "write");
+            h2_io_rst(io, H2_ERR_CANCEL);
+        }
+    }
+    else {
+        /* no deadline: wait until signalled */
+        apr_thread_cond_wait(io->timed_cond, m->lock);
+        status = APR_SUCCESS;
+    }
+    if (io->orphaned && status == APR_SUCCESS) {
+        /* stream went away while we waited */
+        return APR_ECONNABORTED;
+    }
+    return status;
+}
+
+/* Wake the waiter, if any, that blocks on operation `op`
+ * (H2_IO_ANY matches whatever the waiter armed). */
+void h2_io_signal(h2_io *io, h2_io_op op)
+{
+    if (io->timed_cond && (io->timed_op == op || H2_IO_ANY == op)) {
+        apr_thread_cond_signal(io->timed_cond);
+    }
+}
+
+/* Mark this io as abandoned by its h2_stream. When `error` is
+ * non-zero, RST the io with it. Always wakes any waiter so it can
+ * observe the orphaned state. */
+void h2_io_make_orphaned(h2_io *io, int error)
+{
+    io->orphaned = 1;
+    if (error) {
+        h2_io_rst(io, error);
+    }
+    /* if someone is waiting, wake him up */
+    h2_io_signal(io, H2_IO_ANY);
+}
+
+/* apr_table_do() callback: append one "key: value\r\n" trailer line
+ * to the brigade passed as ctx. Returns non-zero (continue iterating)
+ * on success. */
+static int add_trailer(void *ctx, const char *key, const char *value)
+{
+    apr_bucket_brigade *bb = ctx;
+    apr_status_t status;
+
+    status = apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n",
+                                key, value);
+    return (status == APR_SUCCESS);
+}
+
+/* Terminate the input stream for the reader: transfer any request
+ * trailers (either directly into the `trailers` table the caller
+ * supplied, or — for chunked requests — serialized after the
+ * last-chunk marker), then append an EOS bucket. */
+static apr_status_t append_eos(h2_io *io, apr_bucket_brigade *bb,
+                               apr_table_t *trailers)
+{
+    apr_status_t status = APR_SUCCESS;
+    apr_table_t *t = io->request->trailers;
+
+    /* NOTE(review): the emptiness check is on `trailers`, but the
+     * comment suggests it should be on `t` (the trailers we have to
+     * transfer) — confirm intent. */
+    if (trailers && t && !apr_is_empty_table(trailers)) {
+        /* trailers passed in, transfer directly. */
+        apr_table_overlap(trailers, t, APR_OVERLAP_TABLES_SET);
+        t = NULL;
+    }
+
+    if (io->request->chunked) {
+        if (t && !apr_is_empty_table(t)) {
+            /* no trailers passed in, transfer via chunked:
+             * last-chunk, trailer lines, final CRLF (RFC 7230 4.1).
+             * Note: status of the first puts is overwritten below. */
+            status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
+            apr_table_do(add_trailer, bb, t, NULL);
+            status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+        }
+        else {
+            status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+        }
+    }
+    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(io->bucket_alloc));
+    return status;
+}
+
apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb,
- apr_size_t maxlen)
+ apr_size_t maxlen, apr_table_t *trailers)
{
apr_off_t start_len = 0;
- apr_bucket *last;
apr_status_t status;
if (io->rst_error) {
}
if (!io->bbin || APR_BRIGADE_EMPTY(io->bbin)) {
- return io->eos_in? APR_EOF : APR_EAGAIN;
+ if (io->eos_in) {
+ if (!io->eos_in_written) {
+ status = append_eos(io, bb, trailers);
+ io->eos_in_written = 1;
+ return status;
+ }
+ return APR_EOF;
+ }
+ return APR_EAGAIN;
}
- apr_brigade_length(bb, 1, &start_len);
- last = APR_BRIGADE_LAST(bb);
- status = h2_util_move(bb, io->bbin, maxlen, NULL, "h2_io_in_read");
- if (status == APR_SUCCESS) {
- apr_bucket *nlast = APR_BRIGADE_LAST(bb);
- apr_off_t end_len = 0;
- apr_brigade_length(bb, 1, &end_len);
- if (last == nlast) {
- return APR_EAGAIN;
+ if (io->request->chunked) {
+ /* the reader expects HTTP/1.1 chunked encoding */
+ status = h2_util_move(io->tmp, io->bbin, maxlen, NULL, "h2_io_in_read_chunk");
+ if (status == APR_SUCCESS) {
+ apr_off_t tmp_len = 0;
+
+ apr_brigade_length(io->tmp, 1, &tmp_len);
+ if (tmp_len > 0) {
+ io->input_consumed += tmp_len;
+ status = apr_brigade_printf(bb, NULL, NULL, "%lx\r\n",
+ (unsigned long)tmp_len);
+ if (status == APR_SUCCESS) {
+ status = h2_util_move(bb, io->tmp, -1, NULL, "h2_io_in_read_tmp1");
+ if (status == APR_SUCCESS) {
+ status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+ }
+ }
+ }
+ else {
+ status = h2_util_move(bb, io->tmp, -1, NULL, "h2_io_in_read_tmp2");
+ }
+ apr_brigade_cleanup(io->tmp);
}
- io->input_consumed += (end_len - start_len);
}
+ else {
+ apr_brigade_length(bb, 1, &start_len);
+
+ status = h2_util_move(bb, io->bbin, maxlen, NULL, "h2_io_in_read");
+ if (status == APR_SUCCESS) {
+ apr_off_t end_len = 0;
+ apr_brigade_length(bb, 1, &end_len);
+ io->input_consumed += (end_len - start_len);
+ }
+ }
+
return status;
}
if (!APR_BRIGADE_EMPTY(bb)) {
if (!io->bbin) {
io->bbin = apr_brigade_create(io->pool, io->bucket_alloc);
+ io->tmp = apr_brigade_create(io->pool, io->bucket_alloc);
}
return h2_util_move(io->bbin, bb, -1, NULL, "h2_io_in_write");
}
return APR_ECONNABORTED;
}
- if (io->bbin) {
- APR_BRIGADE_INSERT_TAIL(io->bbin,
- apr_bucket_eos_create(io->bbin->bucket_alloc));
- }
io->eos_in = 1;
return APR_SUCCESS;
}
apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb,
apr_size_t maxlen, apr_table_t *trailers,
- int *pfile_handles_allowed)
+ apr_size_t *pfile_buckets_allowed)
{
apr_status_t status;
int start_allowed;
* many open files already buffered. Otherwise we will run out of
* file handles.
*/
- start_allowed = *pfile_handles_allowed;
- status = h2_util_move(io->bbout, bb, maxlen, pfile_handles_allowed,
+ start_allowed = *pfile_buckets_allowed;
+ status = h2_util_move(io->bbout, bb, maxlen, pfile_buckets_allowed,
"h2_io_out_write");
/* track # file buckets moved into our pool */
- if (start_allowed != *pfile_handles_allowed) {
- io->files_handles_owned += (start_allowed - *pfile_handles_allowed);
+ if (start_allowed != *pfile_buckets_allowed) {
+ io->files_handles_owned += (start_allowed - *pfile_buckets_allowed);
}
return status;
}
}
if (!h2_util_has_eos(io->bbout, -1)) {
APR_BRIGADE_INSERT_TAIL(io->bbout,
- apr_bucket_eos_create(io->bbout->bucket_alloc));
+ apr_bucket_eos_create(io->bucket_alloc));
}
}
return APR_SUCCESS;
struct h2_response;
struct apr_thread_cond_t;
+struct h2_mplx;
struct h2_request;
typedef int h2_stream_pri_cmp(int stream_id1, int stream_id2, void *ctx);
+/* Which io operation a waiter blocks on (see h2_io_signal_init/
+ * h2_io_signal); H2_IO_ANY matches both read and write waiters. */
+typedef enum {
+    H2_IO_READ,
+    H2_IO_WRITE,
+    H2_IO_ANY,
+}
+h2_io_op;
typedef struct h2_io h2_io;
/* Per-stream in/out data exchange between the session (main
 * connection) and the worker processing the request. */
struct h2_io {
-    int id;                      /* stream identifier */
-    apr_pool_t *pool;            /* stream pool */
-    int orphaned;                /* h2_stream is gone for this io */
+    int id;                      /* stream identifier */
+    apr_pool_t *pool;            /* stream pool */
+    apr_bucket_alloc_t *bucket_alloc; /* io-owned allocator (h2_io_create) */
-    int task_done;
-    const struct h2_request *request; /* request on this io */
-    int request_body;            /* == 0 iff request has no body */
-    struct h2_response *response;/* response for submit, once created */
-    int rst_error;
-
-    int eos_in;
-    apr_bucket_brigade *bbin;    /* input data for stream */
-    struct apr_thread_cond_t *input_arrived; /* block on reading */
-    apr_size_t input_consumed;   /* how many bytes have been read */
+    const struct h2_request *request;/* request on this io */
+    struct h2_response *response;    /* response to request */
+    int rst_error;                   /* h2 related stream abort error */
+
+    apr_bucket_brigade *bbin;        /* input data for stream */
+    apr_bucket_brigade *bbout;       /* output data from stream */
+    apr_bucket_brigade *tmp;         /* temporary data for chunking */
+
+    unsigned int orphaned : 1;       /* h2_stream is gone for this io */
+    unsigned int worker_started : 1; /* h2_worker started processing for this io */
+    unsigned int worker_done : 1;    /* h2_worker finished for this io */
+    unsigned int request_body : 1;   /* iff request has body */
+    unsigned int eos_in : 1;         /* input eos has been seen */
+    unsigned int eos_in_written : 1; /* input eos has been forwarded */
+    unsigned int eos_out : 1;        /* output eos has been seen */
-    int eos_out;
-    apr_bucket_brigade *bbout;   /* output data from stream */
-    apr_bucket_alloc_t *bucket_alloc;
-    struct apr_thread_cond_t *output_drained; /* block on writing */
+    h2_io_op timed_op;               /* which operation is waited on, if any */
+    struct apr_thread_cond_t *timed_cond; /* condition to wait on, maybe NULL */
+    apr_time_t timeout_at;           /* when IO wait will time out */
+    apr_size_t input_consumed;       /* how many bytes have been read */
+
int files_handles_owned;
};
/**
* Creates a new h2_io for the given stream id.
*/
-h2_io *h2_io_create(int id, apr_pool_t *pool, apr_bucket_alloc_t *bucket_alloc);
+h2_io *h2_io_create(int id, apr_pool_t *pool);
/**
* Frees any resources hold by the h2_io instance.
*/
int h2_io_out_has_data(h2_io *io);
+/* Wake a waiter blocked on `op` (H2_IO_ANY matches any waiter). */
+void h2_io_signal(h2_io *io, h2_io_op op);
+/* Arm a timed wait for `op`; timeout_secs <= 0 means no deadline. */
+void h2_io_signal_init(h2_io *io, h2_io_op op, int timeout_secs,
+                       struct apr_thread_cond_t *cond);
+/* Disarm: clear the condition and any deadline. */
+void h2_io_signal_exit(h2_io *io);
+/* Block under m->lock until signalled, timed out, or orphaned. */
+apr_status_t h2_io_signal_wait(struct h2_mplx *m, h2_io *io);
+
+/* Mark io abandoned by its stream; RST with `error` if non-zero. */
+void h2_io_make_orphaned(h2_io *io, int error);
+
/*******************************************************************************
* Input handling of streams.
******************************************************************************/
* is currently available, APR_EOF if end of input has been reached.
*/
apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb,
- apr_size_t maxlen);
+ apr_size_t maxlen, apr_table_t *trailers);
/**
* Appends given bucket to the input.
apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb,
apr_size_t maxlen, apr_table_t *trailers,
- int *pfile_buckets_allowed);
+ apr_size_t *pfile_buckets_allowed);
/**
* Closes the input. After existing data has been read, APR_EOF will
#include <assert.h>
#include <stddef.h>
+#include <stdlib.h>
#include <apr_atomic.h>
#include <apr_thread_mutex.h>
} while(0)
-static int is_aborted(h2_mplx *m, apr_status_t *pstatus) {
+static int is_aborted(h2_mplx *m, apr_status_t *pstatus)
+{
AP_DEBUG_ASSERT(m);
if (m->aborted) {
*pstatus = APR_ECONNABORTED;
static void have_out_data_for(h2_mplx *m, int stream_id);
+/* Top up this mplx's reservation of file-transfer (tx) handles from
+ * the shared workers pool when none are left, asking for at most
+ * tx_chunk_size handles, bounded by the number of open stream ios. */
+static void check_tx_reservation(h2_mplx *m)
+{
+    if (m->tx_handles_reserved == 0) {
+        m->tx_handles_reserved += h2_workers_tx_reserve(m->workers,
+            H2MIN(m->tx_chunk_size, h2_io_set_size(m->stream_ios)));
+    }
+}
+
+/* Return surplus tx handle reservations to the workers pool: anything
+ * above tx_chunk_size, or everything once no stream ios remain. */
+static void check_tx_free(h2_mplx *m)
+{
+    if (m->tx_handles_reserved > m->tx_chunk_size) {
+        apr_size_t count = m->tx_handles_reserved - m->tx_chunk_size;
+        m->tx_handles_reserved = m->tx_chunk_size;
+        h2_workers_tx_free(m->workers, count);
+    }
+    else if (m->tx_handles_reserved
+             && (!m->stream_ios || h2_io_set_is_empty(m->stream_ios))) {
+        h2_workers_tx_free(m->workers, m->tx_handles_reserved);
+        m->tx_handles_reserved = 0;
+    }
+}
+
static void h2_mplx_destroy(h2_mplx *m)
{
AP_DEBUG_ASSERT(m);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
- "h2_mplx(%ld): destroy, refs=%d",
- m->id, m->refs);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): destroy, ios=%d",
+ m->id, (int)h2_io_set_size(m->stream_ios));
m->aborted = 1;
if (m->ready_ios) {
h2_io_set_destroy(m->ready_ios);
m->stream_ios = NULL;
}
+ check_tx_free(m);
+
if (m->pool) {
apr_pool_destroy(m->pool);
}
if (m) {
m->id = c->id;
APR_RING_ELEM_INIT(m, link);
- m->refs = 1;
m->c = c;
apr_pool_create_ex(&m->pool, parent, NULL, allocator);
if (!m->pool) {
return NULL;
}
- m->bucket_alloc = apr_bucket_alloc_create(m->pool);
-
m->q = h2_tq_create(m->pool, h2_config_geti(conf, H2_CONF_MAX_STREAMS));
m->stream_ios = h2_io_set_create(m->pool);
m->ready_ios = h2_io_set_create(m->pool);
m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
m->workers = workers;
- m->file_handles_allowed = h2_config_geti(conf, H2_CONF_SESSION_FILES);
+ m->tx_handles_reserved = 0;
+ m->tx_chunk_size = 4;
+
+ m->stream_timeout_secs = h2_config_geti(conf, H2_CONF_STREAM_TIMEOUT_SECS);
}
return m;
}
-static void release(h2_mplx *m, int lock)
-{
- if (lock) {
- apr_thread_mutex_lock(m->lock);
- --m->refs;
- if (m->join_wait) {
- apr_thread_cond_signal(m->join_wait);
- }
- apr_thread_mutex_unlock(m->lock);
- }
- else {
- --m->refs;
- }
-}
-
-void h2_mplx_reference(h2_mplx *m)
+/* Thread-safe getter for the highest stream id ever handed to a
+ * worker (replaces the removed refcounting reference/release API). */
+int h2_mplx_get_max_stream_started(h2_mplx *m)
{
+    int stream_id = 0;
+
apr_thread_mutex_lock(m->lock);
-    ++m->refs;
+    stream_id = m->max_stream_started;
apr_thread_mutex_unlock(m->lock);
+
+    return stream_id;
}
-void h2_mplx_release(h2_mplx *m)
+/* Register this mplx with the h2_workers hub so workers pick up its
+ * queued requests. */
+static void workers_register(h2_mplx *m)
{
-    release(m, 1);
-}
-
-static void workers_register(h2_mplx *m) {
/* Initially, there was ref count increase for this as well, but
* this is not needed, even harmful.
* h2_workers is only a hub for all the h2_worker instances.
h2_workers_register(m->workers, m);
}
-static void workers_unregister(h2_mplx *m) {
- h2_workers_unregister(m->workers, m);
-}
-
-static int io_process_events(h2_mplx *m, h2_io *io) {
+static int io_process_events(h2_mplx *m, h2_io *io)
+{
if (io->input_consumed && m->input_consumed) {
m->input_consumed(m->input_consumed_ctx,
io->id, io->input_consumed);
return 0;
}
-
static void io_destroy(h2_mplx *m, h2_io *io, int events)
{
apr_pool_t *pool = io->pool;
/* The pool is cleared/destroyed which also closes all
* allocated file handles. Give this count back to our
* file handle pool. */
- m->file_handles_allowed += io->files_handles_owned;
+ m->tx_handles_reserved += io->files_handles_owned;
+
h2_io_set_remove(m->stream_ios, io);
h2_io_set_remove(m->ready_ios, io);
h2_io_destroy(io);
}
m->spare_pool = pool;
}
+
+ check_tx_free(m);
}
+/* Called when the session side is done with a stream: remove the io
+ * from the ready set; destroy it immediately when no worker has it
+ * (returns 0), otherwise orphan it and defer cleanup to
+ * h2_mplx_request_done (returns 1). */
static int io_stream_done(h2_mplx *m, h2_io *io, int rst_error)
{
/* Remove io from ready set, we will never submit it */
h2_io_set_remove(m->ready_ios, io);
-    if (io->task_done || h2_tq_remove(m->q, io->id)) {
+    if (!io->worker_started || io->worker_done) {
/* already finished or not even started yet */
+        h2_tq_remove(m->q, io->id);
io_destroy(m, io, 1);
return 0;
}
else {
/* cleanup once task is done */
-        io->orphaned = 1;
-        if (rst_error) {
-            h2_io_rst(io, rst_error);
-        }
+        h2_io_make_orphaned(io, rst_error);
return 1;
}
}
-static int stream_done_iter(void *ctx, h2_io *io) {
+/* h2_io_set_iter callback: orphan/destroy each io without a
+ * reset error (ctx is the h2_mplx). */
+static int stream_done_iter(void *ctx, h2_io *io)
+{
return io_stream_done((h2_mplx*)ctx, io, 0);
}
{
apr_status_t status;
- workers_unregister(m);
+ h2_workers_unregister(m->workers, m);
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
+ int i, wait_secs = 5;
+
+ /* disable WINDOW_UPDATE callbacks */
+ h2_mplx_set_consumed_cb(m, NULL, NULL);
+
while (!h2_io_set_iter(m->stream_ios, stream_done_iter, m)) {
- /* iterator until all h2_io have been orphaned or destroyed */
+ /* iterate until all ios have been orphaned or destroyed */
}
- release(m, 0);
- while (m->refs > 0) {
+ /* Any remaining ios have handed out requests to workers that are
+ * not done yet. Any operation they do on their assigned stream ios will
+ * be errored ECONNRESET/ABORTED, so that should find out pretty soon.
+ */
+ for (i = 0; h2_io_set_size(m->stream_ios) > 0; ++i) {
m->join_wait = wait;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
- "h2_mplx(%ld): release_join, refs=%d, waiting...",
- m->id, m->refs);
- apr_thread_cond_wait(wait, m->lock);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): release_join, waiting on %d worker to report back",
+ m->id, (int)h2_io_set_size(m->stream_ios));
+
+ status = apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(wait_secs));
+ if (APR_STATUS_IS_TIMEUP(status)) {
+ if (i > 0) {
+ /* Oh, oh. Still we wait for assigned workers to report that
+ * they are done. Unless we have a bug, a worker seems to be hanging.
+ * If we exit now, all will be deallocated and the worker, once
+ * it does return, will walk all over freed memory...
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
+ "h2_mplx(%ld): release, waiting for %d seconds now for "
+ "all h2_workers to return, have still %d requests outstanding",
+ m->id, i*wait_secs, (int)h2_io_set_size(m->stream_ios));
+ }
+ }
}
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
- "h2_mplx(%ld): release_join -> destroy, (#ios=%ld)",
- m->id, (long)h2_io_set_size(m->stream_ios));
+ "h2_mplx(%ld): release_join -> destroy", m->id);
+ apr_thread_mutex_unlock(m->lock);
h2_mplx_destroy(m);
/* all gone */
- /*apr_thread_mutex_unlock(m->lock);*/
}
return status;
}
void h2_mplx_abort(h2_mplx *m)
{
apr_status_t status;
+
AP_DEBUG_ASSERT(m);
- status = apr_thread_mutex_lock(m->lock);
- if (APR_SUCCESS == status) {
- m->aborted = 1;
- apr_thread_mutex_unlock(m->lock);
+ if (!m->aborted) {
+ status = apr_thread_mutex_lock(m->lock);
+ if (APR_SUCCESS == status) {
+ m->aborted = 1;
+ apr_thread_mutex_unlock(m->lock);
+ }
}
}
return status;
}
-void h2_mplx_task_done(h2_mplx *m, int stream_id)
+/* Shift the next scheduled stream id off the task queue and return its
+ * request, marking the io as worker_started and tracking the highest
+ * stream id started. Returns NULL when aborted or the queue is empty.
+ * Caller must hold m->lock (see h2_mplx_request_done). */
+static const h2_request *pop_request(h2_mplx *m)
+{
+    const h2_request *req = NULL;
+    int sid;
+    while (!m->aborted && !req && (sid = h2_tq_shift(m->q)) > 0) {
+        h2_io *io = h2_io_set_get(m->stream_ios, sid);
+        if (io) {
+            req = io->request;
+            io->worker_started = 1;
+            if (sid > m->max_stream_started) {
+                m->max_stream_started = sid;
+            }
+        }
+    }
+    return req;
+}
+
+/* A worker reports the request for `stream_id` done. Destroys the io
+ * when it was orphaned meanwhile (waking a join waiter, see
+ * release_join). When `preq` is non-NULL, hands the worker the next
+ * queued request; when none is handed out, NULLs *pm so the worker
+ * drops its mplx reference. */
+void h2_mplx_request_done(h2_mplx **pm, int stream_id, const h2_request **preq)
{
+    h2_mplx *m = *pm;
+
apr_status_t status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
-                      "h2_mplx(%ld): task(%d) done", m->id, stream_id);
+                      "h2_mplx(%ld): request(%d) done", m->id, stream_id);
if (io) {
-            io->task_done = 1;
+            io->worker_done = 1;
if (io->orphaned) {
io_destroy(m, io, 0);
+                if (m->join_wait) {
+                    /* release_join may be waiting for all ios to go */
+                    apr_thread_cond_signal(m->join_wait);
+                }
}
else {
/* hang around until the stream deregisteres */
}
}
+
+        if (preq) {
+            /* someone wants another request, if we have */
+            *preq = pop_request(m);
+        }
+        if (!preq || !*preq) {
+            /* No request to hand back to the worker, NULLify reference
+             * and decrement count */
+            *pm = NULL;
+        }
apr_thread_mutex_unlock(m->lock);
}
}
apr_status_t h2_mplx_in_read(h2_mplx *m, apr_read_type_e block,
- int stream_id, apr_bucket_brigade *bb,
+ int stream_id, apr_bucket_brigade *bb,
+ apr_table_t *trailers,
struct apr_thread_cond_t *iowait)
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
if (io && !io->orphaned) {
- io->input_arrived = iowait;
H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_read_pre");
- status = h2_io_in_read(io, bb, -1);
+
+ h2_io_signal_init(io, H2_IO_READ, m->stream_timeout_secs, iowait);
+ status = h2_io_in_read(io, bb, -1, trailers);
while (APR_STATUS_IS_EAGAIN(status)
&& !is_aborted(m, &status)
&& block == APR_BLOCK_READ) {
- apr_thread_cond_wait(io->input_arrived, m->lock);
- status = h2_io_in_read(io, bb, -1);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
+ "h2_mplx(%ld-%d): wait on in data (BLOCK_READ)",
+ m->id, stream_id);
+ status = h2_io_signal_wait(m, io);
+ if (status == APR_SUCCESS) {
+ status = h2_io_in_read(io, bb, -1, trailers);
+ }
}
H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_read_post");
- io->input_arrived = NULL;
+ h2_io_signal_exit(io);
}
else {
status = APR_EOF;
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_write_pre");
status = h2_io_in_write(io, bb);
H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_write_post");
- if (io->input_arrived) {
- apr_thread_cond_signal(io->input_arrived);
- }
+ h2_io_signal(io, H2_IO_READ);
io_process_events(m, io);
}
else {
- status = APR_EOF;
+ status = APR_ECONNABORTED;
}
apr_thread_mutex_unlock(m->lock);
}
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
if (io && !io->orphaned) {
status = h2_io_in_close(io);
H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_close");
- if (io->input_arrived) {
- apr_thread_cond_signal(io->input_arrived);
- }
+ h2_io_signal(io, H2_IO_READ);
io_process_events(m, io);
}
else {
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
status = h2_io_out_readx(io, cb, ctx, plen, peos);
H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_readx_post");
- if (status == APR_SUCCESS && cb && io->output_drained) {
- apr_thread_cond_signal(io->output_drained);
+ if (status == APR_SUCCESS && cb) {
+ h2_io_signal(io, H2_IO_WRITE);
}
}
else {
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
status = h2_io_out_read_to(io, bb, plen, peos);
H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_read_to_post");
- if (status == APR_SUCCESS && io->output_drained) {
- apr_thread_cond_signal(io->output_drained);
+ if (status == APR_SUCCESS) {
+ h2_io_signal(io, H2_IO_WRITE);
}
}
else {
{
apr_status_t status;
h2_stream *stream = NULL;
+
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return NULL;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_pop_highest_prio(m->ready_ios);
- if (io) {
+ if (io && !m->aborted) {
stream = h2_stream_set_get(streams, io->id);
if (stream) {
if (io->rst_error) {
h2_stream_set_response(stream, io->response, io->bbout);
H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_next_submit_post");
}
-
}
else {
/* We have the io ready, but the stream has gone away, maybe
* reset by the client. Should no longer happen since such
* streams should clear io's from the ready queue.
*/
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c, APLOGNO(02953)
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
"h2_mplx(%ld): stream for response %d closed, "
"resetting io to close request processing",
m->id, io->id);
- io->orphaned = 1;
- if (io->task_done) {
+ h2_io_make_orphaned(io, H2_ERR_STREAM_CLOSED);
+ if (!io->worker_started || io->worker_done) {
io_destroy(m, io, 1);
}
else {
* shutdown input and send out any events (e.g. window
* updates) asap. */
h2_io_in_shutdown(io);
- h2_io_rst(io, H2_ERR_STREAM_CLOSED);
io_process_events(m, io);
}
}
- if (io->output_drained) {
- apr_thread_cond_signal(io->output_drained);
- }
+ h2_io_signal(io, H2_IO_WRITE);
}
apr_thread_mutex_unlock(m->lock);
}
* We will not split buckets to enforce the limit to the last
* byte. After all, the bucket is already in memory.
*/
- while (!APR_BRIGADE_EMPTY(bb)
- && (status == APR_SUCCESS)
+ while (status == APR_SUCCESS
+ && !APR_BRIGADE_EMPTY(bb)
&& !is_aborted(m, &status)) {
status = h2_io_out_write(io, bb, m->stream_max_mem, trailers,
- &m->file_handles_allowed);
- /* Wait for data to drain until there is room again */
- while (!APR_BRIGADE_EMPTY(bb)
+ &m->tx_handles_reserved);
+ /* Wait for data to drain until there is room again or
+ * stream timeout expires */
+ h2_io_signal_init(io, H2_IO_WRITE, m->stream_timeout_secs, iowait);
+ while (status == APR_SUCCESS
+ && !APR_BRIGADE_EMPTY(bb)
&& iowait
- && status == APR_SUCCESS
&& (m->stream_max_mem <= h2_io_out_length(io))
&& !is_aborted(m, &status)) {
trailers = NULL;
- io->output_drained = iowait;
if (f) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
"h2_mplx(%ld-%d): waiting for out drain",
m->id, io->id);
}
- apr_thread_cond_wait(io->output_drained, m->lock);
- io->output_drained = NULL;
+ status = h2_io_signal_wait(m, io);
}
+ h2_io_signal_exit(io);
}
apr_brigade_cleanup(bb);
h2_io_set_response(io, response);
h2_io_set_add(m->ready_ios, io);
+ if (response && response->http_status < 300) {
+ /* we might see some file buckets in the output, see
+ * if we have enough handles reserved. */
+ check_tx_reservation(m);
+ }
if (bb) {
status = out_write(m, io, f, bb, response->trailers, iowait);
}
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- status = out_open(m, stream_id, response, f, bb, iowait);
- if (APLOGctrace1(m->c)) {
- h2_util_bb_log(m->c, stream_id, APLOG_TRACE1, "h2_mplx_out_open", bb);
- }
if (m->aborted) {
- return APR_ECONNABORTED;
+ status = APR_ECONNABORTED;
+ }
+ else {
+ status = out_open(m, stream_id, response, f, bb, iowait);
+ if (APLOGctrace1(m->c)) {
+ h2_util_bb_log(m->c, stream_id, APLOG_TRACE1, "h2_mplx_out_open", bb);
+ }
}
apr_thread_mutex_unlock(m->lock);
}
return status;
}
-
apr_status_t h2_mplx_out_write(h2_mplx *m, int stream_id,
ap_filter_t* f, apr_bucket_brigade *bb,
apr_table_t *trailers,
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- if (!m->aborted) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- status = out_write(m, io, f, bb, trailers, iowait);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c,
- "h2_mplx(%ld-%d): write with trailers=%s",
- m->id, io->id, trailers? "yes" : "no");
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_write");
-
- have_out_data_for(m, stream_id);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
- }
- else {
- status = APR_ECONNABORTED;
- }
+ h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
+ if (io && !io->orphaned) {
+ status = out_write(m, io, f, bb, trailers, iowait);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+ "h2_mplx(%ld-%d): write with trailers=%s",
+ m->id, io->id, trailers? "yes" : "no");
+ H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_write");
+
+ have_out_data_for(m, stream_id);
}
-
- if (m->lock) {
- apr_thread_mutex_unlock(m->lock);
+ else {
+ status = APR_ECONNABORTED;
}
+ apr_thread_mutex_unlock(m->lock);
}
return status;
}
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- if (!m->aborted) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- if (!io->response && !io->rst_error) {
- /* In case a close comes before a response was created,
- * insert an error one so that our streams can properly
- * reset.
- */
- h2_response *r = h2_response_die(stream_id, APR_EGENERAL,
- io->request, m->pool);
- status = out_open(m, stream_id, r, NULL, NULL, NULL);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c,
- "h2_mplx(%ld-%d): close, no response, no rst",
- m->id, io->id);
- }
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c,
- "h2_mplx(%ld-%d): close with trailers=%s",
- m->id, io->id, trailers? "yes" : "no");
- status = h2_io_out_close(io, trailers);
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_close");
-
- have_out_data_for(m, stream_id);
- if (m->aborted) {
- /* if we were the last output, the whole session might
- * have gone down in the meantime.
- */
- return APR_SUCCESS;
- }
- }
- else {
- status = APR_ECONNABORTED;
+ h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
+ if (io && !io->orphaned) {
+ if (!io->response && !io->rst_error) {
+ /* In case a close comes before a response was created,
+ * insert an error one so that our streams can properly
+ * reset.
+ */
+ h2_response *r = h2_response_die(stream_id, APR_EGENERAL,
+ io->request, m->pool);
+ status = out_open(m, stream_id, r, NULL, NULL, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+ "h2_mplx(%ld-%d): close, no response, no rst",
+ m->id, io->id);
}
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+ "h2_mplx(%ld-%d): close with trailers=%s",
+ m->id, io->id, trailers? "yes" : "no");
+ status = h2_io_out_close(io, trailers);
+ H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_close");
+
+ have_out_data_for(m, stream_id);
+ }
+ else {
+ status = APR_ECONNABORTED;
}
apr_thread_mutex_unlock(m->lock);
}
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- if (!m->aborted) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->rst_error && !io->orphaned) {
- h2_io_rst(io, error);
- if (!io->response) {
- h2_io_set_add(m->ready_ios, io);
- }
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_rst");
-
- have_out_data_for(m, stream_id);
- if (io->output_drained) {
- apr_thread_cond_signal(io->output_drained);
- }
- }
- else {
- status = APR_ECONNABORTED;
+ h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
+ if (io && !io->rst_error && !io->orphaned) {
+ h2_io_rst(io, error);
+ if (!io->response) {
+ h2_io_set_add(m->ready_ios, io);
}
+ H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_rst");
+
+ have_out_data_for(m, stream_id);
+ h2_io_signal(io, H2_IO_WRITE);
+ }
+ else {
+ status = APR_ECONNABORTED;
}
apr_thread_mutex_unlock(m->lock);
}
int has_eos = 0;
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return 0;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io) {
- has_eos = io->orphaned || h2_io_in_has_eos_for(io);
+ if (io && !io->orphaned) {
+ has_eos = h2_io_in_has_eos_for(io);
+ }
+ else {
+ has_eos = 1;
}
apr_thread_mutex_unlock(m->lock);
}
apr_status_t status;
int has_data = 0;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return 0;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io) {
+ if (io && !io->orphaned) {
has_data = h2_io_out_has_data(io);
}
+ else {
+ has_data = 0;
+ }
apr_thread_mutex_unlock(m->lock);
}
return has_data;
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- m->added_output = iowait;
- status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
- if (APLOGctrace2(m->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): trywait on data for %f ms)",
- m->id, timeout/1000.0);
- }
- m->added_output = NULL;
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ m->added_output = iowait;
+ status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
+ if (APLOGctrace2(m->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): trywait on data for %f ms)",
+ m->id, timeout/1000.0);
+ }
+ m->added_output = NULL;
+ }
apr_thread_mutex_unlock(m->lock);
}
return status;
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- h2_tq_sort(m->q, cmp, ctx);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): reprioritize tasks", m->id);
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ h2_tq_sort(m->q, cmp, ctx);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): reprioritize tasks", m->id);
+ }
apr_thread_mutex_unlock(m->lock);
}
- workers_register(m);
return status;
}
m->spare_pool = NULL;
}
- io = h2_io_create(stream_id, io_pool, m->bucket_alloc);
+ io = h2_io_create(stream_id, io_pool);
h2_io_set_add(m->stream_ios, io);
return io;
}
-apr_status_t h2_mplx_process(h2_mplx *m, int stream_id,
- const h2_request *req, int eos,
+apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, const h2_request *req,
h2_stream_pri_cmp *cmp, void *ctx)
{
apr_status_t status;
+ int was_empty = 0;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- h2_io *io = open_io(m, stream_id);
- io->request = req;
- io->request_body = !eos;
-
- if (eos) {
- status = h2_io_in_close(io);
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ h2_io *io = open_io(m, stream_id);
+ io->request = req;
+
+ if (!io->request->body) {
+ status = h2_io_in_close(io);
+ }
+
+ was_empty = h2_tq_empty(m->q);
+ h2_tq_add(m->q, io->id, cmp, ctx);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+ "h2_mplx(%ld-%d): process", m->c->id, stream_id);
+ H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_process");
}
-
- h2_tq_add(m->q, io->id, cmp, ctx);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
- "h2_mplx(%ld-%d): process", m->c->id, stream_id);
- H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_process");
apr_thread_mutex_unlock(m->lock);
}
-
- if (status == APR_SUCCESS) {
+ if (status == APR_SUCCESS && was_empty) {
workers_register(m);
}
return status;
}
-h2_task *h2_mplx_pop_task(h2_mplx *m, h2_worker *w, int *has_more)
+const h2_request *h2_mplx_pop_request(h2_mplx *m, int *has_more)
{
- h2_task *task = NULL;
+ const h2_request *req = NULL;
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- *has_more = 0;
- return NULL;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- int sid;
- while (!task && (sid = h2_tq_shift(m->q)) > 0) {
- /* Anything not already setup correctly in the task
- * needs to be so now, as task will be executed right about
- * when this method returns. */
- h2_io *io = h2_io_set_get(m->stream_ios, sid);
- if (io) {
- task = h2_worker_create_task(w, m, io->request, !io->request_body);
- }
+ if (m->aborted) {
+ req = NULL;
+ *has_more = 0;
+ }
+ else {
+ req = pop_request(m);
+ *has_more = !h2_tq_empty(m->q);
}
- *has_more = !h2_tq_empty(m->q);
apr_thread_mutex_unlock(m->lock);
}
- return task;
+ return req;
}
/**
* The stream multiplexer. It pushes buckets from the connection
- * thread to the stream task threads and vice versa. It's thread-safe
+ * thread to the stream threads and vice versa. It's thread-safe
* to use.
*
* There is one h2_mplx instance for each h2_session, which sits on top
struct h2_request;
struct h2_io_set;
struct apr_thread_cond_t;
-struct h2_worker;
struct h2_workers;
struct h2_stream_set;
struct h2_task_queue;
volatile int refs;
conn_rec *c;
apr_pool_t *pool;
- apr_bucket_alloc_t *bucket_alloc;
+
+ unsigned int aborted : 1;
struct h2_task_queue *q;
struct h2_io_set *stream_ios;
struct h2_io_set *ready_ios;
+ int max_stream_started; /* highest stream id that started processing */
+
apr_thread_mutex_t *lock;
struct apr_thread_cond_t *added_output;
struct apr_thread_cond_t *join_wait;
- int aborted;
apr_size_t stream_max_mem;
+ int stream_timeout_secs;
apr_pool_t *spare_pool; /* spare pool, ready for next io */
struct h2_workers *workers;
- int file_handles_allowed;
+ apr_size_t tx_handles_reserved;
+ apr_size_t tx_chunk_size;
h2_mplx_consumed_cb *input_consumed;
void *input_consumed_ctx;
const struct h2_config *conf,
struct h2_workers *workers);
-/**
- * Increase the reference counter of this mplx.
- */
-void h2_mplx_reference(h2_mplx *m);
-
-/**
- * Decreases the reference counter of this mplx.
- */
-void h2_mplx_release(h2_mplx *m);
-
/**
* Decreases the reference counter of this mplx and waits for it
* to reached 0, destroy the mplx afterwards.
/**
* Aborts the multiplexer. It will answer all future invocation with
- * APR_ECONNABORTED, leading to early termination of ongoing tasks.
+ * APR_ECONNABORTED, leading to early termination of ongoing streams.
*/
void h2_mplx_abort(h2_mplx *mplx);
-void h2_mplx_task_done(h2_mplx *m, int stream_id);
+void h2_mplx_request_done(h2_mplx **pm, int stream_id, const struct h2_request **preq);
+
+/**
+ * Get the highest stream identifier that has been passed on to processing.
+ * May be 0 in case no stream has been processed yet.
+ * @param m the multiplexer
+ * @return highest stream identifier for which processing started
+ */
+int h2_mplx_get_max_stream_started(h2_mplx *m);
/*******************************************************************************
* IO lifetime of streams.
* @param m the multiplexer
* @param stream_id the identifier of the stream
* @param r the request to be processed
- * @param eos if input is complete
* @param cmp the stream priority compare function
* @param ctx context data for the compare function
*/
-apr_status_t h2_mplx_process(h2_mplx *m, int stream_id,
- const struct h2_request *r, int eos,
+apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, const struct h2_request *r,
h2_stream_pri_cmp *cmp, void *ctx);
/**
- * Stream priorities have changed, reschedule pending tasks.
+ * Stream priorities have changed, reschedule pending requests.
*
* @param m the multiplexer
* @param cmp the stream priority compare function
*/
apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
-struct h2_task *h2_mplx_pop_task(h2_mplx *mplx, struct h2_worker *w, int *has_more);
+const struct h2_request *h2_mplx_pop_request(h2_mplx *mplx, int *has_more);
/**
* Register a callback for the amount of input data consumed per stream. The
*/
apr_status_t h2_mplx_in_read(h2_mplx *m, apr_read_type_e block,
int stream_id, apr_bucket_brigade *bb,
+ apr_table_t *trailers,
struct apr_thread_cond_t *iowait);
/**
#define H2_HEADER_PATH_LEN 5
#define H2_CRLF "\r\n"
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+
+#define H2MAX(x,y) ((x) > (y) ? (x) : (y))
+#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
+
#endif
#include <assert.h>
#include <stdio.h>
-#include <apr_strings.h>
#include <apr_lib.h>
+#include <apr_strings.h>
+#include <apr_hash.h>
+#include <apr_time.h>
+
+#ifdef H2_OPENSSL
+#include <openssl/sha.h>
+#endif
#include <httpd.h>
#include <http_core.h>
#include "h2_push.h"
#include "h2_request.h"
#include "h2_response.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+/*******************************************************************************
+ * link header handling
+ ******************************************************************************/
+
+static const char *policy_str(h2_push_policy policy)
+{
+ switch (policy) {
+ case H2_PUSH_NONE:
+ return "none";
+ case H2_PUSH_FAST_LOAD:
+ return "fast-load";
+ case H2_PUSH_HEAD:
+ return "head";
+ default:
+ return "default";
+ }
+}
typedef struct {
const h2_request *req;
if (apr_uri_parse(ctx->pool, ctx->link, &uri) == APR_SUCCESS) {
if (uri.path && same_authority(ctx->req, &uri)) {
char *path;
+ const char *method;
apr_table_t *headers;
h2_request *req;
h2_push *push;
push = apr_pcalloc(ctx->pool, sizeof(*push));
+ switch (ctx->req->push_policy) {
+ case H2_PUSH_HEAD:
+ method = "HEAD";
+ break;
+ default:
+ method = "GET";
+ break;
+ }
headers = apr_table_make(ctx->pool, 5);
apr_table_do(set_header, headers, ctx->req->headers,
"User-Agent",
"Accept-Language",
NULL);
req = h2_request_createn(0, ctx->pool, ctx->req->config,
- "GET", ctx->req->scheme,
+ method, ctx->req->scheme,
ctx->req->authority,
path, headers);
- h2_request_end_headers(req, ctx->pool, 1);
+ /* atm, we do not push on pushes */
+ h2_request_end_headers(req, ctx->pool, 1, 0);
push->req = req;
if (!ctx->pushes) {
apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req,
const h2_response *res)
{
- /* Collect push candidates from the request/response pair.
- *
- * One source for pushes are "rel=preload" link headers
- * in the response.
- *
- * TODO: This may be extended in the future by hooks or callbacks
- * where other modules can provide push information directly.
+ if (req && req->push_policy != H2_PUSH_NONE) {
+ /* Collect push candidates from the request/response pair.
+ *
+ * One source for pushes are "rel=preload" link headers
+ * in the response.
+ *
+ * TODO: This may be extended in the future by hooks or callbacks
+ * where other modules can provide push information directly.
+ */
+ if (res->headers) {
+ link_ctx ctx;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.req = req;
+ ctx.pool = p;
+
+ apr_table_do(head_iter, &ctx, res->headers, NULL);
+ if (ctx.pushes) {
+ apr_table_setn(res->headers, "push-policy", policy_str(req->push_policy));
+ }
+ return ctx.pushes;
+ }
+ }
+ return NULL;
+}
+
+void h2_push_policy_determine(struct h2_request *req, apr_pool_t *p, int push_enabled)
+{
+ h2_push_policy policy = H2_PUSH_NONE;
+ if (push_enabled) {
+ const char *val = apr_table_get(req->headers, "accept-push-policy");
+ if (val) {
+ if (ap_find_token(p, val, "fast-load")) {
+ policy = H2_PUSH_FAST_LOAD;
+ }
+ else if (ap_find_token(p, val, "head")) {
+ policy = H2_PUSH_HEAD;
+ }
+ else if (ap_find_token(p, val, "default")) {
+ policy = H2_PUSH_DEFAULT;
+ }
+ else if (ap_find_token(p, val, "none")) {
+ policy = H2_PUSH_NONE;
+ }
+ else {
+ /* nothing known found in this header, go by default */
+ policy = H2_PUSH_DEFAULT;
+ }
+ }
+ else {
+ policy = H2_PUSH_DEFAULT;
+ }
+ }
+ req->push_policy = policy;
+}
+
+/*******************************************************************************
+ * push diary
+ ******************************************************************************/
+
+
+#define GCSLOG_LEVEL APLOG_TRACE1
+
+typedef struct h2_push_diary_entry {
+ apr_uint64_t hash;
+} h2_push_diary_entry;
+
+
+#ifdef H2_OPENSSL
+static void sha256_update(SHA256_CTX *ctx, const char *s)
+{
+ SHA256_Update(ctx, s, strlen(s));
+}
+
+static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
+{
+ SHA256_CTX sha256;
+ apr_uint64_t val;
+ unsigned char hash[SHA256_DIGEST_LENGTH];
+ int i;
+
+ SHA256_Init(&sha256);
+ sha256_update(&sha256, push->req->scheme);
+ sha256_update(&sha256, "://");
+ sha256_update(&sha256, push->req->authority);
+ sha256_update(&sha256, push->req->path);
+ SHA256_Final(hash, &sha256);
+
+ val = 0;
+ for (i = 0; i != sizeof(val); ++i)
+ val = val * 256 + hash[i];
+ *phash = val >> (64 - diary->mask_bits);
+}
+#endif
+
+
+static unsigned int val_apr_hash(const char *str)
+{
+ apr_ssize_t len = strlen(str);
+ return apr_hashfunc_default(str, &len);
+}
+
+static void calc_apr_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
+{
+ apr_uint64_t val;
+#if APR_UINT64MAX > APR_UINT_MAX
+ val = ((apr_uint64_t)val_apr_hash(push->req->scheme) << 32);
+ val ^= ((apr_uint64_t)val_apr_hash(push->req->authority) << 16);
+ val ^= val_apr_hash(push->req->path);
+#else
+ val = val_apr_hash(push->req->scheme);
+ val ^= val_apr_hash(push->req->authority);
+ val ^= val_apr_hash(push->req->path);
+#endif
+ *phash = val;
+}
+
+static apr_int32_t ceil_power_of_2(apr_int32_t n)
+{
+ if (n <= 2) return 2;
+ --n;
+ n |= n >> 1;
+ n |= n >> 2;
+ n |= n >> 4;
+ n |= n >> 8;
+ n |= n >> 16;
+ return ++n;
+}
+
+static h2_push_diary *diary_create(apr_pool_t *p, h2_push_digest_type dtype,
+ apr_size_t N)
+{
+ h2_push_diary *diary = NULL;
+
+ if (N > 0) {
+ diary = apr_pcalloc(p, sizeof(*diary));
+
+ diary->NMax = ceil_power_of_2(N);
+ diary->N = diary->NMax;
+ * the values we use in value comparison depends on where we got
+ * the values from. If we calculate them ourselves, we can use
+ * the full 64 bits.
+ * If we set the diary via a compressed golomb set, we have less
+ * relevant bits and need to use a smaller mask. */
+ diary->mask_bits = 64;
+ /* grows by doubling, start with a power of 2 */
+ diary->entries = apr_array_make(p, 16, sizeof(h2_push_diary_entry));
+
+ switch (dtype) {
+#ifdef H2_OPENSSL
+ case H2_PUSH_DIGEST_SHA256:
+ diary->dtype = H2_PUSH_DIGEST_SHA256;
+ diary->dcalc = calc_sha256_hash;
+ break;
+#endif /* ifdef H2_OPENSSL */
+ default:
+ diary->dtype = H2_PUSH_DIGEST_APR_HASH;
+ diary->dcalc = calc_apr_hash;
+ break;
+ }
+ }
+
+ return diary;
+}
+
+h2_push_diary *h2_push_diary_create(apr_pool_t *p, apr_size_t N)
+{
+ return diary_create(p, H2_PUSH_DIGEST_SHA256, N);
+}
+
+static int h2_push_diary_find(h2_push_diary *diary, apr_uint64_t hash)
+{
+ if (diary) {
+ h2_push_diary_entry *e;
+ int i;
+
+ /* search from the end, where the last accessed digests are */
+ for (i = diary->entries->nelts-1; i >= 0; --i) {
+ e = &APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry);
+ if (e->hash == hash) {
+ return i;
+ }
+ }
+ }
+ return -1;
+}
+
+static h2_push_diary_entry *move_to_last(h2_push_diary *diary, apr_size_t idx)
+{
+ h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
+ h2_push_diary_entry e;
+ apr_size_t lastidx = diary->entries->nelts-1;
+
+ /* move entry[idx] to the end */
+ if (idx < lastidx) {
+ e = entries[idx];
+ memmove(entries+idx, entries+idx+1, sizeof(e) * (lastidx - idx));
+ entries[lastidx] = e;
+ }
+ return &entries[lastidx];
+}
+
+static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
+{
+ h2_push_diary_entry *ne;
+
+ if (diary->entries->nelts < diary->N) {
+ /* append a new diary entry at the end */
+ APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
+ ne = &APR_ARRAY_IDX(diary->entries, diary->entries->nelts-1, h2_push_diary_entry);
+ }
+ else {
+ /* replace content with new digest. keeps memory usage constant once diary is full */
+ ne = move_to_last(diary, 0);
+ *ne = *e;
+ }
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool,
+ "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash);
+}
+
+apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes)
+{
+ apr_array_header_t *npushes = pushes;
+ h2_push_diary_entry e;
+ int i, idx;
+
+ if (session->push_diary && pushes) {
+ npushes = NULL;
+
+ for (i = 0; i < pushes->nelts; ++i) {
+ h2_push *push;
+
+ push = APR_ARRAY_IDX(pushes, i, h2_push*);
+ session->push_diary->dcalc(session->push_diary, &e.hash, push);
+ idx = h2_push_diary_find(session->push_diary, e.hash);
+ if (idx >= 0) {
+ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c,
+ "push_diary_update: already there PUSH %s", push->req->path);
+ move_to_last(session->push_diary, idx);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c,
+ "push_diary_update: adding PUSH %s", push->req->path);
+ if (!npushes) {
+ npushes = apr_array_make(pushes->pool, 5, sizeof(h2_push_diary_entry*));
+ }
+ APR_ARRAY_PUSH(npushes, h2_push*) = push;
+ h2_push_diary_append(session->push_diary, &e);
+ }
+ }
+ }
+ return npushes;
+}
+
+apr_array_header_t *h2_push_collect_update(h2_stream *stream,
+ const struct h2_request *req,
+ const struct h2_response *res)
+{
+ h2_session *session = stream->session;
+ const char *cache_digest = apr_table_get(req->headers, "Cache-Digest");
+ apr_array_header_t *pushes;
+ apr_status_t status;
+
+ if (cache_digest && session->push_diary) {
+ status = h2_push_diary_digest64_set(session->push_diary, req->authority,
+ cache_digest, stream->pool);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ "h2_session(%ld): push diary set from Cache-Digest: %s",
+ session->id, cache_digest);
+ }
+ }
+ pushes = h2_push_collect(stream->pool, req, res);
+ return h2_push_diary_update(stream->session, pushes);
+}
+
+/* h2_log2(n) iff n is a power of 2 */
+static unsigned char h2_log2(apr_uint32_t n)
+{
+ int lz = 0;
+ if (!n) {
+ return 0;
+ }
+ if (!(n & 0xffff0000u)) {
+ lz += 16;
+ n = (n << 16);
+ }
+ if (!(n & 0xff000000u)) {
+ lz += 8;
+ n = (n << 8);
+ }
+ if (!(n & 0xf0000000u)) {
+ lz += 4;
+ n = (n << 4);
+ }
+ if (!(n & 0xc0000000u)) {
+ lz += 2;
+ n = (n << 2);
+ }
+ if (!(n & 0x80000000u)) {
+ lz += 1;
+ }
+
+ return 31 - lz;
+}
+
+static apr_int32_t h2_log2inv(unsigned char log2)
+{
+ return log2? (1 << log2) : 1;
+}
+
+
+typedef struct {
+ h2_push_diary *diary;
+ unsigned char log2p;
+ apr_uint32_t mask_bits;
+ apr_uint32_t delta_bits;
+ apr_uint32_t fixed_bits;
+ apr_uint64_t fixed_mask;
+ apr_pool_t *pool;
+ unsigned char *data;
+ apr_size_t datalen;
+ apr_size_t offset;
+ unsigned int bit;
+ apr_uint64_t last;
+} gset_encoder;
+
+static int cmp_puint64(const void *p1, const void *p2)
+{
+ const apr_uint64_t *pu1 = p1, *pu2 = p2;
+ return (*pu1 > *pu2)? 1 : ((*pu1 == *pu2)? 0 : -1);
+}
+
+/* in golomb bit stream encoding, bit 0 is the 8th of the first char, or
+ * more generally:
+ * char(bit/8) & cbit_mask[(bit % 8)]
+ */
+static unsigned char cbit_mask[] = {
+ 0x80u,
+ 0x40u,
+ 0x20u,
+ 0x10u,
+ 0x08u,
+ 0x04u,
+ 0x02u,
+ 0x01u,
+};
+
+static apr_status_t gset_encode_bit(gset_encoder *encoder, int bit)
+{
+ if (++encoder->bit >= 8) {
+ if (++encoder->offset >= encoder->datalen) {
+ apr_size_t nlen = encoder->datalen*2;
+ unsigned char *ndata = apr_pcalloc(encoder->pool, nlen);
+ if (!ndata) {
+ return APR_ENOMEM;
+ }
+ memcpy(ndata, encoder->data, encoder->datalen);
+ encoder->data = ndata;
+ encoder->datalen = nlen;
+ }
+ encoder->bit = 0;
+ encoder->data[encoder->offset] = 0xffu;
+ }
+ if (!bit) {
+ encoder->data[encoder->offset] &= ~cbit_mask[encoder->bit];
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t gset_encode_next(gset_encoder *encoder, apr_uint64_t pval)
+{
+ apr_uint64_t delta, flex_bits;
+ apr_status_t status = APR_SUCCESS;
+ int i;
+
+ delta = pval - encoder->last;
+ encoder->last = pval;
+ flex_bits = (delta >> encoder->fixed_bits);
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, encoder->pool,
+ "h2_push_diary_enc: val=%"APR_UINT64_T_HEX_FMT", delta=%"
+ APR_UINT64_T_HEX_FMT" flex_bits=%"APR_UINT64_T_FMT", "
+ "fixed_bits=%d, fixed_val=%"APR_UINT64_T_HEX_FMT,
+ pval, delta, flex_bits, encoder->fixed_bits, delta&encoder->fixed_mask);
+ for (; flex_bits != 0; --flex_bits) {
+ status = gset_encode_bit(encoder, 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ status = gset_encode_bit(encoder, 0);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ for (i = encoder->fixed_bits-1; i >= 0; --i) {
+ status = gset_encode_bit(encoder, (delta >> i) & 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ return APR_SUCCESS;
+}
+
+/**
+ * Get a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * from the contents of the push diary.
+ *
+ * @param diary the diary to calculate the digest from
+ * @param p the pool to use
+ * @param pdata on successful return, the binary cache digest
+ * @param plen on successful return, the length of the binary data
+ */
+apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
+ apr_uint32_t maxP, const char *authority,
+ const char **pdata, apr_size_t *plen)
+{
+ apr_size_t nelts, N, i;
+ unsigned char log2n, log2pmax;
+ gset_encoder encoder;
+ apr_uint64_t *hashes;
+ apr_size_t hash_count;
+
+ nelts = diary->entries->nelts;
+
+ if (nelts > APR_UINT32_MAX) {
+ /* should not happen */
+ return APR_ENOTIMPL;
+ }
+ N = ceil_power_of_2(nelts);
+ log2n = h2_log2(N);
+
+ /* Now log2p is the max number of relevant bits, so that
+ * log2p + log2n == mask_bits. We can use a lower log2p
+ * and have a shorter set encoding...
*/
- if (res->headers) {
- link_ctx ctx;
+ log2pmax = h2_log2(ceil_power_of_2(maxP));
+
+ memset(&encoder, 0, sizeof(encoder));
+ encoder.diary = diary;
+ encoder.log2p = H2MIN(diary->mask_bits - log2n, log2pmax);
+ encoder.mask_bits = log2n + encoder.log2p;
+ encoder.delta_bits = diary->mask_bits - encoder.mask_bits;
+ encoder.fixed_bits = encoder.log2p;
+ encoder.fixed_mask = 1;
+ encoder.fixed_mask = (encoder.fixed_mask << encoder.fixed_bits) - 1;
+ encoder.pool = pool;
+ encoder.datalen = 512;
+ encoder.data = apr_pcalloc(encoder.pool, encoder.datalen);
+
+ encoder.data[0] = log2n;
+ encoder.data[1] = encoder.log2p;
+ encoder.offset = 1;
+ encoder.bit = 8;
+ encoder.last = 0;
+
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_get: %d entries, N=%d, log2n=%d, "
+ "mask_bits=%d, enc.mask_bits=%d, delta_bits=%d, enc.log2p=%d, authority=%s",
+ (int)nelts, (int)N, (int)log2n, diary->mask_bits,
+ (int)encoder.mask_bits, (int)encoder.delta_bits,
+ (int)encoder.log2p, authority);
+
+ if (!authority || !diary->authority
+ || !strcmp("*", authority) || !strcmp(diary->authority, authority)) {
+ hash_count = diary->entries->nelts;
+ hashes = apr_pcalloc(encoder.pool, hash_count * sizeof(apr_uint64_t));
+ for (i = 0; i < hash_count; ++i) {
+ hashes[i] = ((&APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry))->hash
+ >> encoder.delta_bits);
+ }
- memset(&ctx, 0, sizeof(ctx));
- ctx.req = req;
- ctx.pool = p;
+ qsort(hashes, hash_count, sizeof(apr_uint64_t), cmp_puint64);
+ for (i = 0; i < hash_count; ++i) {
+ if (!i || (hashes[i] != hashes[i-1])) {
+ gset_encode_next(&encoder, hashes[i]);
+ }
+ }
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_get: golomb compressed hashes, %d bytes",
+ (int)encoder.offset + 1);
+ }
+ *pdata = (const char *)encoder.data;
+ *plen = encoder.offset + 1;
- apr_table_do(head_iter, &ctx, res->headers, NULL);
- return ctx.pushes;
+ return APR_SUCCESS;
+}
+
+typedef struct {
+ h2_push_diary *diary;
+ apr_pool_t *pool;
+ unsigned char log2p;
+ const unsigned char *data;
+ apr_size_t datalen;
+ apr_size_t offset;
+ unsigned int bit;
+ apr_uint64_t last_val;
+} gset_decoder;
+
+static int gset_decode_next_bit(gset_decoder *decoder)
+{
+ if (++decoder->bit >= 8) {
+ if (++decoder->offset >= decoder->datalen) {
+ return -1;
+ }
+ decoder->bit = 0;
}
- return NULL;
+ return (decoder->data[decoder->offset] & cbit_mask[decoder->bit])? 1 : 0;
+}
+
+static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash)
+{
+ apr_uint64_t flex = 0, fixed = 0, delta;
+ int i;
+
+ /* read 1 bits until we encounter 0, then read log2n(diary-P) bits.
+ * On a malformed bit-string, this will not fail, but produce results
+ * which are probably too large. Luckily, the diary will modulo the hash.
+ */
+ while (1) {
+ int bit = gset_decode_next_bit(decoder);
+ if (bit == -1) {
+ return APR_EINVAL;
+ }
+ if (!bit) {
+ break;
+ }
+ ++flex;
+ }
+
+ for (i = 0; i < decoder->log2p; ++i) {
+ int bit = gset_decode_next_bit(decoder);
+ if (bit == -1) {
+ return APR_EINVAL;
+ }
+ fixed = (fixed << 1) | bit;
+ }
+
+ delta = (flex << decoder->log2p) | fixed;
+ *phash = delta + decoder->last_val;
+ decoder->last_val = *phash;
+
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool,
+ "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%"
+ APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT,
+ *phash, delta, (int)flex, fixed);
+
+ return APR_SUCCESS;
}
+
+/**
+ * Initialize the push diary by a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * .
+ * @param diary the diary to set the digest into
+ * @param data the binary cache digest
+ * @param len the length of the cache digest
+ * @return APR_EINVAL if digest was not successfully parsed
+ */
+apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
+ const char *data, apr_size_t len)
+{
+ gset_decoder decoder;
+ unsigned char log2n, log2p;
+ apr_size_t N, i;
+ apr_pool_t *pool = diary->entries->pool;
+ h2_push_diary_entry e;
+ apr_status_t status = APR_SUCCESS;
+
+ if (len < 2) {
+ /* at least this should be there */
+ return APR_EINVAL;
+ }
+ log2n = data[0];
+ log2p = data[1];
+ diary->mask_bits = log2n + log2p;
+ if (diary->mask_bits > 64) {
+ /* cannot handle */
+ return APR_ENOTIMPL;
+ }
+
+ /* whatever is in the digest, it replaces the diary entries */
+ apr_array_clear(diary->entries);
+ if (!authority || !strcmp("*", authority)) {
+ diary->authority = NULL;
+ }
+ else if (!diary->authority || strcmp(diary->authority, authority)) {
+ diary->authority = apr_pstrdup(diary->entries->pool, authority);
+ }
+
+ N = h2_log2inv(log2n + log2p);
+
+ decoder.diary = diary;
+ decoder.pool = pool;
+ decoder.log2p = log2p;
+ decoder.data = (const unsigned char*)data;
+ decoder.datalen = len;
+ decoder.offset = 1;
+ decoder.bit = 8;
+ decoder.last_val = 0;
+
+ diary->N = N;
+ /* Determine effective N we use for storage */
+ if (!N) {
+ /* a totally empty cache digest. someone tells us that she has no
+ * entries in the cache at all. Use our own preferences for N+mask
+ */
+ diary->N = diary->NMax;
+ return APR_SUCCESS;
+ }
+ else if (N > diary->NMax) {
+ /* Store not more than diary is configured to hold. We open us up
+ * to DOS attacks otherwise. */
+ diary->N = diary->NMax;
+ }
+
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_set: N=%d, log2n=%d, "
+ "diary->mask_bits=%d, dec.log2p=%d",
+ (int)diary->N, (int)log2n, diary->mask_bits,
+ (int)decoder.log2p);
+
+ for (i = 0; i < diary->N; ++i) {
+ if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) {
+ /* the data may have less than N values */
+ break;
+ }
+ h2_push_diary_append(diary, &e);
+ }
+
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d",
+ (int)diary->entries->nelts, diary->mask_bits);
+ return status;
+}
+
+apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+ const char *data64url, apr_pool_t *pool)
+{
+ const char *data;
+ apr_size_t len = h2_util_base64url_decode(&data, data64url, pool);
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest64_set: digest=%s, dlen=%d",
+ data64url, (int)len);
+ return h2_push_diary_digest_set(diary, authority, data, len);
+}
+
struct h2_request;
struct h2_response;
struct h2_ngheader;
+struct h2_session;
+struct h2_stream;
+
+typedef enum {
+ H2_PUSH_NONE,
+ H2_PUSH_DEFAULT,
+ H2_PUSH_HEAD,
+ H2_PUSH_FAST_LOAD,
+} h2_push_policy;
typedef struct h2_push {
const struct h2_request *req;
} h2_push;
+typedef enum {
+ H2_PUSH_DIGEST_APR_HASH,
+ H2_PUSH_DIGEST_SHA256
+} h2_push_digest_type;
+
+typedef struct h2_push_diary h2_push_diary;
+
+typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push);
+struct h2_push_diary {
+ apr_array_header_t *entries;
+ apr_size_t NMax; /* Maximum for N, should size change be necessary */
+ apr_size_t N; /* Current maximum number of entries, power of 2 */
+ apr_uint64_t mask; /* mask for relevant bits */
+ unsigned int mask_bits; /* number of relevant bits */
+ const char *authority;
+ h2_push_digest_type dtype;
+ h2_push_digest_calc *dcalc;
+};
+
+/**
+ * Determine the list of h2_push'es to send to the client on behalf of
+ * the given request/response pair.
+ *
+ * @param p the pool to use
+ * @param req the request from the client
+ * @param res the response from the server
+ * @return array of h2_push addresses or NULL
+ */
apr_array_header_t *h2_push_collect(apr_pool_t *p,
const struct h2_request *req,
const struct h2_response *res);
+/**
+ * Set the push policy for the given request. Takes request headers into
+ * account, see draft https://tools.ietf.org/html/draft-ruellan-http-accept-push-policy-00
+ * for details.
+ *
+ * @param req the request to determine the policy for
+ * @param p the pool to use
+ * @param push_enabled if HTTP/2 server push is generally enabled for this request
+ */
+void h2_push_policy_determine(struct h2_request *req, apr_pool_t *p, int push_enabled);
+
+/**
+ * Create a new push diary for the given maximum number of entries.
+ *
+ * @param p the pool to use
+ * @param N the max number of entries, rounded up to 2^x
+ * @return the created diary, might be NULL if N is 0
+ */
+h2_push_diary *h2_push_diary_create(apr_pool_t *p, apr_size_t N);
+
+/**
+ * Filters the given pushes against the diary and returns only those pushes
+ * that were newly entered in the diary.
+ */
+apr_array_header_t *h2_push_diary_update(struct h2_session *session, apr_array_header_t *pushes);
+
+/**
+ * Collect pushes for the given request/response pair, enter them into the
+ * diary and return those pushes newly entered.
+ */
+apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
+ const struct h2_request *req,
+ const struct h2_response *res);
+/**
+ * Get a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * from the contents of the push diary.
+ *
+ * @param diary the diary to calculate the digest from
+ * @param p the pool to use
+ * @param authority the authority to get the data for, use NULL/"*" for all
+ * @param pdata on successful return, the binary cache digest
+ * @param plen on successful return, the length of the binary data
+ */
+apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *p,
+ apr_uint32_t maxP, const char *authority,
+ const char **pdata, apr_size_t *plen);
+
+/**
+ * Initialize the push diary by a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * .
+ * @param diary the diary to set the digest into
+ * @param authority the authority to set the data for
+ * @param data the binary cache digest
+ * @param len the length of the cache digest
+ * @return APR_EINVAL if digest was not successfully parsed
+ */
+apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
+ const char *data, apr_size_t len);
+
+apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+ const char *data64url, apr_pool_t *pool);
+
#endif /* defined(__mod_h2__h2_push__) */
#include "h2_private.h"
#include "h2_config.h"
#include "h2_mplx.h"
+#include "h2_push.h"
#include "h2_request.h"
#include "h2_task.h"
#include "h2_util.h"
return status;
}
-apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos)
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
+ int eos, int push)
{
const char *s;
}
req->eoh = 1;
+ h2_push_policy_determine(req, pool, push);
/* In the presence of trailers, force behaviour of chunked encoding */
s = apr_table_get(req->headers, "Trailer");
typedef struct h2_request h2_request;
struct h2_request {
- int id; /* stream id */
+ int id; /* stream id */
- /* pseudo header values, see ch. 8.1.2.3 */
- const char *method;
+ const char *method; /* pseudo header values, see ch. 8.1.2.3 */
const char *scheme;
const char *authority;
const char *path;
apr_time_t request_time;
apr_off_t content_length;
- int chunked;
- int eoh;
+ unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
+ unsigned int eoh : 1; /* iff end-of-headers has been seen and request is complete */
+ unsigned int body : 1; /* iff this request has a body */
+ unsigned int push_policy; /* which push policy to use for this request */
const struct h2_config *config;
};
const char *name, size_t nlen,
const char *value, size_t vlen);
-apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos);
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
+ int eos, int push);
void h2_request_copy(apr_pool_t *p, h2_request *dst, const h2_request *src);
#include <nghttp2/nghttp2.h>
#include "h2_private.h"
+#include "h2_filter.h"
#include "h2_h2.h"
#include "h2_util.h"
#include "h2_request.h"
}
}
+static const char *get_sos_filter(apr_table_t *notes)
+{
+ return notes? apr_table_get(notes, H2_RESP_SOS_NOTE) : NULL;
+}
+
static h2_response *h2_response_create_int(int stream_id,
int rst_error,
int http_status,
apr_table_t *headers,
+ apr_table_t *notes,
apr_pool_t *pool)
{
h2_response *response;
return NULL;
}
- response->stream_id = stream_id;
- response->rst_error = rst_error;
- response->http_status = http_status? http_status : 500;
+ response->stream_id = stream_id;
+ response->rst_error = rst_error;
+ response->http_status = http_status? http_status : 500;
response->content_length = -1;
- response->headers = headers;
+ response->headers = headers;
+ response->sos_filter = get_sos_filter(notes);
s = apr_table_get(headers, "Content-Length");
if (s) {
int rst_error,
int http_status,
apr_array_header_t *hlines,
+ apr_table_t *notes,
apr_pool_t *pool)
{
return h2_response_create_int(stream_id, rst_error, http_status,
- parse_headers(hlines, pool), pool);
+ parse_headers(hlines, pool), notes, pool);
}
h2_response *h2_response_rcreate(int stream_id, request_rec *r,
return NULL;
}
- response->stream_id = stream_id;
- response->http_status = r->status;
+ response->stream_id = stream_id;
+ response->http_status = r->status;
response->content_length = -1;
- response->headers = header;
+ response->headers = header;
+ response->sos_filter = get_sos_filter(r->notes);
if (response->http_status == HTTP_FORBIDDEN) {
const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden");
apr_table_setn(headers, "Date", date);
apr_table_setn(headers, "Server", ap_get_server_banner());
- return h2_response_create_int(stream_id, 0, 500, headers, pool);
+ return h2_response_create_int(stream_id, 0, 500, headers, NULL, pool);
}
h2_response *h2_response_clone(apr_pool_t *pool, h2_response *from)
{
h2_response *to = apr_pcalloc(pool, sizeof(h2_response));
- to->stream_id = from->stream_id;
- to->http_status = from->http_status;
+
+ to->stream_id = from->stream_id;
+ to->http_status = from->http_status;
to->content_length = from->content_length;
+ to->sos_filter = from->sos_filter;
if (from->headers) {
- to->headers = apr_table_clone(pool, from->headers);
+ to->headers = apr_table_clone(pool, from->headers);
}
if (from->trailers) {
- to->trailers = apr_table_clone(pool, from->trailers);
+ to->trailers = apr_table_clone(pool, from->trailers);
}
return to;
}
struct h2_push;
typedef struct h2_response {
- int stream_id;
- int rst_error;
- int http_status;
- apr_off_t content_length;
+ int stream_id;
+ int rst_error;
+ int http_status;
+ apr_off_t content_length;
apr_table_t *headers;
apr_table_t *trailers;
+ const char *sos_filter;
} h2_response;
/**
int rst_error,
int http_status,
apr_array_header_t *hlines,
+ apr_table_t *notes,
apr_pool_t *pool);
/**
#include <http_core.h>
#include <http_config.h>
#include <http_log.h>
+#include <scoreboard.h>
#include "h2_private.h"
+#include "h2_bucket_eoc.h"
#include "h2_bucket_eos.h"
#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_filter.h"
#include "h2_h2.h"
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_version.h"
#include "h2_workers.h"
+
static int frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen);
static int h2_session_status_from_apr_status(apr_status_t rv)
{
h2_session *session = (h2_session*)ctx;
nghttp2_session_consume(session->ngh2, stream_id, bytes_read);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_session(%ld-%d): consumed %ld bytes",
session->id, stream_id, (long)bytes_read);
}
+static apr_status_t h2_session_receive(void *ctx,
+ const char *data, apr_size_t len,
+ apr_size_t *readlen);
+
+static int is_accepting_streams(h2_session *session);
+static void dispatch_event(h2_session *session, h2_session_event_t ev,
+ int err, const char *msg);
h2_stream *h2_session_open_stream(h2_session *session, int stream_id)
{
h2_stream * stream;
apr_pool_t *stream_pool;
- if (session->aborted) {
- return NULL;
- }
if (session->spare) {
stream_pool = session->spare;
h2_stream_set_add(session->streams, stream);
if (H2_STREAM_CLIENT_INITIATED(stream_id)
&& stream_id > session->max_stream_received) {
+ ++session->requests_received;
session->max_stream_received = stream->id;
}
h2_stream *stream, int eos)
{
(void)session;
- return h2_stream_schedule(stream, eos, stream_pri_cmp, session);
+ return h2_stream_schedule(stream, eos, h2_session_push_enabled(session),
+ stream_pri_cmp, session);
}
/*
h2_session *session = (h2_session *)userp;
(void)ngh2;
- if (session->aborted) {
- return NGHTTP2_ERR_CALLBACK_FAILURE;
- }
- if (APLOGctrace2(session->c)) {
+ if (APLOGcdebug(session->c)) {
char buffer[256];
frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_session: callback on_invalid_frame_recv error=%d %s",
- error, buffer);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ "h2_session(%ld): recv unknown FRAME[%s], frames=%ld/%ld (r/s)",
+ session->id, buffer, (long)session->frames_received,
+ (long)session->frames_sent);
}
return 0;
}
int rv;
(void)flags;
- if (session->aborted) {
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ if (!is_accepting_streams(session)) {
+ /* ignore */
+ return 0;
}
stream = h2_session_get_stream(session, stream_id);
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- "h2_session: stream(%ld-%d): on_data_chunk for unknown stream",
+ "h2_stream(%ld-%d): on_data_chunk for unknown stream",
session->id, (int)stream_id);
rv = nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, stream_id,
NGHTTP2_INTERNAL_ERROR);
uint32_t error_code)
{
if (!error_code) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"h2_stream(%ld-%d): handled, closing",
session->id, (int)stream->id);
if (stream->id > session->max_stream_handled) {
h2_stream *stream;
(void)ngh2;
- if (session->aborted) {
- return NGHTTP2_ERR_CALLBACK_FAILURE;
- }
stream = h2_session_get_stream(session, stream_id);
if (stream) {
stream_release(session, stream, error_code);
(void)ngh2;
(void)flags;
- if (session->aborted) {
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ if (!is_accepting_streams(session)) {
+ /* just ignore */
+ return 0;
}
stream = h2_session_get_stream(session, frame->hd.stream_id);
apr_status_t status = APR_SUCCESS;
h2_stream *stream;
- if (session->aborted) {
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ "h2_session(%ld): recv FRAME[%s], frames=%ld/%ld (r/s)",
+ session->id, buffer, (long)session->frames_received,
+ (long)session->frames_sent);
}
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_stream(%ld-%d): on_frame_rcv #%ld, type=%d",
- session->id, frame->hd.stream_id,
- (long)session->frames_received, frame->hd.type);
++session->frames_received;
switch (frame->hd.type) {
session->id, (int)frame->hd.stream_id,
frame->window_update.window_size_increment);
break;
+ case NGHTTP2_RST_STREAM:
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ "h2_session(%ld-%d): RST_STREAM by client, errror=%d",
+ session->id, (int)frame->hd.stream_id,
+ (int)frame->rst_stream.error_code);
+ stream = h2_session_get_stream(session, frame->hd.stream_id);
+ if (stream && stream->initiated_on) {
+ ++session->pushes_reset;
+ }
+ else {
+ ++session->streams_reset;
+ }
+ break;
+ case NGHTTP2_GOAWAY:
+ dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY, 0, NULL);
+ break;
default:
if (APLOGctrace2(session->c)) {
char buffer[256];
(void)ngh2;
(void)source;
- if (session->aborted) {
- return NGHTTP2_ERR_CALLBACK_FAILURE;
- }
-
if (frame->data.padlen > H2_MAX_PADLEN) {
return NGHTTP2_ERR_PROTO;
}
frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- "h2_session(%ld): frame_send %s",
- session->id, buffer);
+ "h2_session(%ld): sent FRAME[%s], frames=%ld/%ld (r/s)",
+ session->id, buffer, (long)session->frames_received,
+ (long)session->frames_sent);
}
+ ++session->frames_sent;
return 0;
}
return APR_SUCCESS;
}
+static void h2_session_cleanup(h2_session *session)
+{
+ AP_DEBUG_ASSERT(session);
+ /* This is an early cleanup of the session that may
+ * discard what is no longer necessary for *new* streams
+ * and general HTTP/2 processing.
+ * At this point, all frames are in transit or somewhere in
+ * our buffers or passed down output filters.
+ * h2 streams might still be being written out.
+ */
+ if (session->c) {
+ h2_ctx_clear(session->c);
+ }
+ if (session->ngh2) {
+ nghttp2_session_del(session->ngh2);
+ session->ngh2 = NULL;
+ }
+ if (session->spare) {
+ apr_pool_destroy(session->spare);
+ session->spare = NULL;
+ }
+}
+
+static void h2_session_destroy(h2_session *session)
+{
+ AP_DEBUG_ASSERT(session);
+ h2_session_cleanup(session);
+
+ if (APLOGctrace1(session->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_session(%ld): destroy, %d streams open",
+ session->id, (int)h2_stream_set_size(session->streams));
+ }
+ if (session->mplx) {
+ h2_mplx_set_consumed_cb(session->mplx, NULL, NULL);
+ h2_mplx_release_and_join(session->mplx, session->iowait);
+ session->mplx = NULL;
+ }
+ if (session->streams) {
+ h2_stream_set_destroy(session->streams);
+ session->streams = NULL;
+ }
+ if (session->pool) {
+ apr_pool_destroy(session->pool);
+ }
+}
+
+static apr_status_t h2_session_shutdown(h2_session *session, int reason, const char *msg)
+{
+ apr_status_t status = APR_SUCCESS;
+ const char *err = msg;
+
+ AP_DEBUG_ASSERT(session);
+ if (!err && reason) {
+ err = nghttp2_strerror(reason);
+ }
+ nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE,
+ h2_mplx_get_max_stream_started(session->mplx),
+ reason, (uint8_t*)err, err? strlen(err):0);
+ status = nghttp2_session_send(session->ngh2);
+ h2_conn_io_flush(&session->io);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ "session(%ld): sent GOAWAY, err=%d, msg=%s",
+ session->id, reason, err? err : "");
+ dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, reason, err);
+ return status;
+}
+
static apr_status_t session_pool_cleanup(void *data)
{
h2_session *session = data;
+ /* On a controlled connection shutdown, this never gets
+ * called as we deregister and destroy our pool manually.
+ * However when we have an async mpm, and handed it our idle
+ * connection, it will just cleanup once the connection is closed
+ * from the other side (and sometimes even from our side) and
+ * here we arrive then.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "session(%ld): pool_cleanup", session->id);
+ if (session->state != H2_SESSION_ST_DONE
+ && session->state != H2_SESSION_ST_LOCAL_SHUTDOWN) {
+ /* Not good. The connection is being torn down and we have
+ * not sent a goaway. This is considered a protocol error and
+ * the client has to assume that any streams "in flight" may have
+ * been processed and are not safe to retry.
+ * As clients with idle connection may only learn about a closed
+ * connection when sending the next request, this has the effect
+ * that at least this one request will fail.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c,
+ "session(%ld): connection disappeared without proper "
+ "goodbye, clients will be confused, should not happen",
+ session->id);
+ }
/* keep us from destroying the pool, since that is already ongoing. */
session->pool = NULL;
h2_session_destroy(session);
static h2_session *h2_session_create_int(conn_rec *c,
request_rec *r,
- const h2_config *config,
+ h2_ctx *ctx,
h2_workers *workers)
{
nghttp2_session_callbacks *callbacks = NULL;
nghttp2_option *options = NULL;
+ uint32_t n;
apr_pool_t *pool = NULL;
- apr_status_t status = apr_pool_create(&pool, r? r->pool : c->pool);
+ apr_status_t status = apr_pool_create(&pool, c->pool);
h2_session *session;
if (status != APR_SUCCESS) {
return NULL;
session->id = c->id;
session->c = c;
session->r = r;
- session->config = config;
+ session->s = h2_ctx_server_get(ctx);
+ session->config = h2_config_sget(session->s);
+
+ session->state = H2_SESSION_ST_INIT;
session->pool = pool;
apr_pool_pre_cleanup_register(pool, session, session_pool_cleanup);
- session->max_stream_count = h2_config_geti(config, H2_CONF_MAX_STREAMS);
- session->max_stream_mem = h2_config_geti(config, H2_CONF_STREAM_MAX_MEM);
-
+ session->max_stream_count = h2_config_geti(session->config, H2_CONF_MAX_STREAMS);
+ session->max_stream_mem = h2_config_geti(session->config, H2_CONF_STREAM_MAX_MEM);
+ session->timeout_secs = h2_config_geti(session->config, H2_CONF_TIMEOUT_SECS);
+ if (session->timeout_secs <= 0) {
+ session->timeout_secs = apr_time_sec(session->s->timeout);
+ }
+ session->keepalive_secs = h2_config_geti(session->config, H2_CONF_KEEPALIVE_SECS);
+ if (session->keepalive_secs <= 0) {
+ session->keepalive_secs = apr_time_sec(session->s->keep_alive_timeout);
+ }
+
status = apr_thread_cond_create(&session->iowait, session->pool);
if (status != APR_SUCCESS) {
return NULL;
session->streams = h2_stream_set_create(session->pool, session->max_stream_count);
session->workers = workers;
- session->mplx = h2_mplx_create(c, session->pool, config, workers);
+ session->mplx = h2_mplx_create(c, session->pool, session->config, workers);
h2_mplx_set_consumed_cb(session->mplx, update_window, session);
- h2_conn_io_init(&session->io, c, config, session->pool);
+ /* Install the connection input filter that feeds the session */
+ session->cin = h2_filter_cin_create(session->pool, h2_session_receive, session);
+ ap_add_input_filter("H2_IN", session->cin, r, c);
+
+ h2_conn_io_init(&session->io, c, session->config, session->pool);
session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc);
status = init_callbacks(c, &callbacks);
h2_session_destroy(session);
return NULL;
}
+
+ n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE);
+ session->push_diary = h2_push_diary_create(session->pool, n);
+ if (APLOGcdebug(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
+ "session(%ld) created, timeout=%d, keepalive_timeout=%d, "
+ "max_streams=%d, stream_mem=%d, push_diary(type=%d,N=%d)",
+ session->id, session->timeout_secs, session->keepalive_secs,
+ (int)session->max_stream_count, (int)session->max_stream_mem,
+ session->push_diary->dtype,
+ (int)session->push_diary->N);
+ }
}
return session;
}
-h2_session *h2_session_create(conn_rec *c, const h2_config *config,
- h2_workers *workers)
+h2_session *h2_session_create(conn_rec *c, h2_ctx *ctx, h2_workers *workers)
{
- return h2_session_create_int(c, NULL, config, workers);
+ return h2_session_create_int(c, NULL, ctx, workers);
}
-h2_session *h2_session_rcreate(request_rec *r, const h2_config *config,
- h2_workers *workers)
+h2_session *h2_session_rcreate(request_rec *r, h2_ctx *ctx, h2_workers *workers)
{
- return h2_session_create_int(r->connection, r, config, workers);
-}
-
-static void h2_session_cleanup(h2_session *session)
-{
- AP_DEBUG_ASSERT(session);
- /* This is an early cleanup of the session that may
- * discard what is no longer necessary for *new* streams
- * and general HTTP/2 processing.
- * At this point, all frames are in transit or somehwere in
- * our buffers or passed down output filters.
- * h2 streams might still being written out.
- */
- if (session->ngh2) {
- nghttp2_session_del(session->ngh2);
- session->ngh2 = NULL;
- }
- if (session->spare) {
- apr_pool_destroy(session->spare);
- session->spare = NULL;
- }
+ return h2_session_create_int(r->connection, r, ctx, workers);
}
-void h2_session_destroy(h2_session *session)
-{
- AP_DEBUG_ASSERT(session);
- h2_session_cleanup(session);
-
- if (session->mplx) {
- h2_mplx_release_and_join(session->mplx, session->iowait);
- session->mplx = NULL;
- }
- if (session->streams) {
- if (!h2_stream_set_is_empty(session->streams)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_session(%ld): destroy, %d streams open",
- session->id, (int)h2_stream_set_size(session->streams));
- }
- h2_stream_set_destroy(session->streams);
- session->streams = NULL;
- }
- if (session->pool) {
- apr_pool_destroy(session->pool);
- }
-}
-
-
void h2_session_eoc_callback(h2_session *session)
{
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
h2_session_destroy(session);
}
-static apr_status_t h2_session_abort_int(h2_session *session, int reason)
-{
- AP_DEBUG_ASSERT(session);
- if (!session->aborted) {
- session->aborted = 1;
-
- if (session->ngh2) {
- if (NGHTTP2_ERR_EOF == reason) {
- /* This is our way of indication that the connection is
- * gone. No use to send any GOAWAY frames. */
- nghttp2_session_terminate_session(session->ngh2, reason);
- }
- else if (!reason) {
- nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE,
- session->max_stream_received,
- reason, NULL, 0);
- nghttp2_session_send(session->ngh2);
- }
- else {
- const char *err = nghttp2_strerror(reason);
-
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- "session(%ld): aborting session, reason=%d %s",
- session->id, reason, err);
-
- /* The connection might still be there and we shut down
- * with GOAWAY and reason information. */
- nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE,
- session->max_stream_received,
- reason, (const uint8_t *)err,
- strlen(err));
- nghttp2_session_send(session->ngh2);
- }
- }
- h2_mplx_abort(session->mplx);
- }
- return APR_SUCCESS;
-}
-
-apr_status_t h2_session_abort(h2_session *session, apr_status_t reason, int rv)
-{
- AP_DEBUG_ASSERT(session);
- if (rv == 0) {
- rv = NGHTTP2_ERR_PROTO;
- switch (reason) {
- case APR_ENOMEM:
- rv = NGHTTP2_ERR_NOMEM;
- break;
- case APR_SUCCESS: /* all fine, just... */
- case APR_EOF: /* client closed its end... */
- case APR_TIMEUP: /* got bored waiting... */
- rv = 0; /* ...gracefully shut down */
- break;
- case APR_EBADF: /* connection unusable, terminate silently */
- default:
- if (APR_STATUS_IS_ECONNABORTED(reason)
- || APR_STATUS_IS_ECONNRESET(reason)
- || APR_STATUS_IS_EBADF(reason)) {
- rv = NGHTTP2_ERR_EOF;
- }
- break;
- }
- }
- return h2_session_abort_int(session, rv);
-}
-
-apr_status_t h2_session_start(h2_session *session, int *rv)
+static apr_status_t h2_session_start(h2_session *session, int *rv)
{
apr_status_t status = APR_SUCCESS;
nghttp2_settings_entry settings[3];
++slen;
}
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ "h2_session(%ld): start, INITIAL_WINDOW_SIZE=%ld, "
+ "MAX_CONCURRENT_STREAMS=%d",
+ session->id, (long)win_size, (int)session->max_stream_count);
*rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE,
settings, slen);
if (*rv != 0) {
int resume_count;
} resume_ctx;
-static int resume_on_data(void *ctx, h2_stream *stream) {
+static int resume_on_data(void *ctx, h2_stream *stream)
+{
resume_ctx *rctx = (resume_ctx*)ctx;
h2_session *session = rctx->session;
AP_DEBUG_ASSERT(session);
ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)?
APLOG_ERR : APLOG_DEBUG, 0, session->c,
APLOGNO(02936)
- "h2_stream(%ld-%d): resuming stream %s",
- session->id, stream->id, nghttp2_strerror(rv));
+ "h2_stream(%ld-%d): resuming %s",
+ session->id, stream->id, rv? nghttp2_strerror(rv) : "");
}
}
return 1;
}
-static int h2_session_resume_streams_with_data(h2_session *session) {
+static int h2_session_resume_streams_with_data(h2_session *session)
+{
AP_DEBUG_ASSERT(session);
if (!h2_stream_set_is_empty(session->streams)
- && session->mplx && !session->aborted) {
+ && session->mplx && !session->mplx->aborted) {
resume_ctx ctx;
ctx.session = session;
return session->last_stream;
}
-/* h2_io_on_read_cb implementation that offers the data read
- * directly to the session for consumption.
- */
-static apr_status_t session_receive(const char *data, apr_size_t len,
- apr_size_t *readlen, int *done,
- void *puser)
-{
- h2_session *session = (h2_session *)puser;
- AP_DEBUG_ASSERT(session);
- if (len > 0) {
- ssize_t n = nghttp2_session_mem_recv(session->ngh2,
- (const uint8_t *)data, len);
- if (n < 0) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EGENERAL,
- session->c,
- "h2_session: nghttp2_session_mem_recv error %d",
- (int)n);
- if (nghttp2_is_fatal((int)n)) {
- *done = 1;
- h2_session_abort_int(session, (int)n);
- return APR_EGENERAL;
- }
- }
- else {
- *readlen = n;
- }
- }
- return APR_SUCCESS;
-}
-
-apr_status_t h2_session_close(h2_session *session)
-{
- AP_DEBUG_ASSERT(session);
- if (!session->aborted) {
- h2_session_abort_int(session, 0);
- }
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0,session->c,
- "h2_session: closing, writing eoc");
-
- h2_session_cleanup(session);
- return h2_conn_io_close(&session->io, session);
-}
-
static ssize_t stream_data_cb(nghttp2_session *ng2s,
int32_t stream_id,
uint8_t *buf,
* to find out how much of the requested length we can send without
* blocking.
* Indicate EOS when we encounter it or DEFERRED if the stream
- * should be suspended.
- * TODO: for handling of TRAILERS, the EOF indication needs
- * to be aware of that.
+ * should be suspended. Beware of trailers.
*/
(void)ng2s;
nread = 0;
h2_stream_set_suspended(stream, 1);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- "h2_stream(%ld-%d): suspending stream",
+ "h2_stream(%ld-%d): suspending",
session->id, (int)stream_id);
return NGHTTP2_ERR_DEFERRED;
static apr_status_t submit_response(h2_session *session, h2_stream *stream)
{
apr_status_t status = APR_SUCCESS;
+ h2_response *response = h2_stream_get_response(stream);
int rv = 0;
AP_DEBUG_ASSERT(session);
AP_DEBUG_ASSERT(stream);
- AP_DEBUG_ASSERT(stream->response || stream->rst_error);
+ AP_DEBUG_ASSERT(response || stream->rst_error);
if (stream->submitted) {
rv = NGHTTP2_PROTOCOL_ERROR;
}
- else if (stream->response && stream->response->headers) {
+ else if (response && response->headers) {
nghttp2_data_provider provider;
- h2_response *response = stream->response;
h2_ngheader *ngh;
const h2_priority *prio;
* also have the pushed ones as well.
*/
if (!stream->initiated_on
- && h2_config_geti(session->config, H2_CONF_PUSH)
&& H2_HTTP_2XX(response->http_status)
&& h2_session_push_enabled(session)) {
response->headers);
rv = nghttp2_submit_response(session->ngh2, response->stream_id,
ngh->nv, ngh->nvlen, &provider);
-
}
else {
int err = H2_STREAM_RST(stream, H2_ERR_PROTOCOL_ERROR);
}
stream->submitted = 1;
+ if (stream->initiated_on) {
+ ++session->pushes_submitted;
+ }
+ else {
+ ++session->responses_submitted;
+ }
if (nghttp2_is_fatal(rv)) {
status = APR_EGENERAL;
- h2_session_abort_int(session, rv);
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
APLOGNO(02940) "submit_response: %s",
nghttp2_strerror(rv));
ngh = h2_util_ngheader_make_req(is->pool, push->req);
nid = nghttp2_submit_push_promise(session->ngh2, 0, is->id,
ngh->nv, ngh->nvlen, NULL);
-
if (nid <= 0) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
"h2_stream(%ld-%d): submitting push promise fail: %s",
session->id, is->id, nghttp2_strerror(nid));
return NULL;
}
-
+ ++session->pushes_promised;
+
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- "h2_stream(%ld-%d): promised new stream %d for %s %s on %d",
+ "h2_stream(%ld-%d): SERVER_PUSH %d for %s %s on %d",
session->id, is->id, nid,
push->req->method, push->req->path, is->id);
h2_stream_set_h2_request(stream, is->id, push->req);
status = stream_schedule(session, stream, 1);
if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
"h2_stream(%ld-%d): scheduling push stream",
session->id, stream->id);
h2_stream_cleanup(stream);
s = nghttp2_session_find_stream(session->ngh2, stream->id);
if (!s) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"h2_stream(%ld-%d): lookup of nghttp2_stream failed",
session->id, stream->id);
return APR_EINVAL;
rv = nghttp2_session_change_stream_priority(session->ngh2, id_parent, &ps);
if (rv < 0) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- "h2_stream(%ld-%d): PUSH BEFORE2, weight=%d, "
+ "h2_stream(%ld-%d): PUSH BEFORE, weight=%d, "
"depends=%d, returned=%d",
session->id, id_parent, ps.weight, ps.stream_id, rv);
return APR_EGENERAL;
}
default:
return apr_snprintf(buffer, maxlen,
- "FRAME[type=%d, length=%d, flags=%d, stream=%d]",
+ "type=%d[length=%d, flags=%d, stream=%d]",
frame->hd.type, (int)frame->hd.length,
frame->hd.flags, frame->hd.stream_id);
}
int h2_session_push_enabled(h2_session *session)
{
- return nghttp2_session_get_remote_settings(session->ngh2,
- NGHTTP2_SETTINGS_ENABLE_PUSH);
+ /* iff we can and they can */
+ return (h2_config_geti(session->config, H2_CONF_PUSH)
+ && nghttp2_session_get_remote_settings(session->ngh2,
+ NGHTTP2_SETTINGS_ENABLE_PUSH));
}
-
-apr_status_t h2_session_process(h2_session *session)
+static apr_status_t h2_session_send(h2_session *session)
{
- apr_status_t status = APR_SUCCESS;
- apr_interval_time_t wait_micros = 0;
- static const int MAX_WAIT_MICROS = 200 * 1000;
- int got_streams = 0;
- h2_stream *stream;
-
- while (!session->aborted && (nghttp2_session_want_read(session->ngh2)
- || nghttp2_session_want_write(session->ngh2))) {
- int have_written = 0;
- int have_read = 0;
-
- got_streams = !h2_stream_set_is_empty(session->streams);
- if (got_streams) {
- h2_session_resume_streams_with_data(session);
-
- if (h2_stream_set_has_unsubmitted(session->streams)) {
- int unsent_submits = 0;
-
- /* If we have responses ready, submit them now. */
- while ((stream = h2_mplx_next_submit(session->mplx, session->streams))) {
- status = submit_response(session, stream);
- ++unsent_submits;
-
- /* Unsent push promises are written immediately, as nghttp2
- * 1.5.0 realizes internal stream data structures only on
- * send and we might need them for other submits.
- * Also, to conserve memory, we send at least every 10 submits
- * so that nghttp2 does not buffer all outbound items too
- * long.
- */
- if (status == APR_SUCCESS
- && (session->unsent_promises || unsent_submits > 10)) {
- int rv = nghttp2_session_send(session->ngh2);
- if (rv != 0) {
- ap_log_cerror( APLOG_MARK, APLOG_DEBUG, 0, session->c,
- "h2_session: send: %s", nghttp2_strerror(rv));
- if (nghttp2_is_fatal(rv)) {
- h2_session_abort(session, status, rv);
- goto end_process;
- }
- }
- else {
- have_written = 1;
- wait_micros = 0;
- session->unsent_promises = 0;
- unsent_submits = 0;
- }
- }
- }
- }
+ int rv = nghttp2_session_send(session->ngh2);
+ if (rv != 0) {
+ if (nghttp2_is_fatal(rv)) {
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
+ return APR_EGENERAL;
}
-
- /* Send data as long as we have it and window sizes allow. We are
- * a server after all.
- */
- if (nghttp2_session_want_write(session->ngh2)) {
- int rv;
-
- rv = nghttp2_session_send(session->ngh2);
- if (rv != 0) {
- ap_log_cerror( APLOG_MARK, APLOG_DEBUG, 0, session->c,
- "h2_session: send: %s", nghttp2_strerror(rv));
- if (nghttp2_is_fatal(rv)) {
- h2_session_abort(session, status, rv);
- goto end_process;
- }
- }
- else {
- have_written = 1;
- wait_micros = 0;
- session->unsent_promises = 0;
+ }
+
+ session->unsent_promises = 0;
+ session->unsent_submits = 0;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t h2_session_receive(void *ctx, const char *data,
+ apr_size_t len, apr_size_t *readlen)
+{
+ h2_session *session = ctx;
+ ssize_t n;
+
+ if (len > 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_session(%ld): feeding %ld bytes to nghttp2",
+ session->id, (long)len);
+ n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len);
+ if (n < 0) {
+ if (nghttp2_is_fatal((int)n)) {
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, (int)n, nghttp2_strerror(n));
+ return APR_EGENERAL;
}
}
-
- if (wait_micros > 0) {
- if (APLOGcdebug(session->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- "h2_session: wait for data, %ld micros",
- (long)wait_micros);
- }
- nghttp2_session_send(session->ngh2);
- h2_conn_io_flush(&session->io);
- status = h2_mplx_out_trywait(session->mplx, wait_micros, session->iowait);
-
- if (status == APR_TIMEUP) {
- if (wait_micros < MAX_WAIT_MICROS) {
- wait_micros *= 2;
- }
- }
+ else {
+ *readlen = n;
+ session->io.bytes_read += n;
}
-
- if (nghttp2_session_want_read(session->ngh2))
- {
- /* When we
- * - and have no streams at all
- * - or have streams, but none is suspended or needs submit and
- * have nothing written on the last try
- *
- * or, the other way around
- * - have only streams where data can be sent, but could
- * not send anything
- *
- * then we are waiting on frames from the client (for
- * example WINDOW_UPDATE or HEADER) and without new frames
- * from the client, we cannot make any progress,
- *
- * and *then* we can safely do a blocking read.
- */
- int may_block = (session->frames_received <= 1);
- if (!may_block) {
- if (got_streams) {
- may_block = (!have_written
- && !h2_stream_set_has_unsubmitted(session->streams)
- && !h2_stream_set_has_suspended(session->streams));
- }
- else {
- may_block = 1;
- }
- }
-
- if (may_block) {
- h2_conn_io_flush(&session->io);
- if (session->c->cs) {
- session->c->cs->state = (got_streams? CONN_STATE_HANDLER
- : CONN_STATE_WRITE_COMPLETION);
- }
- status = h2_conn_io_read(&session->io, APR_BLOCK_READ,
- session_receive, session);
- }
- else {
- if (session->c->cs) {
- session->c->cs->state = CONN_STATE_HANDLER;
- }
- status = h2_conn_io_read(&session->io, APR_NONBLOCK_READ,
- session_receive, session);
- }
+ }
+ return APR_SUCCESS;
+}
- switch (status) {
- case APR_SUCCESS: /* successful read, reset our idle timers */
- have_read = 1;
- wait_micros = 0;
- break;
- case APR_EAGAIN: /* non-blocking read, nothing there */
- break;
- default:
+static apr_status_t h2_session_read(h2_session *session, int block, int loops)
+{
+ apr_status_t status, rstatus = APR_EAGAIN;
+ conn_rec *c = session->c;
+ int i;
+
+ for (i = 0; i < loops; ++i) {
+ /* H2_IN filter handles all incoming data against the session.
+ * We just pull at the filter chain to make it happen */
+ status = ap_get_brigade(c->input_filters,
+ session->bbtmp, AP_MODE_READBYTES,
+ block? APR_BLOCK_READ : APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ /* get rid of any possible data we do not expect to get */
+ apr_brigade_cleanup(session->bbtmp);
+
+ switch (status) {
+ case APR_SUCCESS:
+ /* successful read, reset our idle timers */
+ rstatus = APR_SUCCESS;
+ if (block) {
+ /* successful blocking read, try unblocked to
+ * get more. */
+ block = 0;
+ }
+ break;
+ case APR_EAGAIN:
+ return rstatus;
+ case APR_TIMEUP:
+ return status;
+ default:
+ if (!i) {
+ /* first attempt failed */
if (APR_STATUS_IS_ETIMEDOUT(status)
|| APR_STATUS_IS_ECONNABORTED(status)
|| APR_STATUS_IS_ECONNRESET(status)
|| APR_STATUS_IS_EOF(status)
|| APR_STATUS_IS_EBADF(status)) {
/* common status for a client that has left */
- ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, session->c,
- "h2_session(%ld): terminating",
- session->id);
- /* Stolen from mod_reqtimeout to speed up lingering when
- * a read timeout happened.
- */
- apr_table_setn(session->c->notes, "short-lingering-close", "1");
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
+ "h2_session(%ld): input gone", session->id);
}
else {
/* uncommon status, log on INFO so that we see this */
- ap_log_cerror( APLOG_MARK, APLOG_INFO, status, session->c,
+ ap_log_cerror( APLOG_MARK, APLOG_INFO, status, c,
APLOGNO(02950)
"h2_session(%ld): error reading, terminating",
session->id);
}
- h2_session_abort(session, status, 0);
- goto end_process;
- }
+ return status;
+ }
+ /* subsequent failure after success(es), return initial
+ * status. */
+ return rstatus;
}
-
- got_streams = !h2_stream_set_is_empty(session->streams);
- if (got_streams) {
- if (session->reprioritize) {
- h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session);
- session->reprioritize = 0;
- }
+ if (!is_accepting_streams(session)) {
+ break;
+ }
+ }
+ return rstatus;
+}
+
+static apr_status_t h2_session_submit(h2_session *session)
+{
+ apr_status_t status = APR_EAGAIN;
+ h2_stream *stream;
+
+ if (h2_stream_set_has_unsubmitted(session->streams)) {
+ /* If we have responses ready, submit them now. */
+ while ((stream = h2_mplx_next_submit(session->mplx, session->streams))) {
+ status = submit_response(session, stream);
+ ++session->unsent_submits;
- if (!have_read && !have_written) {
- /* Nothing read or written. That means no data yet ready to
- * be send out. Slowly back off...
- */
- if (wait_micros == 0) {
- wait_micros = 10;
+ /* Unsent push promises are written immediately, as nghttp2
+ * 1.5.0 realizes internal stream data structures only on
+ * send and we might need them for other submits.
+ * Also, to conserve memory, we send at least every 10 submits
+ * so that nghttp2 does not buffer all outbound items too
+ * long.
+ */
+ if (status == APR_SUCCESS
+ && (session->unsent_promises || session->unsent_submits > 10)) {
+ status = h2_session_send(session);
+ if (status != APR_SUCCESS) {
+ break;
}
}
-
- /* Check that any pending window updates are sent. */
- status = h2_mplx_in_update_windows(session->mplx);
- if (APR_STATUS_IS_EAGAIN(status)) {
- status = APR_SUCCESS;
+ }
+ }
+ return status;
+}
+
+static const char *StateNames[] = {
+ "INIT", /* H2_SESSION_ST_INIT */
+ "DONE", /* H2_SESSION_ST_DONE */
+ "IDLE", /* H2_SESSION_ST_IDLE */
+ "BUSY", /* H2_SESSION_ST_BUSY */
+ "WAIT", /* H2_SESSION_ST_WAIT */
+ "LSHUTDOWN", /* H2_SESSION_ST_LOCAL_SHUTDOWN */
+ "RSHUTDOWN", /* H2_SESSION_ST_REMOTE_SHUTDOWN */
+};
+
+static const char *state_name(h2_session_state state)
+{
+ if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) {
+ return "unknown";
+ }
+ return StateNames[state];
+}
+
+static int is_accepting_streams(h2_session *session)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_WAIT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static void transit(h2_session *session, const char *action, h2_session_state nstate)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ "h2_session(%ld): transit [%s] -- %s --> [%s]", session->id,
+ state_name(session->state), action, state_name(nstate));
+ session->state = nstate;
+}
+
+static void h2_session_ev_init(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ transit(session, "init", H2_SESSION_ST_BUSY);
+ break;
+
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_local_goaway(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ /* already did that? */
+ break;
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_REMOTE_SHUTDOWN:
+ /* all done */
+ transit(session, "local goaway", H2_SESSION_ST_DONE);
+ break;
+ default:
+ transit(session, "local goaway", H2_SESSION_ST_LOCAL_SHUTDOWN);
+ break;
+ }
+}
+
+static void h2_session_ev_remote_goaway(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_REMOTE_SHUTDOWN:
+ /* already received that? */
+ break;
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ /* all done */
+ transit(session, "remote goaway", H2_SESSION_ST_DONE);
+ break;
+ default:
+ transit(session, "remote goaway", H2_SESSION_ST_REMOTE_SHUTDOWN);
+ break;
+ }
+}
+
+static void h2_session_ev_conn_error(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ case H2_SESSION_ST_DONE:
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "conn error", H2_SESSION_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_session(%ld): conn error -> shutdown", session->id);
+ h2_session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void h2_session_ev_proto_error(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_DONE:
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "proto error", H2_SESSION_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_session(%ld): proto error -> shutdown", session->id);
+ h2_session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void h2_session_ev_conn_timeout(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ transit(session, "conn timeout", H2_SESSION_ST_DONE);
+ break;
+ default:
+ h2_session_shutdown(session, arg, msg);
+ transit(session, "conn timeout", H2_SESSION_ST_DONE);
+ break;
+ }
+}
+
+static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_BUSY:
+ /* nothing for input and output to do. If we remain
+ * in this state, we go into a tight loop and suck up
+ * CPU cycles. Ideally, we'd like to do a blocking read, but that
+ * is not possible if we have scheduled tasks and wait
+ * for them to produce something. */
+ if (h2_stream_set_is_empty(session->streams)) {
+            /* When we have no streams, no task events are possible,
+ * switch to blocking reads */
+ transit(session, "no io", H2_SESSION_ST_IDLE);
}
- else if (status == APR_SUCCESS) {
- /* need to flush window updates onto the connection asap */
- h2_conn_io_flush(&session->io);
+ else if (!h2_stream_set_has_unsubmitted(session->streams)
+ && !h2_stream_set_has_suspended(session->streams)) {
+ /* none of our streams is waiting for a response or
+ * new output data from task processing,
+ * switch to blocking reads. */
+ transit(session, "no io", H2_SESSION_ST_IDLE);
}
+ else {
+ /* Unable to do blocking reads, as we wait on events from
+ * task processing in other threads. Do a busy wait with
+ * backoff timer. */
+ transit(session, "no io", H2_SESSION_ST_WAIT);
+ }
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_wait_timeout(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_WAIT:
+ transit(session, "wait timeout", H2_SESSION_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_stream_ready(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_WAIT:
+ transit(session, "stream ready", H2_SESSION_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_data_read(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ transit(session, "data read", H2_SESSION_ST_BUSY);
+ break;
+    /* no fall-through: the case above breaks */
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_ngh2_done(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_DONE:
+ /* nop */
+ break;
+ default:
+ transit(session, "nghttp2 done", H2_SESSION_ST_DONE);
+ break;
+ }
+}
+
+static void dispatch_event(h2_session *session, h2_session_event_t ev,
+ int arg, const char *msg)
+{
+ switch (ev) {
+ case H2_SESSION_EV_INIT:
+ h2_session_ev_init(session, arg, msg);
+ break;
+ case H2_SESSION_EV_LOCAL_GOAWAY:
+ h2_session_ev_local_goaway(session, arg, msg);
+ break;
+ case H2_SESSION_EV_REMOTE_GOAWAY:
+ h2_session_ev_remote_goaway(session, arg, msg);
+ break;
+ case H2_SESSION_EV_CONN_ERROR:
+ h2_session_ev_conn_error(session, arg, msg);
+ break;
+ case H2_SESSION_EV_PROTO_ERROR:
+ h2_session_ev_proto_error(session, arg, msg);
+ break;
+ case H2_SESSION_EV_CONN_TIMEOUT:
+ h2_session_ev_conn_timeout(session, arg, msg);
+ break;
+ case H2_SESSION_EV_NO_IO:
+ h2_session_ev_no_io(session, arg, msg);
+ break;
+ case H2_SESSION_EV_WAIT_TIMEOUT:
+ h2_session_ev_wait_timeout(session, arg, msg);
+ break;
+ case H2_SESSION_EV_STREAM_READY:
+ h2_session_ev_stream_ready(session, arg, msg);
+ break;
+ case H2_SESSION_EV_DATA_READ:
+ h2_session_ev_data_read(session, arg, msg);
+ break;
+ case H2_SESSION_EV_NGH2_DONE:
+ h2_session_ev_ngh2_done(session, arg, msg);
+ break;
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_session(%ld): unknown event %d",
+ session->id, ev);
+ break;
+ }
+
+ if (session->state == H2_SESSION_ST_DONE) {
+ h2_mplx_abort(session->mplx);
+ }
+}
+
+static const int MAX_WAIT_MICROS = 200 * 1000;
+
+apr_status_t h2_session_process(h2_session *session, int async)
+{
+ apr_status_t status = APR_SUCCESS;
+ conn_rec *c = session->c;
+ int rv, have_written, have_read;
+
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
+ "h2_session(%ld): process start, async=%d", session->id, async);
+
+ while (1) {
+ have_read = have_written = 0;
+
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ if (!h2_is_acceptable_connection(c, 1)) {
+ h2_session_shutdown(session, NGHTTP2_INADEQUATE_SECURITY, NULL);
+ }
+ else {
+ ap_update_child_status(c->sbh, SERVER_BUSY_READ, NULL);
+ status = h2_session_start(session, &rv);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ "h2_session(%ld): started on %s:%d", session->id,
+ session->s->server_hostname,
+ c->local_addr->port);
+ if (status != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+ dispatch_event(session, H2_SESSION_EV_INIT, 0, NULL);
+ }
+ break;
+
+ case H2_SESSION_ST_IDLE:
+ h2_filter_cin_timeout_set(session->cin, session->keepalive_secs);
+ ap_update_child_status(c->sbh, SERVER_BUSY_KEEPALIVE, NULL);
+ status = h2_session_read(session, 1, 10);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
+ }
+ else if (status == APR_EAGAIN) {
+ /* nothing to read */
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
+ break;
+ }
+ else {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+ break;
+
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ case H2_SESSION_ST_REMOTE_SHUTDOWN:
+ if (nghttp2_session_want_read(session->ngh2)) {
+ ap_update_child_status(c->sbh, SERVER_BUSY_READ, NULL);
+ h2_filter_cin_timeout_set(session->cin, session->timeout_secs);
+ status = h2_session_read(session, 0, 10);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
+ }
+ else if (status == APR_EAGAIN) {
+ /* nothing to read */
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
+ break;
+ }
+ else {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+ }
+
+ if (!h2_stream_set_is_empty(session->streams)) {
+ /* resume any streams for which data is available again */
+ h2_session_resume_streams_with_data(session);
+ /* Submit any responses/push_promises that are ready */
+ status = h2_session_submit(session);
+ if (status == APR_SUCCESS) {
+ have_written = 1;
+ }
+ else if (status != APR_EAGAIN) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR, "submit error");
+ break;
+ }
+ /* send out window updates for our inputs */
+ status = h2_mplx_in_update_windows(session->mplx);
+ if (status != APR_SUCCESS && status != APR_EAGAIN) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR, "window update error");
+ break;
+ }
+ }
+
+ if (nghttp2_session_want_write(session->ngh2)) {
+ status = h2_session_send(session);
+ if (status == APR_SUCCESS) {
+ have_written = 1;
+ }
+ else {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR, "writing");
+ break;
+ }
+ }
+
+ if (have_read || have_written) {
+ session->wait_us = 0;
+ }
+ else {
+ dispatch_event(session, H2_SESSION_EV_NO_IO, 0, NULL);
+ }
+ break;
+
+ case H2_SESSION_ST_WAIT:
+ session->wait_us = H2MAX(session->wait_us, 10);
+ if (APLOGctrace1(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_session: wait for data, %ld micros",
+ (long)session->wait_us);
+ }
+
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE2, status, c,
+ "h2_session(%ld): process -> trywait", session->id);
+ status = h2_mplx_out_trywait(session->mplx, session->wait_us,
+ session->iowait);
+ if (status == APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_STREAM_READY, 0, NULL);
+ }
+ else if (status == APR_TIMEUP) {
+            /* nothing, increase timer for graceful backoff */
+ session->wait_us = H2MIN(session->wait_us*2, MAX_WAIT_MICROS);
+ dispatch_event(session, H2_SESSION_EV_WAIT_TIMEOUT, 0, NULL);
+ }
+ else {
+ h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR, "cond wait error");
+ }
+ break;
+
+ case H2_SESSION_ST_DONE:
+ status = APR_EOF;
+ goto out;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ "h2_session(%ld): unknown state %d", session->id, session->state);
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, 0, NULL);
+ break;
}
-
+
if (have_written) {
h2_conn_io_flush(&session->io);
}
+ else if (!nghttp2_session_want_read(session->ngh2)
+ && !nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL);
+ }
+ }
+
+out:
+ if (have_written) {
+ h2_conn_io_flush(&session->io);
+ }
+
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
+ "h2_session(%ld): [%s] process returns",
+ session->id, state_name(session->state));
+
+ if ((session->state != H2_SESSION_ST_DONE)
+ && (APR_STATUS_IS_EOF(status)
+ || APR_STATUS_IS_ECONNRESET(status)
+ || APR_STATUS_IS_ECONNABORTED(status))) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+
+ status = (session->state == H2_SESSION_ST_DONE)? APR_EOF : APR_SUCCESS;
+ if (session->state == H2_SESSION_ST_DONE) {
+ if (!session->eoc_written) {
+ session->eoc_written = 1;
+ h2_conn_io_write_eoc(&session->io,
+ h2_bucket_eoc_create(session->c->bucket_alloc, session));
+ }
}
-end_process:
return status;
}
struct apr_thread_mutext_t;
struct apr_thread_cond_t;
+struct h2_ctx;
struct h2_config;
+struct h2_filter_cin;
struct h2_mplx;
struct h2_priority;
struct h2_push;
+struct h2_push_diary;
struct h2_response;
struct h2_session;
struct h2_stream;
struct nghttp2_session;
-typedef struct h2_session h2_session;
-
-struct h2_session {
+typedef enum {
+ H2_SESSION_ST_INIT, /* send initial SETTINGS, etc. */
+ H2_SESSION_ST_DONE, /* finished, connection close */
+    H2_SESSION_ST_IDLE,           /* nothing to write, expecting incoming data */
+ H2_SESSION_ST_BUSY, /* read/write without stop */
+ H2_SESSION_ST_WAIT, /* waiting for tasks reporting back */
+ H2_SESSION_ST_LOCAL_SHUTDOWN, /* we announced GOAWAY */
+ H2_SESSION_ST_REMOTE_SHUTDOWN, /* client announced GOAWAY */
+} h2_session_state;
+
+typedef enum {
+ H2_SESSION_EV_INIT, /* session was initialized */
+ H2_SESSION_EV_LOCAL_GOAWAY, /* we send a GOAWAY */
+    H2_SESSION_EV_LOCAL_GOAWAY,   /* we send a GOAWAY */
+ H2_SESSION_EV_CONN_ERROR, /* connection error */
+ H2_SESSION_EV_PROTO_ERROR, /* protocol error */
+ H2_SESSION_EV_CONN_TIMEOUT, /* connection timeout */
+ H2_SESSION_EV_NO_IO, /* nothing has been read or written */
+ H2_SESSION_EV_WAIT_TIMEOUT, /* timeout waiting for tasks */
+ H2_SESSION_EV_STREAM_READY, /* stream signalled availability of headers/data */
+ H2_SESSION_EV_DATA_READ, /* connection data has been read */
+    H2_SESSION_EV_NGH2_DONE,      /* nghttp2 wants to neither read nor write anything */
+} h2_session_event_t;
+
+typedef struct h2_session {
long id; /* identifier of this session, unique
* inside a httpd process */
conn_rec *c; /* the connection this session serves */
request_rec *r; /* the request that started this in case
* of 'h2c', NULL otherwise */
+ server_rec *s; /* server/vhost we're starting on */
const struct h2_config *config; /* Relevant config for this session */
- int aborted; /* this session is being aborted */
- int reprioritize; /* scheduled streams priority needs to
- * be re-evaluated */
- int unsent_promises; /* number of submitted, but not yet sent
- * push promised */
+
+ h2_session_state state; /* state session is in */
+ unsigned int reprioritize : 1; /* scheduled streams priority changed */
+ unsigned int eoc_written : 1; /* h2 eoc bucket written */
+    apr_interval_time_t wait_us;        /* timeout during BUSY_WAIT state, micro secs */
+
+ int unsent_submits; /* number of submitted, but not yet written responses. */
+ int unsent_promises; /* number of submitted, but not yet written push promised */
+
+ int requests_received; /* number of http/2 requests received */
+ int responses_submitted; /* number of http/2 responses submitted */
+ int streams_reset; /* number of http/2 streams reset by client */
+ int pushes_promised; /* number of http/2 push promises submitted */
+ int pushes_submitted; /* number of http/2 pushed responses submitted */
+ int pushes_reset; /* number of http/2 pushed reset by client */
+
apr_size_t frames_received; /* number of http/2 frames received */
+ apr_size_t frames_sent; /* number of http/2 frames sent */
+
+ int max_stream_received; /* highest stream id created */
+ int max_stream_handled; /* highest stream id completed */
+
apr_size_t max_stream_count; /* max number of open streams */
apr_size_t max_stream_mem; /* max buffer memory for a single stream */
+ int timeout_secs; /* connection timeout (seconds) */
+ int keepalive_secs; /* connection idle timeout (seconds) */
+
apr_pool_t *pool; /* pool to use in session handling */
apr_bucket_brigade *bbtmp; /* brigade for keeping temporary data */
struct apr_thread_cond_t *iowait; /* our cond when trywaiting for data */
+ struct h2_filter_cin *cin; /* connection input filter context */
h2_conn_io io; /* io on httpd conn filters */
+
struct h2_mplx *mplx; /* multiplexer for stream data */
struct h2_stream *last_stream; /* last stream worked with */
struct h2_stream_set *streams; /* streams handled by this session */
- int max_stream_received; /* highest stream id created */
- int max_stream_handled; /* highest stream id handled successfully */
-
apr_pool_t *spare; /* spare stream pool */
struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */
struct h2_workers *workers; /* for executing stream tasks */
-};
+
+ struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */
+} h2_session;
/**
* @param workers the worker pool to use
* @return the created session
*/
-h2_session *h2_session_create(conn_rec *c, const struct h2_config *cfg,
+h2_session *h2_session_create(conn_rec *c, struct h2_ctx *ctx,
struct h2_workers *workers);
/**
* @param workers the worker pool to use
* @return the created session
*/
-h2_session *h2_session_rcreate(request_rec *r, const struct h2_config *cfg,
+h2_session *h2_session_rcreate(request_rec *r, struct h2_ctx *ctx,
struct h2_workers *workers);
/**
*
* @param session the sessionm to process
*/
-apr_status_t h2_session_process(h2_session *session);
-
-/**
- * Destroy the session and all objects it still contains. This will not
- * destroy h2_task instances that have not finished yet.
- * @param session the session to destroy
- */
-void h2_session_destroy(h2_session *session);
+apr_status_t h2_session_process(h2_session *session, int async);
/**
* Cleanup the session and all objects it still contains. This will not
void h2_session_eoc_callback(h2_session *session);
/**
- * Called once at start of session.
- * Sets up the session and sends the initial SETTINGS frame.
- *Â @param session the session to start
- * @param rv error codes in libnghttp2 lingo are returned here
- * @return APR_SUCCESS if all went well
- */
-apr_status_t h2_session_start(h2_session *session, int *rv);
-
-/**
- * Called when an error occured and the session needs to shut down.
- * @param session the session to shut down
- * @param reason the apache status that caused the shutdown
- * @param rv the nghttp2 reason for shutdown, set to 0 if you have none.
- *
+ * Called when a serious error occurred and the session needs to terminate
+ * without further connection io.
+ * @param session the session to abort
+ * @param reason the apache status that caused the abort
*/
-apr_status_t h2_session_abort(h2_session *session, apr_status_t reason, int rv);
+void h2_session_abort(h2_session *session, apr_status_t reason);
/**
- * Called before a session gets destroyed, might flush output etc.
+ * Close and deallocate the given session.
*/
-apr_status_t h2_session_close(h2_session *session);
+void h2_session_close(h2_session *session);
/* Start submitting the response to a stream request. This is possible
* once we have all the response headers. */
#include "h2_conn.h"
#include "h2_config.h"
#include "h2_h2.h"
+#include "h2_filter.h"
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
#include "h2_util.h"
-#define H2_STREAM_OUT(lvl,s,msg) \
- do { \
- if (APLOG_C_IS_LEVEL((s)->session->c,lvl)) \
- h2_util_bb_log((s)->session->c,(s)->id,lvl,msg,(s)->bbout); \
- } while(0)
#define H2_STREAM_IN(lvl,s,msg) \
do { \
if (APLOG_C_IS_LEVEL((s)->session->c,lvl)) \
}
}
+static h2_sos *h2_sos_mplx_create(h2_stream *stream, h2_response *response);
+
h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session)
{
h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream));
h2_stream *stream = h2_stream_create(id, pool, session);
set_state(stream, H2_STREAM_ST_OPEN);
stream->request = h2_request_create(id, pool, session->config);
- stream->bbout = apr_brigade_create(stream->pool,
- stream->session->c->bucket_alloc);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
"h2_stream(%ld-%d): opened", session->id, stream->id);
stream->rst_error = error_code;
close_input(stream);
close_output(stream);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): reset, error=%d",
stream->session->id, stream->id, error_code);
}
+struct h2_response *h2_stream_get_response(h2_stream *stream)
+{
+ return stream->sos? stream->sos->response : NULL;
+}
+
apr_status_t h2_stream_set_response(h2_stream *stream, h2_response *response,
apr_bucket_brigade *bb)
{
apr_status_t status = APR_SUCCESS;
+ h2_sos *sos;
+
if (!output_open(stream)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): output closed",
stream->session->id, stream->id);
return APR_ECONNRESET;
}
- stream->response = response;
- if (bb && !APR_BRIGADE_EMPTY(bb)) {
- int move_all = INT_MAX;
- /* we can move file handles from h2_mplx into this h2_stream as many
- * as we want, since the lifetimes are the same and we are not freeing
- * the ones in h2_mplx->io before this stream is done. */
- H2_STREAM_OUT(APLOG_TRACE2, stream, "h2_stream set_response_pre");
- status = h2_util_move(stream->bbout, bb, 16 * 1024, &move_all,
- "h2_stream_set_response");
- H2_STREAM_OUT(APLOG_TRACE2, stream, "h2_stream set_response_post");
+ sos = h2_sos_mplx_create(stream, response);
+ if (sos->response->sos_filter) {
+ sos = h2_filter_sos_create(sos->response->sos_filter, sos);
}
+ stream->sos = sos;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, stream->session->c,
+ status = stream->sos->buffer(stream->sos, bb);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c,
"h2_stream(%ld-%d): set_response(%d)",
- stream->session->id, stream->id, response->http_status);
+ stream->session->id, stream->id, stream->sos->response->http_status);
return status;
}
}
}
-apr_status_t h2_stream_schedule(h2_stream *stream, int eos,
+apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled,
h2_stream_pri_cmp *cmp, void *ctx)
{
apr_status_t status;
/* Seeing the end-of-headers, we have everything we need to
* start processing it.
*/
- status = h2_request_end_headers(stream->request, stream->pool, eos);
+ status = h2_request_end_headers(stream->request, stream->pool,
+ eos, push_enabled);
if (status == APR_SUCCESS) {
if (!eos) {
+ stream->request->body = 1;
stream->bbin = apr_brigade_create(stream->pool,
stream->session->c->bucket_alloc);
}
stream->input_remaining = stream->request->content_length;
status = h2_mplx_process(stream->session->mplx, stream->id,
- stream->request, eos, cmp, ctx);
+ stream->request, cmp, ctx);
stream->scheduled = 1;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): scheduled %s %s://%s%s",
stream->session->id, stream->id,
stream->request->method, stream->request->scheme,
}
else {
h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): RST=2 (internal err) %s %s://%s%s",
stream->session->id, stream->id,
stream->request->method, stream->request->scheme,
status = h2_mplx_in_write(stream->session->mplx, stream->id, stream->bbin);
if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, stream->session->mplx->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->mplx->c,
"h2_stream(%ld-%d): flushing input data",
stream->session->id, stream->id);
}
}
static apr_status_t input_add_data(h2_stream *stream,
- const char *data, size_t len, int chunked)
+ const char *data, size_t len)
{
- apr_status_t status = APR_SUCCESS;
-
- if (chunked) {
- status = apr_brigade_printf(stream->bbin, input_flush, stream,
- "%lx\r\n", (unsigned long)len);
- if (status == APR_SUCCESS) {
- status = apr_brigade_write(stream->bbin, input_flush, stream, data, len);
- if (status == APR_SUCCESS) {
- status = apr_brigade_puts(stream->bbin, input_flush, stream, "\r\n");
- }
- }
- }
- else {
- status = apr_brigade_write(stream->bbin, input_flush, stream, data, len);
- }
- return status;
-}
-
-static int input_add_header(void *str, const char *key, const char *value)
-{
- h2_stream *stream = str;
- apr_status_t status = input_add_data(stream, key, strlen(key), 0);
- if (status == APR_SUCCESS) {
- status = input_add_data(stream, ": ", 2, 0);
- if (status == APR_SUCCESS) {
- status = input_add_data(stream, value, strlen(value), 0);
- if (status == APR_SUCCESS) {
- status = input_add_data(stream, "\r\n", 2, 0);
- }
- }
- }
- return (status == APR_SUCCESS);
+ return apr_brigade_write(stream->bbin, input_flush, stream, data, len);
}
apr_status_t h2_stream_close_input(h2_stream *stream)
apr_status_t status = APR_SUCCESS;
AP_DEBUG_ASSERT(stream);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): closing input",
stream->session->id, stream->id);
H2_STREAM_IN(APLOG_TRACE2, stream, "close_pre");
if (close_input(stream) && stream->bbin) {
- if (stream->request->chunked) {
- apr_table_t *trailers = stream->request->trailers;
- if (trailers && !apr_is_empty_table(trailers)) {
- status = input_add_data(stream, "0\r\n", 3, 0);
- apr_table_do(input_add_header, stream, trailers, NULL);
- status = input_add_data(stream, "\r\n", 2, 0);
- }
- else {
- status = input_add_data(stream, "0\r\n\r\n", 5, 0);
- }
- }
-
- if (status == APR_SUCCESS) {
- status = h2_stream_input_flush(stream);
- }
+ status = h2_stream_input_flush(stream);
if (status == APR_SUCCESS) {
status = h2_mplx_in_close(stream->session->mplx, stream->id);
}
AP_DEBUG_ASSERT(stream);
if (input_closed(stream) || !stream->request->eoh || !stream->bbin) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): writing denied, closed=%d, eoh=%d, bbin=%d",
stream->session->id, stream->id, input_closed(stream),
stream->request->eoh, !!stream->bbin);
return APR_EINVAL;
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): add %ld input bytes",
stream->session->id, stream->id, (long)len);
H2_STREAM_IN(APLOG_TRACE2, stream, "write_data_pre");
- if (stream->request->chunked) {
- /* if input may have a body and we have not seen any
- * content-length header, we need to chunk the input data.
- */
- status = input_add_data(stream, data, len, 1);
- }
- else {
+ if (!stream->request->chunked) {
stream->input_remaining -= len;
if (stream->input_remaining < 0) {
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c,
h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
return APR_ECONNABORTED;
}
- status = input_add_data(stream, data, len, 0);
}
+
+ status = input_add_data(stream, data, len);
if (status == APR_SUCCESS) {
status = h2_stream_input_flush(stream);
}
return status;
}
+void h2_stream_set_suspended(h2_stream *stream, int suspended)
+{
+ AP_DEBUG_ASSERT(stream);
+ stream->suspended = !!suspended;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_stream(%ld-%d): suspended=%d",
+ stream->session->id, stream->id, stream->suspended);
+}
+
+int h2_stream_is_suspended(h2_stream *stream)
+{
+ AP_DEBUG_ASSERT(stream);
+ return stream->suspended;
+}
+
apr_status_t h2_stream_prep_read(h2_stream *stream,
apr_off_t *plen, int *peos)
{
- apr_status_t status = APR_SUCCESS;
- const char *src;
- apr_table_t *trailers = NULL;
- int test_read = (*plen == 0);
-
if (stream->rst_error) {
return APR_ECONNRESET;
}
- H2_STREAM_OUT(APLOG_TRACE2, stream, "h2_stream prep_read_pre");
- if (!APR_BRIGADE_EMPTY(stream->bbout)) {
- src = "stream";
- status = h2_util_bb_avail(stream->bbout, plen, peos);
- if (!test_read && status == APR_SUCCESS && !*peos && !*plen) {
- apr_brigade_cleanup(stream->bbout);
- return h2_stream_prep_read(stream, plen, peos);
- }
- trailers = stream->response? stream->response->trailers : NULL;
+ if (!stream->sos) {
+ return APR_EGENERAL;
}
- else {
- src = "mplx";
- status = h2_mplx_out_readx(stream->session->mplx, stream->id,
- NULL, NULL, plen, peos, &trailers);
- if (trailers && stream->response) {
- h2_response_set_trailers(stream->response, trailers);
- }
- }
-
- if (!test_read && status == APR_SUCCESS && !*peos && !*plen) {
- status = APR_EAGAIN;
- }
-
- H2_STREAM_OUT(APLOG_TRACE2, stream, "h2_stream prep_read_post");
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c,
- "h2_stream(%ld-%d): prep_read %s, len=%ld eos=%d, trailers=%s",
- stream->session->id, stream->id, src, (long)*plen, *peos,
- trailers? "yes" : "no");
- return status;
+ return stream->sos->prep_read(stream->sos, plen, peos);
}
apr_status_t h2_stream_readx(h2_stream *stream,
h2_io_data_cb *cb, void *ctx,
apr_off_t *plen, int *peos)
{
- apr_status_t status = APR_SUCCESS;
- apr_table_t *trailers = NULL;
- const char *src;
-
- H2_STREAM_OUT(APLOG_TRACE2, stream, "h2_stream readx_pre");
if (stream->rst_error) {
return APR_ECONNRESET;
}
- *peos = 0;
- if (!APR_BRIGADE_EMPTY(stream->bbout)) {
- apr_off_t origlen = *plen;
-
- src = "stream";
- status = h2_util_bb_readx(stream->bbout, cb, ctx, plen, peos);
- if (status == APR_SUCCESS && !*peos && !*plen) {
- apr_brigade_cleanup(stream->bbout);
- *plen = origlen;
- return h2_stream_readx(stream, cb, ctx, plen, peos);
- }
- }
- else {
- src = "mplx";
- status = h2_mplx_out_readx(stream->session->mplx, stream->id,
- cb, ctx, plen, peos, &trailers);
+ if (!stream->sos) {
+ return APR_EGENERAL;
}
-
- if (trailers && stream->response) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c,
- "h2_stream(%ld-%d): readx, saving trailers",
- stream->session->id, stream->id);
- h2_response_set_trailers(stream->response, trailers);
- }
-
- if (status == APR_SUCCESS && !*peos && !*plen) {
- status = APR_EAGAIN;
- }
-
- H2_STREAM_OUT(APLOG_TRACE2, stream, "h2_stream readx_post");
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c,
- "h2_stream(%ld-%d): readx %s, len=%ld eos=%d",
- stream->session->id, stream->id, src, (long)*plen, *peos);
- H2_STREAM_OUT(APLOG_TRACE2, stream, "h2_stream readx_post");
-
- return status;
+ return stream->sos->readx(stream->sos, cb, ctx, plen, peos);
}
apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
apr_off_t *plen, int *peos)
{
- apr_status_t status = APR_SUCCESS;
- apr_table_t *trailers = NULL;
-
- H2_STREAM_OUT(APLOG_TRACE2, stream, "h2_stream read_to_pre");
if (stream->rst_error) {
return APR_ECONNRESET;
}
-
- if (APR_BRIGADE_EMPTY(stream->bbout)) {
- apr_off_t tlen = *plen;
- int eos;
- status = h2_mplx_out_read_to(stream->session->mplx, stream->id,
- stream->bbout, &tlen, &eos, &trailers);
- }
-
- if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(stream->bbout)) {
- status = h2_transfer_brigade(bb, stream->bbout, stream->pool,
- plen, peos);
- }
- else {
- *plen = 0;
- *peos = 0;
- }
-
- if (trailers && stream->response) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c,
- "h2_stream(%ld-%d): read_to, saving trailers",
- stream->session->id, stream->id);
- h2_response_set_trailers(stream->response, trailers);
- }
-
- if (status == APR_SUCCESS && !*peos && !*plen) {
- status = APR_EAGAIN;
+ if (!stream->sos) {
+ return APR_EGENERAL;
}
- H2_STREAM_OUT(APLOG_TRACE2, stream, "h2_stream read_to_post");
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c,
- "h2_stream(%ld-%d): read_to, len=%ld eos=%d",
- stream->session->id, stream->id, (long)*plen, *peos);
- return status;
-}
-
-void h2_stream_set_suspended(h2_stream *stream, int suspended)
-{
- AP_DEBUG_ASSERT(stream);
- stream->suspended = !!suspended;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
- "h2_stream(%ld-%d): suspended=%d",
- stream->session->id, stream->id, stream->suspended);
-}
-
-int h2_stream_is_suspended(h2_stream *stream)
-{
- AP_DEBUG_ASSERT(stream);
- return stream->suspended;
+ return stream->sos->read_to(stream->sos, bb, plen, peos);
}
int h2_stream_input_is_open(h2_stream *stream)
apr_array_header_t *pushes;
int i;
- pushes = h2_push_collect(stream->pool, stream->request, stream->response);
+ pushes = h2_push_collect_update(stream, stream->request,
+ h2_stream_get_response(stream));
if (pushes && !apr_is_empty_array(pushes)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): found %d push candidates",
apr_table_t *h2_stream_get_trailers(h2_stream *stream)
{
- return stream->response? stream->response->trailers : NULL;
+ return stream->sos? stream->sos->get_trailers(stream->sos) : NULL;
}
const h2_priority *h2_stream_get_priority(h2_stream *stream)
{
- if (stream->initiated_on && stream->response) {
- const char *ctype = apr_table_get(stream->response->headers, "content-type");
+ h2_response *response = h2_stream_get_response(stream);
+
+ if (stream->initiated_on && response) {
+ const char *ctype = apr_table_get(response->headers, "content-type");
if (ctype) {
/* FIXME: Not good enough, config needs to come from request->server */
return h2_config_get_priority(stream->session->config, ctype);
return NULL;
}
+/*******************************************************************************
+ * h2_sos_mplx
+ ******************************************************************************/
+
+typedef struct h2_sos_mplx {
+ h2_mplx *m;
+ apr_bucket_brigade *bb;
+ apr_table_t *trailers;
+} h2_sos_mplx;
+
+#define H2_SOS_MPLX_OUT(lvl,msos,msg) \
+ do { \
+ if (APLOG_C_IS_LEVEL((msos)->m->c,lvl)) \
+ h2_util_bb_log((msos)->m->c,(msos)->m->id,lvl,msg,(msos)->bb); \
+ } while(0)
+
+
+static apr_status_t h2_sos_mplx_read_to(h2_sos *sos, apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos)
+{
+ h2_sos_mplx *msos = sos->ctx;
+ apr_status_t status = APR_SUCCESS;
+ apr_table_t *trailers = NULL;
+
+ H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx read_to_pre");
+
+ if (APR_BRIGADE_EMPTY(msos->bb)) {
+ apr_off_t tlen = *plen;
+ int eos;
+ status = h2_mplx_out_read_to(msos->m, sos->stream->id,
+ msos->bb, &tlen, &eos, &trailers);
+ }
+
+ if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(msos->bb)) {
+ status = h2_transfer_brigade(bb, msos->bb, sos->stream->pool,
+ plen, peos);
+ }
+ else {
+ *plen = 0;
+ *peos = 0;
+ }
+
+ if (trailers) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c,
+ "h2_stream(%ld-%d): read_to, saving trailers",
+ msos->m->id, sos->stream->id);
+ msos->trailers = trailers;
+ }
+
+ if (status == APR_SUCCESS && !*peos && !*plen) {
+ status = APR_EAGAIN;
+ }
+ H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx read_to_post");
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c,
+ "h2_stream(%ld-%d): read_to, len=%ld eos=%d",
+ msos->m->id, sos->stream->id, (long)*plen, *peos);
+ return status;
+}
+
+static apr_status_t h2_sos_mplx_prep_read(h2_sos *sos, apr_off_t *plen, int *peos)
+{
+ h2_sos_mplx *msos = sos->ctx;
+ apr_status_t status = APR_SUCCESS;
+ const char *src;
+ apr_table_t *trailers = NULL;
+ int test_read = (*plen == 0);
+
+ H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx prep_read_pre");
+ if (!APR_BRIGADE_EMPTY(msos->bb)) {
+ src = "stream";
+ status = h2_util_bb_avail(msos->bb, plen, peos);
+ if (!test_read && status == APR_SUCCESS && !*peos && !*plen) {
+ apr_brigade_cleanup(msos->bb);
+ return h2_sos_mplx_prep_read(sos, plen, peos);
+ }
+ }
+ else {
+ src = "mplx";
+ status = h2_mplx_out_readx(msos->m, sos->stream->id,
+ NULL, NULL, plen, peos, &trailers);
+ if (trailers) {
+ msos->trailers = trailers;
+ }
+ }
+
+ if (!test_read && status == APR_SUCCESS && !*peos && !*plen) {
+ status = APR_EAGAIN;
+ }
+
+ H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx prep_read_post");
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c,
+ "h2_stream(%ld-%d): prep_read %s, len=%ld eos=%d, trailers=%s",
+ msos->m->id, sos->stream->id, src, (long)*plen, *peos,
+ msos->trailers? "yes" : "no");
+ return status;
+}
+
+static apr_status_t h2_sos_mplx_readx(h2_sos *sos, h2_io_data_cb *cb, void *ctx,
+ apr_off_t *plen, int *peos)
+{
+ h2_sos_mplx *msos = sos->ctx;
+ apr_status_t status = APR_SUCCESS;
+ apr_table_t *trailers = NULL;
+ const char *src;
+
+ H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx readx_pre");
+ *peos = 0;
+ if (!APR_BRIGADE_EMPTY(msos->bb)) {
+ apr_off_t origlen = *plen;
+
+ src = "stream";
+ status = h2_util_bb_readx(msos->bb, cb, ctx, plen, peos);
+ if (status == APR_SUCCESS && !*peos && !*plen) {
+ apr_brigade_cleanup(msos->bb);
+ *plen = origlen;
+ return h2_sos_mplx_readx(sos, cb, ctx, plen, peos);
+ }
+ }
+ else {
+ src = "mplx";
+ status = h2_mplx_out_readx(msos->m, sos->stream->id,
+ cb, ctx, plen, peos, &trailers);
+ }
+
+ if (trailers) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c,
+ "h2_stream(%ld-%d): readx, saving trailers",
+ msos->m->id, sos->stream->id);
+ msos->trailers = trailers;
+ }
+
+ if (status == APR_SUCCESS && !*peos && !*plen) {
+ status = APR_EAGAIN;
+ }
+
+ H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_stream readx_post");
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c,
+ "h2_stream(%ld-%d): readx %s, len=%ld eos=%d",
+ msos->m->id, sos->stream->id, src, (long)*plen, *peos);
+
+ return status;
+}
+
+static apr_table_t *h2_sos_mplx_get_trailers(h2_sos *sos)
+{
+ h2_sos_mplx *msos = sos->ctx;
+
+ return msos->trailers;
+}
+
+static apr_status_t h2_sos_mplx_buffer(h2_sos *sos, apr_bucket_brigade *bb)
+{
+ h2_sos_mplx *msos = sos->ctx;
+ apr_status_t status = APR_SUCCESS;
+
+ if (bb && !APR_BRIGADE_EMPTY(bb)) {
+ apr_size_t move_all = INT_MAX;
+ /* we can move file handles from h2_mplx into this h2_stream as many
+ * as we want, since the lifetimes are the same and we are not freeing
+ * the ones in h2_mplx->io before this stream is done. */
+ H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx set_response_pre");
+ status = h2_util_move(msos->bb, bb, 16 * 1024, &move_all,
+ "h2_stream_set_response");
+ H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx set_response_post");
+ }
+ return status;
+}
+
+static h2_sos *h2_sos_mplx_create(h2_stream *stream, h2_response *response)
+{
+ h2_sos *sos;
+ h2_sos_mplx *msos;
+
+ msos = apr_pcalloc(stream->pool, sizeof(*msos));
+ msos->m = stream->session->mplx;
+ msos->bb = apr_brigade_create(stream->pool, msos->m->c->bucket_alloc);
+
+ sos = apr_pcalloc(stream->pool, sizeof(*sos));
+ sos->stream = stream;
+ sos->response = response;
+
+ sos->ctx = msos;
+ sos->buffer = h2_sos_mplx_buffer;
+ sos->prep_read = h2_sos_mplx_prep_read;
+ sos->readx = h2_sos_mplx_readx;
+ sos->read_to = h2_sos_mplx_read_to;
+ sos->get_trailers = h2_sos_mplx_get_trailers;
+
+ sos->response = response;
+
+ return sos;
+}
+
struct h2_request;
struct h2_response;
struct h2_session;
-struct h2_task;
+struct h2_sos;
typedef struct h2_stream h2_stream;
apr_pool_t *pool; /* the memory pool for this stream */
struct h2_request *request; /* the request made in this stream */
- struct h2_response *response; /* the response, once ready */
-
- int aborted; /* was aborted */
- int suspended; /* DATA sending has been suspended */
int rst_error; /* stream error for RST_STREAM */
- int scheduled; /* stream has been scheduled */
- int submitted; /* response HEADER has been sent */
+
+ unsigned int aborted : 1; /* was aborted */
+ unsigned int suspended : 1; /* DATA sending has been suspended */
+ unsigned int scheduled : 1; /* stream has been scheduled */
+ unsigned int submitted : 1; /* response HEADER has been sent */
apr_off_t input_remaining; /* remaining bytes on input as advertised via content-length */
apr_bucket_brigade *bbin; /* input DATA */
-
- apr_bucket_brigade *bbout; /* output DATA */
+
+ struct h2_sos *sos; /* stream output source, e.g. to read output from */
apr_off_t data_frames_sent; /* # of DATA frames sent out for this stream */
};
* @param cmp priority comparision
* @param ctx context for comparision
*/
-apr_status_t h2_stream_schedule(h2_stream *stream, int eos,
+apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled,
h2_stream_pri_cmp *cmp, void *ctx);
/**
*/
int h2_stream_is_scheduled(h2_stream *stream);
+struct h2_response *h2_stream_get_response(h2_stream *stream);
+
/**
* Set the response for this stream. Invoked when all meta data for
* the stream response has been collected.
apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
apr_off_t *plen, int *peos);
+/**
+ * Get optional trailers for this stream, may be NULL. Meaningful
+ * results can only be expected when the end of the response body has
+ * been reached.
+ *
+ * @param stream to ask for trailers
+ * @return the stream's trailers, or NULL if there are none
+ */
+apr_table_t *h2_stream_get_trailers(h2_stream *stream);
+
/**
* Set the suspended state of the stream.
* @param stream the stream to change state on
*/
apr_status_t h2_stream_submit_pushes(h2_stream *stream);
-/**
- * Get optional trailers for this stream, may be NULL. Meaningful
- * results can only be expected when the end of the response body has
- * been reached.
- *
- * @param stream to ask for trailers
- * @return trailers for NULL
- */
-apr_table_t *h2_stream_get_trailers(h2_stream *stream);
-
/**
* Get priority information set for this stream.
*/
apr_status_t h2_switch_init(apr_pool_t *pool, server_rec *s)
{
(void)pool;
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "h2_switch init");
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_switch init");
return APR_SUCCESS;
}
}
if (found) {
- h2_ctx *ctx = h2_ctx_get(c);
+ h2_ctx *ctx = h2_ctx_get(c, 1);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"switching protocol to '%s'", protocol);
ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
/* Ok, start an h2_conn on this one. */
- status = h2_conn_process(r->connection, r, r->server);
- if (status != DONE) {
- /* Nothing really to do about this. */
+ h2_ctx_server_set(ctx, r->server);
+ status = h2_conn_setup(ctx, r->connection, r);
+ if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r,
- "session proessed, unexpected status");
+ "session setup");
+ return status;
}
+
+ h2_conn_run(ctx, c);
+ return DONE;
}
return DONE;
}
ap_hook_protocol_switch(h2_protocol_switch, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_protocol_get(h2_protocol_get, NULL, NULL, APR_HOOK_MIDDLE);
}
-
apr_bucket_brigade* brigade,
ap_input_mode_t mode,
apr_read_type_e block,
- apr_off_t readbytes) {
+ apr_off_t readbytes)
+{
h2_task *task = filter->ctx;
AP_DEBUG_ASSERT(task);
if (!task->input) {
}
static apr_status_t h2_filter_stream_output(ap_filter_t* filter,
- apr_bucket_brigade* brigade) {
+ apr_bucket_brigade* brigade)
+{
h2_task *task = filter->ctx;
AP_DEBUG_ASSERT(task);
if (!task->output) {
}
static apr_status_t h2_filter_read_response(ap_filter_t* f,
- apr_bucket_brigade* bb) {
+ apr_bucket_brigade* bb)
+{
h2_task *task = f->ctx;
AP_DEBUG_ASSERT(task);
if (!task->output || !task->output->from_h1) {
*/
ap_hook_pre_connection(h2_task_pre_conn,
NULL, mod_ssl, APR_HOOK_FIRST);
- /* When the connection processing actually starts, we might to
+ /* When the connection processing actually starts, we might
* take over, if the connection is for a task.
*/
ap_hook_process_connection(h2_task_process_conn,
static int h2_task_pre_conn(conn_rec* c, void *arg)
{
+ h2_ctx *ctx;
- h2_ctx *ctx = h2_ctx_get(c);
+ if (!c->master) {
+ return OK;
+ }
+ ctx = h2_ctx_get(c, 0);
(void)arg;
if (h2_ctx_is_task(ctx)) {
h2_task *task = h2_ctx_get_task(ctx);
}
h2_task *h2_task_create(long session_id, const h2_request *req,
- apr_pool_t *pool, h2_mplx *mplx, int eos)
+ apr_pool_t *pool, h2_mplx *mplx)
{
- h2_task *task = apr_pcalloc(pool, sizeof(h2_task));
+ h2_task *task = apr_pcalloc(pool, sizeof(h2_task));
if (task == NULL) {
ap_log_perror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, pool,
APLOGNO(02941) "h2_task(%ld-%d): create stream task",
return NULL;
}
- task->id = apr_psprintf(pool, "%ld-%d", session_id, req->id);
- task->stream_id = req->id;
- task->pool = pool;
- task->mplx = mplx;
- task->c = h2_conn_create(mplx->c, task->pool);
+ task->id = apr_psprintf(pool, "%ld-%d", session_id, req->id);
+ task->stream_id = req->id;
+ task->mplx = mplx;
+ task->request = req;
+ task->input_eos = !req->body;
+ task->ser_headers = h2_config_geti(req->config, H2_CONF_SER_HEADERS);
- task->request = req;
- task->input_eos = eos;
-
return task;
}
-apr_status_t h2_task_destroy(h2_task *task)
+apr_status_t h2_task_do(h2_task *task, conn_rec *c, apr_thread_cond_t *cond,
+ apr_socket_t *socket)
{
- (void)task;
- return APR_SUCCESS;
-}
-
-apr_status_t h2_task_do(h2_task *task, h2_worker *worker)
-{
- apr_status_t status = APR_SUCCESS;
-
AP_DEBUG_ASSERT(task);
+ task->io = cond;
+ task->input = h2_task_input_create(task, c->pool, c->bucket_alloc);
+ task->output = h2_task_output_create(task, c->pool);
- task->serialize_headers = h2_config_geti(task->request->config, H2_CONF_SER_HEADERS);
-
- status = h2_worker_setup_task(worker, task);
-
- /* save in connection that this one is a pseudo connection */
- h2_ctx_create_for(task->c, task);
-
- if (status == APR_SUCCESS) {
- task->input = h2_task_input_create(task, task->pool,
- task->c->bucket_alloc);
- task->output = h2_task_output_create(task, task->pool);
-
- ap_process_connection(task->c, h2_worker_get_socket(worker));
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
- "h2_task(%s): processing done", task->id);
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, task->c,
- APLOGNO(02957) "h2_task(%s): error setting up h2_task",
- task->id);
- }
-
- if (task->input) {
- h2_task_input_destroy(task->input);
- task->input = NULL;
- }
+ ap_process_connection(c, socket);
- if (task->output) {
- h2_task_output_close(task->output);
- h2_task_output_destroy(task->output);
- task->output = NULL;
- }
-
- if (task->io) {
- apr_thread_cond_signal(task->io);
- }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): processing done", task->id);
- h2_worker_release_task(worker, task);
- h2_mplx_task_done(task->mplx, task->stream_id);
+ h2_task_input_destroy(task->input);
+ h2_task_output_close(task->output);
+ h2_task_output_destroy(task->output);
+ task->io = NULL;
- return status;
+ return APR_SUCCESS;
}
static apr_status_t h2_task_process_request(const h2_request *req, conn_rec *c)
static int h2_task_process_conn(conn_rec* c)
{
- h2_ctx *ctx = h2_ctx_get(c);
+ h2_ctx *ctx;
+ if (!c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_ctx_get(c, 0);
if (h2_ctx_is_task(ctx)) {
- if (!ctx->task->serialize_headers) {
+ if (!ctx->task->ser_headers) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
"h2_h2, processing request directly");
h2_task_process_request(ctx->task->request, c);
}
return DECLINED;
}
-
-
-
-
-
struct h2_task {
const char *id;
int stream_id;
- struct h2_mplx *mplx;
-
+ struct h2_mplx *mplx;
const struct h2_request *request;
- int input_eos;
-
- int serialize_headers;
-
- struct conn_rec *c;
-
- apr_pool_t *pool; /* pool for task lifetime things */
- apr_bucket_alloc_t *bucket_alloc;
+
+ unsigned int filters_set : 1;
+ unsigned int input_eos : 1;
+ unsigned int ser_headers : 1;
+
struct h2_task_input *input;
struct h2_task_output *output;
-
struct apr_thread_cond_t *io; /* used to wait for events on */
};
h2_task *h2_task_create(long session_id, const struct h2_request *req,
- apr_pool_t *pool, struct h2_mplx *mplx,
- int eos);
-
-apr_status_t h2_task_destroy(h2_task *task);
+ apr_pool_t *pool, struct h2_mplx *mplx);
-apr_status_t h2_task_do(h2_task *task, struct h2_worker *worker);
+apr_status_t h2_task_do(h2_task *task, conn_rec *c,
+ struct apr_thread_cond_t *cond, apr_socket_t *socket);
void h2_task_register_hooks(void);
input->task = task;
input->bb = NULL;
- if (task->serialize_headers) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ if (task->ser_headers) {
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool,
"h2_task_input(%s): serialize request %s %s",
task->id, task->request->method, task->request->path);
input->bb = apr_brigade_create(pool, bucket_alloc);
}
if ((bblen == 0) && input->task->input_eos) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task_input(%s): eos", input->task->id);
return APR_EOF;
}
never calling us again. */
status = h2_mplx_in_read(input->task->mplx, APR_BLOCK_READ,
input->task->stream_id, input->bb,
+ f->r? f->r->trailers_in : NULL,
input->task->io);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
"h2_task_input(%s): mplx in read returned",
if (!APR_BRIGADE_EMPTY(input->bb)) {
if (mode == AP_MODE_EXHAUSTIVE) {
/* return all we have */
- return h2_util_move(bb, input->bb, readbytes, NULL,
- "task_input_read(exhaustive)");
+ status = h2_util_move(bb, input->bb, readbytes, NULL,
+ "task_input_read(exhaustive)");
}
else if (mode == AP_MODE_READBYTES) {
- return h2_util_move(bb, input->bb, readbytes, NULL,
- "task_input_read(readbytes)");
+ status = h2_util_move(bb, input->bb, readbytes, NULL,
+ "task_input_read(readbytes)");
}
else if (mode == AP_MODE_SPECULATIVE) {
/* return not more than was asked for */
- return h2_util_copy(bb, input->bb, readbytes,
- "task_input_read(speculative)");
+ status = h2_util_copy(bb, input->bb, readbytes,
+ "task_input_read(speculative)");
}
else if (mode == AP_MODE_GETLINE) {
/* we are reading a single LF line, e.g. the HTTP headers */
"h2_task_input(%s): getline: %s",
input->task->id, buffer);
}
- return status;
}
else {
/* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
APLOGNO(02942)
"h2_task_input, unsupported READ mode %d", mode);
- return APR_ENOTIMPL;
+ status = APR_ENOTIMPL;
}
+
+ if (APLOGctrace1(f->c)) {
+ apr_brigade_length(bb, 0, &bblen);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_task_input(%s): return %ld data bytes",
+ input->task->id, (long)bblen);
+ }
+ return status;
}
if (is_aborted(f)) {
return APR_ECONNABORTED;
}
- return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
+ status = (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_task_input(%s): no data", input->task->id);
+ return status;
}
struct h2_task *task;
h2_task_output_state_t state;
struct h2_from_h1 *from_h1;
- int trailers_passed;
+ unsigned int trailers_passed : 1;
};
h2_task_output *h2_task_output_create(struct h2_task *task, apr_pool_t *pool);
#include <httpd.h>
#include <http_core.h>
-#include "h2_task.h"
#include "h2_task_queue.h"
{
AP_DEBUG_ASSERT(q->nalloc <= nlen);
if (nlen > q->nalloc) {
- int *nq = apr_pcalloc(q->pool, sizeof(h2_task *) * nlen);
+ int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
if (q->nelts > 0) {
int l = ((q->head + q->nelts) % q->nalloc) - q->head;
}
return i;
}
-
-
-
#ifndef __mod_h2__h2_task_queue__
#define __mod_h2__h2_task_queue__
-struct h2_task;
-
/**
* h2_task_queue keeps a list of sorted h2_task* in ascending order.
*/
}
-char *h2_strlwr(char *s)
-{
- char *p;
- for (p = s; *p; ++p) {
- if (*p >= 'A' && *p <= 'Z') {
- *p += 'a' - 'A';
- }
- }
- return s;
-}
-
void h2_util_camel_case_header(char *s, size_t len)
{
size_t start = 1;
}
}
-static const int BASE64URL_TABLE[] = {
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63, 52, 53, 54, 55, 56, 57,
- 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
- 25, -1, -1, -1, -1, -1, -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
- 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1
+static const int BASE64URL_UINT6[] = {
+/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 1 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, /* 2 */
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, /* 3 */
+ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 4 */
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, 63, /* 5 */
+ -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, /* 6 */
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1, /* 7 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 8 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 9 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* a */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* b */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* c */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* d */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* e */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 /* f */
+};
+static const char BASE64URL_CHARS[] = {
+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', /* 0 - 9 */
+ 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', /* 10 - 19 */
+ 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', /* 20 - 29 */
+ 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', /* 30 - 39 */
+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', /* 40 - 49 */
+ 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', /* 50 - 59 */
+ '8', '9', '-', '_', ' ', ' ', ' ', ' ', ' ', ' ', /* 60 - 69 */
};
apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded,
int n;
apr_size_t len, mlen, remain, i;
- while (*p && BASE64URL_TABLE[ *p ] == -1) {
+ while (*p && BASE64URL_UINT6[ *p ] != -1) {
++p;
}
len = p - e;
i = 0;
d = (unsigned char*)*decoded;
for (; i < mlen; i += 4) {
- n = ((BASE64URL_TABLE[ e[i+0] ] << 18) +
- (BASE64URL_TABLE[ e[i+1] ] << 12) +
- (BASE64URL_TABLE[ e[i+2] ] << 6) +
- BASE64URL_TABLE[ e[i+3] ]);
+ n = ((BASE64URL_UINT6[ e[i+0] ] << 18) +
+ (BASE64URL_UINT6[ e[i+1] ] << 12) +
+ (BASE64URL_UINT6[ e[i+2] ] << 6) +
+ (BASE64URL_UINT6[ e[i+3] ]));
*d++ = n >> 16;
*d++ = n >> 8 & 0xffu;
*d++ = n & 0xffu;
remain = len - mlen;
switch (remain) {
case 2:
- n = ((BASE64URL_TABLE[ e[mlen+0] ] << 18) +
- (BASE64URL_TABLE[ e[mlen+1] ] << 12));
+ n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
+ (BASE64URL_UINT6[ e[mlen+1] ] << 12));
*d++ = n >> 16;
break;
case 3:
- n = ((BASE64URL_TABLE[ e[mlen+0] ] << 18) +
- (BASE64URL_TABLE[ e[mlen+1] ] << 12) +
- (BASE64URL_TABLE[ e[mlen+2] ] << 6));
+ n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
+ (BASE64URL_UINT6[ e[mlen+1] ] << 12) +
+ (BASE64URL_UINT6[ e[mlen+2] ] << 6));
*d++ = n >> 16;
*d++ = n >> 8 & 0xffu;
break;
default: /* do nothing */
break;
}
- return len;
+ return mlen/4*3 + remain;
+}
+
+const char *h2_util_base64url_encode(const char *data,
+ apr_size_t len, apr_pool_t *pool)
+{
+ apr_size_t mlen = ((len+2)/3)*3;
+ apr_size_t slen = (mlen/3)*4;
+ apr_size_t i;
+ const unsigned char *udata = (const unsigned char*)data;
+ char *enc, *p = apr_pcalloc(pool, slen+1); /* 0 terminated */
+
+ enc = p;
+ for (i = 0; i < mlen; i+= 3) {
+ *p++ = BASE64URL_CHARS[ (udata[i] >> 2) & 0x3fu ];
+ *p++ = BASE64URL_CHARS[ (udata[i] << 4) +
+ ((i+1 < len)? (udata[i+1] >> 4) : 0) & 0x3fu ];
+ *p++ = BASE64URL_CHARS[ (udata[i+1] << 2) +
+ ((i+2 < len)? (udata[i+2] >> 6) : 0) & 0x3fu ];
+ if (i+2 < len) {
+ *p++ = BASE64URL_CHARS[ udata[i+2] & 0x3fu ];
+ }
+ }
+
+ return enc;
}
int h2_util_contains_token(apr_pool_t *pool, const char *s, const char *token)
static apr_status_t last_not_included(apr_bucket_brigade *bb,
apr_off_t maxlen,
int same_alloc,
- int *pfile_buckets_allowed,
+ apr_size_t *pfile_buckets_allowed,
apr_bucket **pend)
{
apr_bucket *b;
#define LOG_LEVEL APLOG_INFO
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_off_t maxlen, int *pfile_handles_allowed,
+ apr_off_t maxlen, apr_size_t *pfile_buckets_allowed,
const char *msg)
{
apr_status_t status = APR_SUCCESS;
AP_DEBUG_ASSERT(to);
AP_DEBUG_ASSERT(from);
- same_alloc = (to->bucket_alloc == from->bucket_alloc);
+ same_alloc = (to->bucket_alloc == from->bucket_alloc
+ || to->p == from->p);
if (!FILE_MOVE) {
- pfile_handles_allowed = NULL;
+ pfile_buckets_allowed = NULL;
}
if (!APR_BRIGADE_EMPTY(from)) {
apr_bucket *b, *end;
status = last_not_included(from, maxlen, same_alloc,
- pfile_handles_allowed, &end);
+ pfile_buckets_allowed, &end);
if (status != APR_SUCCESS) {
return status;
}
/* ignore */
}
}
- else if (pfile_handles_allowed
- && *pfile_handles_allowed > 0
+ else if (pfile_buckets_allowed
+ && *pfile_buckets_allowed > 0
&& APR_BUCKET_IS_FILE(b)) {
/* We do not want to read files when passing buckets, if
* we can avoid it. However, what we've come up so far
}
apr_brigade_insert_file(to, fd, b->start, b->length,
to->p);
- --(*pfile_handles_allowed);
+ --(*pfile_buckets_allowed);
}
else {
const char *data;
return status;
}
-int h2_util_has_flush_or_eos(apr_bucket_brigade *bb) {
+int h2_util_has_flush_or_eos(apr_bucket_brigade *bb)
+{
apr_bucket *b;
for (b = APR_BRIGADE_FIRST(bb);
b != APR_BRIGADE_SENTINEL(bb);
b != APR_BRIGADE_SENTINEL(bb);
b = APR_BUCKET_NEXT(b))
{
- if (!APR_BUCKET_IS_METADATA(b)) {
+ if (!AP_BUCKET_IS_EOR(b)) {
return 1;
}
}
} literal;
#define H2_DEF_LITERAL(n) { (n), (sizeof(n)-1) }
-#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
#define H2_LIT_ARGS(a) (a),H2_ALEN(a)
static literal IgnoredRequestHeaders[] = {
const char *name, size_t namelen,
const char *value, size_t valuelen);
-char *h2_strlwr(char *s);
-
void h2_util_camel_case_header(char *s, size_t len);
int h2_req_ignore_header(const char *name, size_t len);
apr_size_t h2_util_base64url_decode(const char **decoded,
const char *encoded,
apr_pool_t *pool);
+const char *h2_util_base64url_encode(const char *data,
+ apr_size_t len, apr_pool_t *pool);
#define H2_HD_MATCH_LIT(l, name, nlen) \
((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
* @param msg message for use in logging
*/
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_off_t maxlen, int *pfile_buckets_allowed,
+ apr_off_t maxlen, apr_size_t *pfile_buckets_allowed,
const char *msg);
/**
#ifndef mod_h2_h2_version_h
#define mod_h2_h2_version_h
+#undef PACKAGE_VERSION
+#undef PACKAGE_TARNAME
+#undef PACKAGE_STRING
+#undef PACKAGE_NAME
+#undef PACKAGE_BUGREPORT
+
/**
* @macro
- * Version number of the h2 module as c string
+ * Version number of the http2 module as c string
*/
-#define MOD_HTTP2_VERSION "1.0.11"
+#define MOD_HTTP2_VERSION "1.2.2"
/**
* @macro
- * Numerical representation of the version number of the h2 module
+ * Numerical representation of the version number of the http2 module
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_HTTP2_VERSION_NUM 0x01000b
+#define MOD_HTTP2_VERSION_NUM 0x010202
#endif /* mod_h2_h2_version_h */
#include <apr_thread_cond.h>
+#include <mpm_common.h>
#include <httpd.h>
#include <http_core.h>
#include <http_log.h>
#include "h2_private.h"
#include "h2_conn.h"
+#include "h2_ctx.h"
+#include "h2_h2.h"
#include "h2_mplx.h"
#include "h2_request.h"
#include "h2_task.h"
static void* APR_THREAD_FUNC execute(apr_thread_t *thread, void *wctx)
{
h2_worker *worker = (h2_worker *)wctx;
- apr_status_t status = APR_SUCCESS;
- h2_mplx *m;
- (void)thread;
+ apr_status_t status;
- /* Furthermore, other code might want to see the socket for
- * this connection. Allocate one without further function...
+ (void)thread;
+ /* Other code might want to see a socket for the connection this
+ * worker processes. Allocate one without further function...
*/
status = apr_socket_create(&worker->socket,
APR_INET, SOCK_STREAM,
return NULL;
}
- worker->task = NULL;
- m = NULL;
while (!worker->aborted) {
- status = worker->get_next(worker, &m, &worker->task, worker->ctx);
+ h2_mplx *m;
+ const h2_request *req;
- if (worker->task) {
- h2_task_do(worker->task, worker);
- worker->task = NULL;
- apr_thread_cond_signal(worker->io);
+ /* Get a h2_mplx + h2_request from the main workers queue. */
+ status = worker->get_next(worker, &m, &req, worker->ctx);
+
+ while (req) {
+ conn_rec *c, *master = m->c;
+ int stream_id = req->id;
+
+ c = h2_slave_create(master, worker->task_pool,
+ worker->thread, worker->socket);
+ if (!c) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c,
+ APLOGNO(02957) "h2_request(%ld-%d): error setting up slave connection",
+ m->id, stream_id);
+ h2_mplx_out_rst(m, stream_id, H2_ERR_INTERNAL_ERROR);
+ }
+ else {
+ h2_task *task;
+
+ task = h2_task_create(m->id, req, worker->task_pool, m);
+ h2_ctx_create_for(c, task);
+ h2_task_do(task, c, worker->io, worker->socket);
+ task = NULL;
+
+ apr_thread_cond_signal(worker->io);
+ }
+
+ /* clean our references and report request as done. Signal
+ * that we want another unless we have been aborted */
+ /* TODO: this will keep a worker attached to this h2_mplx as
+ * long as it has requests to handle. Might not be fair to
+ * other mplx's. Perhaps leave after n requests? */
+ req = NULL;
+ apr_pool_clear(worker->task_pool);
+ h2_mplx_request_done(&m, stream_id, worker->aborted? NULL : &req);
}
}
- status = worker->get_next(worker, &m, NULL, worker->ctx);
- m = NULL;
-
if (worker->socket) {
apr_socket_close(worker->socket);
worker->socket = NULL;
return NULL;
}
-static apr_status_t cleanup_join_thread(void *ctx)
-{
- h2_worker *w = ctx;
- /* do the join only when the worker is aborted. Otherwise,
- * we are probably in a process shutdown.
- */
- if (w->thread && w->aborted) {
- apr_status_t rv;
- apr_thread_join(&rv, w->thread);
- }
- return APR_SUCCESS;
-}
-
h2_worker *h2_worker_create(int id,
apr_pool_t *parent_pool,
apr_threadattr_t *attr,
h2_worker *w;
apr_status_t status;
- status = apr_allocator_create(&allocator);
- if (status != APR_SUCCESS) {
- return NULL;
- }
-
- status = apr_pool_create_ex(&pool, parent_pool, NULL, allocator);
- if (status != APR_SUCCESS) {
- return NULL;
- }
+ apr_allocator_create(&allocator);
+ apr_allocator_max_free_set(allocator, ap_max_mem_free);
+ apr_pool_create_ex(&pool, parent_pool, NULL, allocator);
apr_allocator_owner_set(allocator, pool);
w = apr_pcalloc(pool, sizeof(h2_worker));
return NULL;
}
- apr_pool_pre_cleanup_register(w->pool, w, cleanup_join_thread);
+ apr_pool_create(&w->task_pool, w->pool);
apr_thread_create(&w->thread, attr, execute, w, w->pool);
}
return w;
apr_status_t h2_worker_destroy(h2_worker *worker)
{
+ if (worker->thread) {
+ apr_status_t status;
+ apr_thread_join(&status, worker->thread);
+ worker->thread = NULL;
+ }
if (worker->io) {
apr_thread_cond_destroy(worker->io);
worker->io = NULL;
}
h2_task *h2_worker_create_task(h2_worker *worker, h2_mplx *m,
- const h2_request *req, int eos)
+ const h2_request *req)
{
h2_task *task;
- /* Create a subpool from the worker one to be used for all things
- * with life-time of this task execution.
- */
- if (!worker->task_pool) {
- apr_pool_create(&worker->task_pool, worker->pool);
- }
- task = h2_task_create(m->id, req, worker->task_pool, m, eos);
-
- /* Link the task to the worker which provides useful things such
- * as mutex, a socket etc. */
- task->io = worker->io;
-
+ task = h2_task_create(m->id, req, worker->task_pool, m);
return task;
}
-apr_status_t h2_worker_setup_task(h2_worker *worker, h2_task *task) {
- apr_status_t status;
-
-
- status = h2_conn_setup(task, apr_bucket_alloc_create(task->pool),
- worker->thread, worker->socket);
-
- return status;
-}
-
-void h2_worker_release_task(h2_worker *worker, struct h2_task *task)
-{
- task->io = NULL;
- task->pool = NULL;
- apr_pool_clear(worker->task_pool);
-}
-
-apr_socket_t *h2_worker_get_socket(h2_worker *worker)
-{
- return worker->socket;
-}
-
* gets aborted (idle timeout, for example). */
typedef apr_status_t h2_worker_mplx_next_fn(h2_worker *worker,
struct h2_mplx **pm,
- struct h2_task **ptask,
+ const struct h2_request **preq,
void *ctx);
/* Invoked just before the worker thread exits. */
h2_worker_done_fn *worker_done;
void *ctx;
- int aborted;
- struct h2_task *task;
+ unsigned int aborted : 1;
};
/**
int h2_worker_is_aborted(h2_worker *worker);
struct h2_task *h2_worker_create_task(h2_worker *worker, struct h2_mplx *m,
- const struct h2_request *req, int eos);
-apr_status_t h2_worker_setup_task(h2_worker *worker, struct h2_task *task);
-void h2_worker_release_task(h2_worker *worker, struct h2_task *task);
-
-apr_socket_t *h2_worker_get_socket(h2_worker *worker);
-
+ const struct h2_request *req);
+
#endif /* defined(__mod_h2__h2_worker__) */
#include <apr_thread_mutex.h>
#include <apr_thread_cond.h>
+#include <mpm_common.h>
#include <httpd.h>
#include <http_core.h>
#include <http_log.h>
#include "h2_private.h"
#include "h2_mplx.h"
-#include "h2_task.h"
+#include "h2_request.h"
#include "h2_task_queue.h"
#include "h2_worker.h"
#include "h2_workers.h"
+
static int in_list(h2_workers *workers, h2_mplx *m)
{
h2_mplx *e;
return 0;
}
-static void cleanup_zombies(h2_workers *workers, int lock) {
+static void cleanup_zombies(h2_workers *workers, int lock)
+{
if (lock) {
apr_thread_mutex_lock(workers->lock);
}
while (!H2_WORKER_LIST_EMPTY(&workers->zombies)) {
h2_worker *zombie = H2_WORKER_LIST_FIRST(&workers->zombies);
H2_WORKER_REMOVE(zombie);
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s,
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
"h2_workers: cleanup zombie %d", zombie->id);
h2_worker_destroy(zombie);
}
}
}
-
/**
* Get the next task for the given worker. Will block until a task arrives
* or the max_wait timer expires and more than min workers exist.
* the h2_workers lock.
*/
static apr_status_t get_mplx_next(h2_worker *worker, h2_mplx **pm,
- h2_task **ptask, void *ctx)
+ const h2_request **preq, void *ctx)
{
apr_status_t status;
- h2_mplx *m = NULL;
- h2_task *task = NULL;
apr_time_t max_wait, start_wait;
- int has_more = 0;
h2_workers *workers = (h2_workers *)ctx;
- if (*pm && ptask != NULL) {
- /* We have a h2_mplx instance and the worker wants the next task.
- * Try to get one from the given mplx. */
- *ptask = h2_mplx_pop_task(*pm, worker, &has_more);
- if (*ptask) {
- return APR_SUCCESS;
- }
- }
-
- if (*pm) {
- /* Got a mplx handed in, but did not get or want a task from it.
- * Release it, as the workers reference will be wiped.
- */
- h2_mplx_release(*pm);
- *pm = NULL;
- }
-
- if (!ptask) {
- /* the worker does not want a next task, we're done.
- */
- return APR_SUCCESS;
- }
-
max_wait = apr_time_from_sec(apr_atomic_read32(&workers->max_idle_secs));
start_wait = apr_time_now();
status = apr_thread_mutex_lock(workers->lock);
if (status == APR_SUCCESS) {
+ const h2_request *req = NULL;
+ h2_mplx *m = NULL;
+ int has_more = 0;
+
++workers->idle_worker_count;
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s,
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
"h2_worker(%d): looking for work", h2_worker_get_id(worker));
- while (!task && !h2_worker_is_aborted(worker) && !workers->aborted) {
+ while (!req && !h2_worker_is_aborted(worker) && !workers->aborted) {
/* Get the next h2_mplx to process that has a task to hand out.
* If it does, place it at the end of the queu and return the
* we do a timed wait or block indefinitely.
*/
m = NULL;
- while (!task && !H2_MPLX_LIST_EMPTY(&workers->mplxs)) {
+ while (!req && !H2_MPLX_LIST_EMPTY(&workers->mplxs)) {
m = H2_MPLX_LIST_FIRST(&workers->mplxs);
H2_MPLX_REMOVE(m);
- task = h2_mplx_pop_task(m, worker, &has_more);
- if (task) {
+ req = h2_mplx_pop_request(m, &has_more);
+ if (req) {
if (has_more) {
H2_MPLX_LIST_INSERT_TAIL(&workers->mplxs, m);
}
}
}
- if (!task) {
- /* Need to wait for either a new mplx to arrive.
+ if (!req) {
+ /* Need to wait for a new mplx to arrive.
*/
cleanup_zombies(workers, 0);
if (now >= (start_wait + max_wait)) {
/* waited long enough without getting a task. */
if (workers->worker_count > workers->min_size) {
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0,
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0,
workers->s,
"h2_workers: aborting idle worker");
h2_worker_abort(worker);
break;
}
}
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s,
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
"h2_worker(%d): waiting signal, "
"worker_count=%d", worker->id,
(int)workers->worker_count);
workers->lock, max_wait);
}
else {
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s,
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
"h2_worker(%d): waiting signal (eternal), "
"worker_count=%d", worker->id,
(int)workers->worker_count);
/* Here, we either have gotten task and mplx for the worker or
* needed to give up with more than enough workers.
*/
- if (task) {
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s,
- "h2_worker(%d): start task(%s)",
- h2_worker_get_id(worker), task->id);
- /* Since we hand out a reference to the worker, we increase
- * its ref count.
- */
- h2_mplx_reference(m);
+ if (req) {
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+ "h2_worker(%d): start request(%ld-%d)",
+ h2_worker_get_id(worker), m->id, req->id);
*pm = m;
- *ptask = task;
+ *preq = req;
if (has_more && workers->idle_worker_count > 1) {
apr_thread_cond_signal(workers->mplx_added);
h2_workers *workers = (h2_workers *)ctx;
apr_status_t status = apr_thread_mutex_lock(workers->lock);
if (status == APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s,
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
"h2_worker(%d): done", h2_worker_get_id(worker));
H2_WORKER_REMOVE(worker);
--workers->worker_count;
}
}
-
static apr_status_t add_worker(h2_workers *workers)
{
h2_worker *w = h2_worker_create(workers->next_worker_id++,
if (!w) {
return APR_ENOMEM;
}
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s,
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
"h2_workers: adding worker(%d)", h2_worker_get_id(w));
++workers->worker_count;
H2_WORKER_LIST_INSERT_TAIL(&workers->workers, w);
return APR_SUCCESS;
}
-static apr_status_t h2_workers_start(h2_workers *workers) {
+static apr_status_t h2_workers_start(h2_workers *workers)
+{
apr_status_t status = apr_thread_mutex_lock(workers->lock);
if (status == APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, workers->s,
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
"h2_workers: starting");
while (workers->worker_count < workers->min_size
}
h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool,
- int min_size, int max_size)
+ int min_size, int max_size,
+ apr_size_t max_tx_handles)
{
apr_status_t status;
h2_workers *workers;
workers->max_size = max_size;
apr_atomic_set32(&workers->max_idle_secs, 10);
+ workers->max_tx_handles = max_tx_handles;
+ workers->spare_tx_handles = workers->max_tx_handles;
+
apr_threadattr_create(&workers->thread_attr, workers->pool);
+ if (ap_thread_stacksize != 0) {
+ apr_threadattr_stacksize_set(workers->thread_attr,
+ ap_thread_stacksize);
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "h2_workers: using stacksize=%ld",
+ (long)ap_thread_stacksize);
+ }
APR_RING_INIT(&workers->workers, h2_worker, link);
APR_RING_INIT(&workers->zombies, h2_worker, link);
status = apr_thread_cond_create(&workers->mplx_added, workers->pool);
}
+ if (status == APR_SUCCESS) {
+ status = apr_thread_mutex_create(&workers->tx_lock,
+ APR_THREAD_MUTEX_DEFAULT,
+ workers->pool);
+ }
+
if (status == APR_SUCCESS) {
status = h2_workers_start(workers);
}
{
apr_status_t status = apr_thread_mutex_lock(workers->lock);
if (status == APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_TRACE2, status, workers->s,
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, status, workers->s,
"h2_workers: register mplx(%ld)", m->id);
if (in_list(workers, m)) {
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+ "h2_workers: already registered mplx(%ld)", m->id);
status = APR_EAGAIN;
}
else {
if (workers->idle_worker_count > 0) {
apr_thread_cond_signal(workers->mplx_added);
}
- else if (workers->worker_count < workers->max_size) {
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s,
+ else if (status == APR_SUCCESS
+ && workers->worker_count < workers->max_size) {
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
"h2_workers: got %d worker, adding 1",
workers->worker_count);
add_worker(workers);
}
-
- /* cleanup any zombie workers that may have accumulated */
- cleanup_zombies(workers, 0);
-
apr_thread_mutex_unlock(workers->lock);
}
return status;
H2_MPLX_REMOVE(m);
status = APR_SUCCESS;
}
- /* cleanup any zombie workers that may have accumulated */
- cleanup_zombies(workers, 0);
-
apr_thread_mutex_unlock(workers->lock);
}
return status;
apr_atomic_set32(&workers->max_idle_secs, idle_secs);
}
+/* Reserve up to 'count' of the shared tx (file transfer) handles.
+ * Under tx_lock, grants min(spare_tx_handles, count), decrements the
+ * spare pool and returns the number actually granted; returns 0 when
+ * the lock cannot be acquired. Callers must return granted handles
+ * via h2_workers_tx_free(). */
+apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count)
+{
+    apr_status_t status = apr_thread_mutex_lock(workers->tx_lock);
+    if (status == APR_SUCCESS) {
+        /* never grant more than are currently spare */
+        count = H2MIN(workers->spare_tx_handles, count);
+        workers->spare_tx_handles -= count;
+        ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s,
+                     "h2_workers: reserved %d tx handles, %d/%d left",
+                     (int)count, (int)workers->spare_tx_handles,
+                     (int)workers->max_tx_handles);
+        apr_thread_mutex_unlock(workers->tx_lock);
+        return count;
+    }
+    return 0;
+}
+
+/* Return 'count' previously reserved tx handles to the spare pool.
+ * NOTE(review): if tx_lock cannot be acquired, the handles are silently
+ * not returned (pool shrinks permanently) — confirm this is acceptable. */
+void h2_workers_tx_free(h2_workers *workers, apr_size_t count)
+{
+    apr_status_t status = apr_thread_mutex_lock(workers->tx_lock);
+    if (status == APR_SUCCESS) {
+        workers->spare_tx_handles += count;
+        ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s,
+                     "h2_workers: freed %d tx handles, %d/%d left",
+                     (int)count, (int)workers->spare_tx_handles,
+                     (int)workers->max_tx_handles);
+        apr_thread_mutex_unlock(workers->tx_lock);
+    }
+}
+
struct apr_thread_mutex_t;
struct apr_thread_cond_t;
struct h2_mplx;
+struct h2_request;
struct h2_task;
struct h2_task_queue;
struct h2_workers {
server_rec *s;
apr_pool_t *pool;
- int aborted;
int next_worker_id;
int min_size;
int max_size;
+ apr_size_t max_tx_handles;
+ apr_size_t spare_tx_handles;
+
+ unsigned int aborted : 1;
+
apr_threadattr_t *thread_attr;
APR_RING_HEAD(h2_worker_list, h2_worker) workers;
struct apr_thread_mutex_t *lock;
struct apr_thread_cond_t *mplx_added;
+
+ struct apr_thread_mutex_t *tx_lock;
};
* threads.
*/
h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool,
- int min_size, int max_size);
+ int min_size, int max_size,
+ apr_size_t max_tx_handles);
/* Destroy the worker pool and all its threads.
*/
* out of tasks, it will be automatically be unregistered. Should
* new tasks arrive, it needs to be registered again.
*/
-apr_status_t h2_workers_register(h2_workers *workers,
- struct h2_mplx *m);
+apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m);
/**
* Remove a h2_mplx from the worker registry.
*/
-apr_status_t h2_workers_unregister(h2_workers *workers,
- struct h2_mplx *m);
+apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m);
/**
* Set the amount of seconds a h2_worker should wait for new tasks
*/
void h2_workers_set_max_idle_secs(h2_workers *workers, int idle_secs);
+/**
+ * Reservation of file handles available for transfer between workers
+ * and master connections.
+ *
+ * When handling output from request processing, file handles are often
+ * encountered when static files are served. The most efficient way is then
+ * to forward the handle itself to the master connection where it can be
+ * read or sendfile'd to the client. But file handles are a scarce resource,
+ * so there needs to be a limit on how many handles are transferred this way.
+ *
+ * h2_workers keeps track of the number of reserved handles and observes a
+ * configurable maximum value.
+ *
+ * @param workers the workers instance
+ * @param count how many handles the caller wishes to reserve
+ * @return the number of reserved handles, may be 0.
+ */
+apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count);
+
+/**
+ * Return a number of reserved file handles back to the pool. The number
+ * overall may not exceed the numbers reserved.
+ * @param workers the workers instance
+ * @param count how many handles are returned to the pool
+ */
+void h2_workers_tx_free(h2_workers *workers, apr_size_t count);
+
#endif /* defined(__mod_h2__h2_workers__) */
#include <apr_want.h>
#include <httpd.h>
+#include <http_protocol.h>
+#include <http_request.h>
#include <http_log.h>
-#include "mod_h2.h"
+#include "mod_http2.h"
#include <nghttp2/nghttp2.h>
#include "h2_stream.h"
#include "h2_alt_svc.h"
#include "h2_conn.h"
+#include "h2_filter.h"
#include "h2_task.h"
#include "h2_session.h"
#include "h2_config.h"
#include "h2_ctx.h"
#include "h2_h2.h"
+#include "h2_push.h"
+#include "h2_request.h"
#include "h2_switch.h"
#include "h2_version.h"
h2_hooks
};
+static int h2_h2_fixups(request_rec *r);
+
/* The module initialization. Called once as apache hook, before any multi
* processing (threaded or not) happens. It is typically at least called twice,
* see
return status;
}
+static char *http2_var_lookup(apr_pool_t *, server_rec *,
+ conn_rec *, request_rec *, char *name);
+static int http2_is_h2(conn_rec *);
+
/* Runs once per created child process. Perform any process
* related initionalization here.
*/
ap_log_error(APLOG_MARK, APLOG_ERR, status, s,
APLOGNO(02949) "initializing connection handling");
}
+
+ APR_REGISTER_OPTIONAL_FN(http2_is_h2);
+ APR_REGISTER_OPTIONAL_FN(http2_var_lookup);
}
/* Install this module into the apache2 infrastructure.
{
static const char *const mod_ssl[] = { "mod_ssl.c", NULL};
- ap_log_perror(APLOG_MARK, APLOG_INFO, 0, pool, "installing hooks");
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks");
/* Run once after configuration is set, but before mpm children initialize.
*/
h2_alt_svc_register_hooks();
+ /* Setup subprocess env for certain variables
+ */
+ ap_hook_fixups(h2_h2_fixups, NULL,NULL, APR_HOOK_MIDDLE);
+
+ /* test http2 connection status handler */
+ ap_hook_handler(h2_filter_h2_status_handler, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+/* Lookup for the HTTP2 variable: "on" when a connection is given and it
+ * (or its master) carries an h2 context, "off" otherwise. */
+static char *value_of_HTTP2(apr_pool_t *p, server_rec *s,
+                            conn_rec *c, request_rec *r)
+{
+    return c && http2_is_h2(c)? "on" : "off";
+}
+
+/* Lookup for the H2PUSH variable. Checks, in order of specificity:
+ * - request: push policy of the task's request is not H2_PUSH_NONE
+ * - connection: the session currently has push enabled
+ * - server: the H2_CONF_PUSH configuration directive
+ * Falls back to "off" when none of the three is available. */
+static char *value_of_H2PUSH(apr_pool_t *p, server_rec *s,
+                             conn_rec *c, request_rec *r)
+{
+    h2_ctx *ctx;
+    if (r) {
+        ctx = h2_ctx_rget(r);
+        if (ctx) {
+            h2_task *task = h2_ctx_get_task(ctx);
+            return (task && task->request->push_policy != H2_PUSH_NONE)? "on" : "off";
+        }
+    }
+    else if (c) {
+        ctx = h2_ctx_get(c, 0);
+        /* NOTE(review): assumes ctx->session is valid whenever a ctx
+         * exists on the connection — confirm h2_session_push_enabled
+         * tolerates a NULL session. */
+        return ctx && h2_session_push_enabled(ctx->session)? "on" : "off";
+    }
+    else if (s) {
+        const h2_config *cfg = h2_config_sget(s);
+        return cfg && h2_config_geti(cfg, H2_CONF_PUSH)? "on" : "off";
+    }
+    return "off";
+}
+/* Signature of a per-variable lookup function. */
+typedef char *h2_var_lookup(apr_pool_t *p, server_rec *s,
+                            conn_rec *c, request_rec *r);
+/* One exported HTTP/2 environment variable: its name, how to compute
+ * its value, and whether it is exported to r->subprocess_env. */
+typedef struct h2_var_def {
+    const char *name;
+    h2_var_lookup *lookup;
+    unsigned int subprocess : 1; /* should be set in r->subprocess_env */
+} h2_var_def;
+/* Table of all variables served by http2_var_lookup()/h2_h2_fixups(). */
+static h2_var_def H2_VARS[] = {
+    { "HTTP2", value_of_HTTP2, 1 },
+    { "H2PUSH", value_of_H2PUSH, 1 },
+};
+
+/* Number of elements in a fixed-size array. */
+#ifndef H2_ALEN
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+#endif
+
+
+/* Optional function: non-zero when the connection — or, for slave
+ * connections, its master — has an h2 context, i.e. speaks HTTP/2. */
+static int http2_is_h2(conn_rec *c)
+{
+    return h2_ctx_get(c->master? c->master : c, 0) != NULL;
+}
+
+/* Optional function: resolve an HTTP/2 environment variable by name via
+ * linear scan of H2_VARS. Returns "" for unknown names (never NULL). */
+static char *http2_var_lookup(apr_pool_t *p, server_rec *s,
+                              conn_rec *c, request_rec *r, char *name)
+{
+    int i;
+    /* If the # of vars grow, we need to put definitions in a hash */
+    for (i = 0; i < H2_ALEN(H2_VARS); ++i) {
+        h2_var_def *vdef = &H2_VARS[i];
+        if (!strcmp(vdef->name, name)) {
+            return vdef->lookup(p, s, c, r);
+        }
+    }
+    return "";
+}
+
+/* Fixups hook: on HTTP/2 slave connections (r->connection->master set),
+ * export every subprocess-flagged H2_VARS entry into r->subprocess_env
+ * so SSI/CGI can see them. Always returns DECLINED so later fixup
+ * hooks still run. */
+static int h2_h2_fixups(request_rec *r)
+{
+    if (r->connection->master) {
+        h2_ctx *ctx = h2_ctx_rget(r);
+        int i;
+
+        /* skipped entirely when no h2 context is attached to the request */
+        for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {
+            h2_var_def *vdef = &H2_VARS[i];
+            if (vdef->subprocess) {
+                apr_table_setn(r->subprocess_env, vdef->name,
+                               vdef->lookup(r->pool, r->server, r->connection, r));
+            }
+        }
+    }
+    return DECLINED;
+}
--- /dev/null
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef mod_http2_mod_http2_h
+#define mod_http2_mod_http2_h
+
+/** The http2_var_lookup() optional function retrieves HTTP2 environment
+ * variables. */
+APR_DECLARE_OPTIONAL_FN(char *, http2_var_lookup,
+ (apr_pool_t *, server_rec *,
+ conn_rec *, request_rec *,
+ char *));
+
+/** An optional function which returns non-zero if the given connection
+ * or its master connection is using HTTP/2. */
+APR_DECLARE_OPTIONAL_FN(int, http2_is_h2, (conn_rec *));
+
+#endif