-*- coding: utf-8 -*-
Changes with Apache 2.4.44
+ *) mod_http2: The module now handles master/secondary connections and marks
+ its methods according to which connection type they are used on. [Stefan Eissing]
+
*) core: Drop an invalid Last-Modified header value coming
from an FCGI/CGI script instead of replacing it with the Unix epoch.
[Luca Toscano]
PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
[ start all new proposals below, under PATCHES PROPOSED. ]
- *) mod_http2: connection terminology renamed to master/secondary.
- trunk patch: http://svn.apache.org/r1878926
- http://svn.apache.org/r1879156
- 2.4.x patch: https://svn.apache.org/repos/asf/httpd/httpd/patches/2.4.x/h2-master-secondary.patch
- +1: icing, ylavic, minfrin
- ylavic: nitpicking, mixed "H2_secondary_IN" and "H2_secondary_OUT" case to
- register the filters, but not for adding them. IIRC filter names
- are case insensitive so it shouldn't matter, just popped at my eyes..
- icing: updated patch and added r1879156 to fix the eye bleed.
- jailletc36: CHANGES could also be looked at if it makes sense to update the
- terminology here as well
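      [editor's aside: a minimal sketch of the case point above, assuming
       httpd's filter-name lookup is indeed case-insensitive as recalled;
       the mixed-case spelling is only illustrative and was unified by
       r1879156.]

           ap_register_input_filter("H2_secondary_IN", h2_filter_secondary_in,
                                    NULL, AP_FTYPE_NETWORK);
           /* ... later, on the secondary connection, a differently cased
            * name should still resolve to the filter registered above: */
           ap_add_input_filter("H2_SECONDARY_IN", NULL, NULL, c);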
PATCHES PROPOSED TO BACKPORT FROM TRUNK:
[ New proposals should be added at the end of the list ]
ap_register_input_filter("H2_IN", h2_filter_core_input,
NULL, AP_FTYPE_CONNECTION);
- status = h2_mplx_child_init(pool, s);
+ status = h2_mplx_m_child_init(pool, s);
if (status == APR_SUCCESS) {
status = apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM,
return DONE;
}
-conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
+conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent)
{
apr_allocator_t *allocator;
apr_status_t status;
ap_assert(master);
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
- "h2_stream(%ld-%d): create slave", master->id, slave_id);
+ "h2_stream(%ld-%d): create secondary", master->id, sec_id);
/* We create a pool with its own allocator to be used for
* processing a request. This is the only way to have the processing
status = apr_pool_create_ex(&pool, parent, NULL, allocator);
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master,
- APLOGNO(10004) "h2_session(%ld-%d): create slave pool",
- master->id, slave_id);
+ APLOGNO(10004) "h2_session(%ld-%d): create secondary pool",
+ master->id, sec_id);
return NULL;
}
apr_allocator_owner_set(allocator, pool);
- apr_pool_tag(pool, "h2_slave_conn");
+ apr_pool_tag(pool, "h2_secondary_conn");
c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
if (c == NULL) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master,
- APLOGNO(02913) "h2_session(%ld-%d): create slave",
- master->id, slave_id);
+ APLOGNO(02913) "h2_session(%ld-%d): create secondary",
+ master->id, sec_id);
apr_pool_destroy(pool);
return NULL;
}
c->clogging_input_filters = 1;
c->log = NULL;
c->log_id = apr_psprintf(pool, "%ld-%d",
- master->id, slave_id);
+ master->id, sec_id);
c->aborted = 0;
- /* We cannot install the master connection socket on the slaves, as
+ /* We cannot install the master connection socket on the secondary, as
* modules mess with timeouts/blocking of the socket, with
* unwanted side effects to the master connection processing.
- * Fortunately, since we never use the slave socket, we can just install
+ * Fortunately, since we never use the secondary socket, we can just install
* a single, process-wide dummy and everyone is happy.
*/
ap_set_module_config(c->conn_config, &core_module, dummy_socket);
/* TODO: these should be unique to this thread */
c->sbh = master->sbh;
- /* TODO: not all mpm modules have learned about slave connections yet.
- * copy their config from master to slave.
+ /* TODO: not all mpm modules have learned about secondary connections yet.
+ * copy their config from master to secondary.
*/
if ((mpm = h2_conn_mpm_module()) != NULL) {
cfg = ap_get_module_config(master->conn_config, mpm);
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
- "h2_slave(%s): created", c->log_id);
+ "h2_secondary(%s): created", c->log_id);
return c;
}
-void h2_slave_destroy(conn_rec *slave)
+void h2_secondary_destroy(conn_rec *secondary)
{
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, slave,
- "h2_slave(%s): destroy", slave->log_id);
- slave->sbh = NULL;
- apr_pool_destroy(slave->pool);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, secondary,
+ "h2_secondary(%s): destroy", secondary->log_id);
+ secondary->sbh = NULL;
+ apr_pool_destroy(secondary->pool);
}
-apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
+apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd)
{
- if (slave->keepalives == 0) {
+ if (secondary->keepalives == 0) {
/* Simulate that we already had a request on this connection. Some
* hooks trigger special behaviour when keepalives is 0.
* (Not necessarily in pre_connection, but later. Set it here, so it
* is in place.) */
- slave->keepalives = 1;
+ secondary->keepalives = 1;
/* We signal that this connection will be closed after the request.
* Which is true in that sense that we throw away all traffic data
- * on this slave connection after each requests. Although we might
+ * on this secondary connection after each request. Although we might
* reuse internal structures like memory pools.
* The wanted effect of this is that httpd does not try to clean up
* any dangling data on this connection when a request is done. Which
* is unnecessary on a h2 stream.
*/
- slave->keepalive = AP_CONN_CLOSE;
- return ap_run_pre_connection(slave, csd);
+ secondary->keepalive = AP_CONN_CLOSE;
+ return ap_run_pre_connection(secondary, csd);
}
- ap_assert(slave->output_filters);
+ ap_assert(secondary->output_filters);
return APR_SUCCESS;
}
const char *h2_conn_mpm_name(void);
int h2_mpm_supported(void);
-conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent);
-void h2_slave_destroy(conn_rec *slave);
+conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent);
+void h2_secondary_destroy(conn_rec *secondary);
-apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd);
-void h2_slave_run_connection(conn_rec *slave);
+apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd);
+void h2_secondary_run_connection(conn_rec *secondary);
#endif /* defined(__mod_h2__h2_conn__) */
x.s = s;
x.idx = 0;
bbout(bb, " \"streams\": {");
- h2_mplx_stream_do(s->mplx, add_stream, &x);
+ h2_mplx_m_stream_do(s->mplx, add_stream, &x);
bbout(bb, "\n }%s\n", last? "" : ",");
}
static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b)
{
h2_mplx *m = task->mplx;
- h2_stream *stream = h2_mplx_stream_get(m, task->stream_id);
+ h2_stream *stream = h2_mplx_t_stream_get(m, task);
h2_session *s;
conn_rec *c;
{
h2_ctx *ctx;
- /* slave connection? */
+ /* secondary connection? */
if (c->master) {
return DECLINED;
}
static int h2_h2_post_read_req(request_rec *r)
{
- /* slave connection? */
+ /* secondary connection? */
if (r->connection->master) {
struct h2_task *task = h2_ctx_get_task(r->connection);
/* This hook will get called twice on internal redirects. Take care
ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
for (f = r->input_filters; f; f = f->next) {
- if (!strcmp("H2_SLAVE_IN", f->frec->name)) {
+ if (!strcmp("H2_SECONDARY_IN", f->frec->name)) {
f->r = r;
break;
}
static int h2_h2_late_fixups(request_rec *r)
{
- /* slave connection? */
+ /* secondary connection? */
if (r->connection->master) {
struct h2_task *task = h2_ctx_get_task(r->connection);
if (task) {
task->output.copy_files = h2_config_rgeti(r, H2_CONF_COPY_FILES);
if (task->output.copy_files) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
- "h2_slave_out(%s): copy_files on", task->id);
+ "h2_secondary_out(%s): copy_files on", task->id);
h2_beam_on_file_beam(task->output.beam, h2_beam_no_files, NULL);
}
check_push(r, "late_fixup");
apr_size_t count;
} stream_iter_ctx;
-static apr_status_t mplx_be_happy(h2_mplx *m);
-static apr_status_t mplx_be_annoyed(h2_mplx *m);
+/**
+ * Naming convention for static functions:
+ * - m_*: function only called from the master connection
+ * - s_*: function only called from a secondary connection
+ * - t_*: function only called from a h2_task holder
+ * - mst_*: function called from everyone
+ */
-apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
+static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task);
+static apr_status_t m_be_annoyed(h2_mplx *m);
+
+apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s)
{
return APR_SUCCESS;
}
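/* Editor's sketch, not part of the patch: one way to read the prefix convention
 * above at runtime. conn_rec->master is NULL on the master connection and set on
 * secondary ones, so an m_* helper could (hypothetically) assert its calling
 * context like this: */
static void sketch_assert_master_context(h2_mplx *m, conn_rec *c)
{
    ap_assert(c == m->c);           /* only the mplx's own (master) connection */
    ap_assert(c->master == NULL);   /* which is never itself a secondary one */
}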
#define H2_MPLX_LEAVE_MAYBE(m, dolock) \
if (dolock) apr_thread_mutex_unlock(m->lock)
-static void check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked);
+static void mst_check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked);
-static void stream_output_consumed(void *ctx,
- h2_bucket_beam *beam, apr_off_t length)
+static void mst_stream_output_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
{
}
-static void stream_input_ev(void *ctx, h2_bucket_beam *beam)
+static void mst_stream_input_ev(void *ctx, h2_bucket_beam *beam)
{
h2_stream *stream = ctx;
h2_mplx *m = stream->session->mplx;
apr_atomic_set32(&m->event_pending, 1);
}
-static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
+static void m_stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
{
h2_stream_in_consumed(ctx, length);
}
-static void stream_joined(h2_mplx *m, h2_stream *stream)
+static void ms_stream_joined(h2_mplx *m, h2_stream *stream)
{
ap_assert(!h2_task_has_started(stream->task) || stream->task->worker_done);
h2_ihash_add(m->spurge, stream);
}
-static void stream_cleanup(h2_mplx *m, h2_stream *stream)
+static void m_stream_cleanup(h2_mplx *m, h2_stream *stream)
{
ap_assert(stream->state == H2_SS_CLEANUP);
h2_iq_remove(m->q, stream->id);
if (!h2_task_has_started(stream->task) || stream->task->done_done) {
- stream_joined(m, stream);
+ ms_stream_joined(m, stream);
}
else {
h2_ififo_remove(m->readyq, stream->id);
* their HTTP/1 cousins, the separate allocator seems to work better
* than protecting a shared h2_session one with an own lock.
*/
-h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *parent,
- h2_workers *workers)
+h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *parent,
+ h2_workers *workers)
{
apr_status_t status = APR_SUCCESS;
apr_allocator_t *allocator;
m->s = s;
/* We create a pool with its own allocator to be used for
- * processing slave connections. This is the only way to have the
+ * processing secondary connections. This is the only way to have the
* processing independent of its parent pool in the sense that it
* can work in another thread. Also, the new allocator needs its own
* mutex to synchronize sub-pools.
m->last_mood_change = apr_time_now();
m->mood_update_interval = apr_time_from_msec(100);
- m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
+ m->spare_secondary = apr_array_make(m->pool, 10, sizeof(conn_rec*));
}
return m;
}
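/* Editor's sketch, not part of the patch: the pool-with-own-allocator pattern the
 * comment above refers to, assuming APR built with thread support (apr_pools.h,
 * apr_allocator.h, apr_thread_mutex.h). The allocator gets its own mutex so
 * sub-pools can be created from other threads, and ownership is handed to the new
 * pool so both are destroyed together. */
static apr_pool_t *sketch_create_independent_pool(apr_pool_t *parent)
{
    apr_allocator_t *allocator = NULL;
    apr_thread_mutex_t *mutex = NULL;
    apr_pool_t *pool = NULL;

    if (apr_allocator_create(&allocator) != APR_SUCCESS) {
        return NULL;
    }
    if (apr_pool_create_ex(&pool, parent, NULL, allocator) != APR_SUCCESS) {
        apr_allocator_destroy(allocator);
        return NULL;
    }
    apr_allocator_owner_set(allocator, pool);      /* pool now owns the allocator */
    if (apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool) == APR_SUCCESS) {
        apr_allocator_mutex_set(allocator, mutex); /* safe sub-pool creation across threads */
    }
    apr_pool_tag(pool, "sketch_pool");
    return pool;
}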
-int h2_mplx_shutdown(h2_mplx *m)
+int h2_mplx_m_shutdown(h2_mplx *m)
{
int max_stream_started = 0;
return max_stream_started;
}
-static int input_consumed_signal(h2_mplx *m, h2_stream *stream)
+static int m_input_consumed_signal(h2_mplx *m, h2_stream *stream)
{
if (stream->input) {
return h2_beam_report_consumption(stream->input);
return 0;
}
-static int report_consumption_iter(void *ctx, void *val)
+static int m_report_consumption_iter(void *ctx, void *val)
{
h2_stream *stream = val;
h2_mplx *m = ctx;
- input_consumed_signal(m, stream);
+ m_input_consumed_signal(m, stream);
if (stream->state == H2_SS_CLOSED_L
&& (!stream->task || stream->task->worker_done)) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
return 1;
}
-static int output_consumed_signal(h2_mplx *m, h2_task *task)
+static int s_output_consumed_signal(h2_mplx *m, h2_task *task)
{
if (task->output.beam) {
return h2_beam_report_consumption(task->output.beam);
return 0;
}
-static int stream_destroy_iter(void *ctx, void *val)
+static int m_stream_destroy_iter(void *ctx, void *val)
{
h2_mplx *m = ctx;
h2_stream *stream = val;
if (stream->input) {
/* Process outstanding events before destruction */
- input_consumed_signal(m, stream);
+ m_input_consumed_signal(m, stream);
h2_beam_log(stream->input, m->c, APLOG_TRACE2, "stream_destroy");
h2_beam_destroy(stream->input);
stream->input = NULL;
if (stream->task) {
h2_task *task = stream->task;
- conn_rec *slave;
- int reuse_slave = 0;
+ conn_rec *secondary;
+ int reuse_secondary = 0;
stream->task = NULL;
- slave = task->c;
- if (slave) {
+ secondary = task->c;
+ if (secondary) {
/* On non-serialized requests, the IO logging has not accounted for any
* meta data sent over the network: response headers and h2 frame headers. We
* counted this on the stream and need to add this now.
if (task->request && !task->request->serialize && h2_task_logio_add_bytes_out) {
apr_off_t unaccounted = stream->out_frame_octets - stream->out_data_octets;
if (unaccounted > 0) {
- h2_task_logio_add_bytes_out(slave, unaccounted);
+ h2_task_logio_add_bytes_out(secondary, unaccounted);
}
}
- if (m->s->keep_alive_max == 0 || slave->keepalives < m->s->keep_alive_max) {
- reuse_slave = ((m->spare_slaves->nelts < (m->limit_active * 3 / 2))
- && !task->rst_error);
+ if (m->s->keep_alive_max == 0 || secondary->keepalives < m->s->keep_alive_max) {
+ reuse_secondary = ((m->spare_secondary->nelts < (m->limit_active * 3 / 2))
+ && !task->rst_error);
}
- task->c = NULL;
- if (reuse_slave) {
+ if (reuse_secondary) {
h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
- APLOGNO(03385) "h2_task_destroy, reuse slave");
+ APLOGNO(03385) "h2_task_destroy, reuse secondary");
h2_task_destroy(task);
- APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
+ APR_ARRAY_PUSH(m->spare_secondary, conn_rec*) = secondary;
}
else {
h2_beam_log(task->output.beam, m->c, APLOG_TRACE1,
- "h2_task_destroy, destroy slave");
- h2_slave_destroy(slave);
+ "h2_task_destroy, destroy secondary");
+ h2_secondary_destroy(secondary);
}
}
}
return 0;
}
-static void purge_streams(h2_mplx *m, int lock)
+static void m_purge_streams(h2_mplx *m, int lock)
{
if (!h2_ihash_empty(m->spurge)) {
H2_MPLX_ENTER_MAYBE(m, lock);
- while (!h2_ihash_iter(m->spurge, stream_destroy_iter, m)) {
+ while (!h2_ihash_iter(m->spurge, m_stream_destroy_iter, m)) {
/* repeat until empty */
}
H2_MPLX_LEAVE_MAYBE(m, lock);
void *ctx;
} stream_iter_ctx_t;
-static int stream_iter_wrap(void *ctx, void *stream)
+static int m_stream_iter_wrap(void *ctx, void *stream)
{
stream_iter_ctx_t *x = ctx;
return x->cb(stream, x->ctx);
}
-apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
+apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
{
stream_iter_ctx_t x;
x.cb = cb;
x.ctx = ctx;
- h2_ihash_iter(m->streams, stream_iter_wrap, &x);
+ h2_ihash_iter(m->streams, m_stream_iter_wrap, &x);
H2_MPLX_LEAVE(m);
return APR_SUCCESS;
}
-static int report_stream_iter(void *ctx, void *val) {
+static int m_report_stream_iter(void *ctx, void *val) {
h2_mplx *m = ctx;
h2_stream *stream = val;
h2_task *task = stream->task;
return 1;
}
-static int unexpected_stream_iter(void *ctx, void *val) {
+static int m_unexpected_stream_iter(void *ctx, void *val) {
h2_mplx *m = ctx;
h2_stream *stream = val;
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
return 1;
}
-static int stream_cancel_iter(void *ctx, void *val) {
+static int m_stream_cancel_iter(void *ctx, void *val) {
h2_mplx *m = ctx;
h2_stream *stream = val;
h2_stream_rst(stream, H2_ERR_NO_ERROR);
/* All connection data has been sent, simulate cleanup */
h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
- stream_cleanup(m, stream);
+ m_stream_cleanup(m, stream);
return 0;
}
-void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
+void h2_mplx_m_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
{
apr_status_t status;
int i, wait_secs = 60, old_aborted;
H2_MPLX_ENTER_ALWAYS(m);
- /* While really terminating any slave connections, treat the master
+ /* While really terminating any secondary connections, treat the master
* connection as aborted. It's not as if we could send any more data
* at this point. */
old_aborted = m->c->aborted;
"h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d active tasks",
m->id, (int)h2_ihash_count(m->streams),
(int)h2_ihash_count(m->shold), (int)h2_ihash_count(m->spurge), m->tasks_active);
- while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) {
+ while (!h2_ihash_iter(m->streams, m_stream_cancel_iter, m)) {
/* until empty */
}
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03198)
"h2_mplx(%ld): waited %d sec for %d tasks",
m->id, i*wait_secs, (int)h2_ihash_count(m->shold));
- h2_ihash_iter(m->shold, report_stream_iter, m);
+ h2_ihash_iter(m->shold, m_report_stream_iter, m);
}
}
m->join_wait = NULL;
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516)
"h2_mplx(%ld): unexpected %d streams in hold",
m->id, (int)h2_ihash_count(m->shold));
- h2_ihash_iter(m->shold, unexpected_stream_iter, m);
+ h2_ihash_iter(m->shold, m_unexpected_stream_iter, m);
}
m->c->aborted = old_aborted;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): released", m->id);
}
-apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream)
+apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, h2_stream *stream)
{
H2_MPLX_ENTER(m);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
H2_STRM_MSG(stream, "cleanup"));
- stream_cleanup(m, stream);
+ m_stream_cleanup(m, stream);
H2_MPLX_LEAVE(m);
return APR_SUCCESS;
}
-h2_stream *h2_mplx_stream_get(h2_mplx *m, int id)
+h2_stream *h2_mplx_t_stream_get(h2_mplx *m, h2_task *task)
{
h2_stream *s = NULL;
H2_MPLX_ENTER_ALWAYS(m);
- s = h2_ihash_get(m->streams, id);
+ s = h2_ihash_get(m->streams, task->stream_id);
H2_MPLX_LEAVE(m);
return s;
}
-static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
+static void mst_output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
{
h2_stream *stream = ctx;
h2_mplx *m = stream->session->mplx;
- check_data_for(m, stream, 0);
+ mst_check_data_for(m, stream, 0);
}
-static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
+static apr_status_t t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
{
h2_stream *stream = h2_ihash_get(m->streams, stream_id);
stream->output = beam;
if (APLOGctrace2(m->c)) {
- h2_beam_log(beam, m->c, APLOG_TRACE2, "out_open");
+ h2_beam_log(beam, stream->task->c, APLOG_TRACE2, "out_open");
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->task->c,
"h2_mplx(%s): out open", stream->task->id);
}
- h2_beam_on_consumed(stream->output, NULL, stream_output_consumed, stream);
- h2_beam_on_produced(stream->output, output_produced, stream);
+ h2_beam_on_consumed(stream->output, NULL, mst_stream_output_consumed, stream);
+ h2_beam_on_produced(stream->output, mst_output_produced, stream);
if (stream->task->output.copy_files) {
h2_beam_on_file_beam(stream->output, h2_beam_no_files, NULL);
}
/* we might see some file buckets in the output, see
* if we have enough handles reserved. */
- check_data_for(m, stream, 1);
+ mst_check_data_for(m, stream, 1);
return APR_SUCCESS;
}
-apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
+apr_status_t h2_mplx_t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
{
apr_status_t status;
status = APR_ECONNABORTED;
}
else {
- status = out_open(m, stream_id, beam);
+ status = t_out_open(m, stream_id, beam);
}
H2_MPLX_LEAVE(m);
return status;
}
-static apr_status_t out_close(h2_mplx *m, h2_task *task)
+static apr_status_t s_out_close(h2_mplx *m, h2_task *task)
{
apr_status_t status = APR_SUCCESS;
h2_stream *stream;
return APR_ECONNABORTED;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, task->c,
"h2_mplx(%s): close", task->id);
status = h2_beam_close(task->output.beam);
- h2_beam_log(task->output.beam, m->c, APLOG_TRACE2, "out_close");
- output_consumed_signal(m, task);
- check_data_for(m, stream, 1);
+ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "out_close");
+ s_output_consumed_signal(m, task);
+ mst_check_data_for(m, stream, 1);
return status;
}
-apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
- apr_thread_cond_t *iowait)
+apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
+ apr_thread_cond_t *iowait)
{
apr_status_t status;
if (m->aborted) {
status = APR_ECONNABORTED;
}
- else if (h2_mplx_has_master_events(m)) {
+ else if (h2_mplx_m_has_master_events(m)) {
status = APR_SUCCESS;
}
else {
- purge_streams(m, 0);
- h2_ihash_iter(m->streams, report_consumption_iter, m);
+ m_purge_streams(m, 0);
+ h2_ihash_iter(m->streams, m_report_consumption_iter, m);
m->added_output = iowait;
status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
if (APLOGctrace2(m->c)) {
return status;
}
-static void check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked)
+static void mst_check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked)
{
/* If m->lock is already held, we must release during h2_ififo_push()
* which can wait on its not_full condition, causing a deadlock because
}
}
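/* Editor's sketch, not part of the patch: the unlock/push/relock pattern the
 * comment above describes. It assumes an h2_ififo_push() that blocks while the
 * fifo is full; m->lock and m->readyq are the fields used elsewhere in this file. */
static void sketch_fifo_push(h2_mplx *m, h2_stream *stream, int mplx_is_locked)
{
    if (mplx_is_locked) {
        apr_thread_mutex_unlock(m->lock);  /* let the consumer make room */
    }
    h2_ififo_push(m->readyq, stream->id);  /* may wait on the fifo's not_full condition */
    if (mplx_is_locked) {
        apr_thread_mutex_lock(m->lock);
    }
}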
-apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
+apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
{
apr_status_t status;
return status;
}
-static void register_if_needed(h2_mplx *m)
+static void ms_register_if_needed(h2_mplx *m, int from_master)
{
if (!m->aborted && !m->is_registered && !h2_iq_empty(m->q)) {
apr_status_t status = h2_workers_register(m->workers, m);
if (status == APR_SUCCESS) {
m->is_registered = 1;
}
- else {
+ else if (from_master) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c, APLOGNO(10021)
"h2_mplx(%ld): register at workers", m->id);
}
}
}
-apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
- h2_stream_pri_cmp *cmp, void *ctx)
+apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream,
+ h2_stream_pri_cmp *cmp, void *ctx)
{
apr_status_t status;
h2_ihash_add(m->streams, stream);
if (h2_stream_is_ready(stream)) {
/* already have a response */
- check_data_for(m, stream, 1);
+ mst_check_data_for(m, stream, 1);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
H2_STRM_MSG(stream, "process, add to readyq"));
}
else {
h2_iq_add(m->q, stream->id, cmp, ctx);
- register_if_needed(m);
+ ms_register_if_needed(m, 1);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
H2_STRM_MSG(stream, "process, added to q"));
}
return status;
}
-static h2_task *next_stream_task(h2_mplx *m)
+static h2_task *s_next_stream_task(h2_mplx *m)
{
h2_stream *stream;
int sid;
stream = h2_ihash_get(m->streams, sid);
if (stream) {
- conn_rec *slave, **pslave;
+ conn_rec *secondary, **psecondary;
- pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
- if (pslave) {
- slave = *pslave;
- slave->aborted = 0;
+ psecondary = (conn_rec **)apr_array_pop(m->spare_secondary);
+ if (psecondary) {
+ secondary = *psecondary;
+ secondary->aborted = 0;
}
else {
- slave = h2_slave_create(m->c, stream->id, m->pool);
+ secondary = h2_secondary_create(m->c, stream->id, m->pool);
}
if (!stream->task) {
m->max_stream_started = sid;
}
if (stream->input) {
- h2_beam_on_consumed(stream->input, stream_input_ev,
- stream_input_consumed, stream);
+ h2_beam_on_consumed(stream->input, mst_stream_input_ev,
+ m_stream_input_consumed, stream);
}
- stream->task = h2_task_create(slave, stream->id,
+ stream->task = h2_task_create(secondary, stream->id,
stream->request, m, stream->input,
stream->session->s->timeout,
m->stream_max_mem);
if (!stream->task) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, slave,
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, secondary,
H2_STRM_LOG(APLOGNO(02941), stream,
"create task"));
return NULL;
return NULL;
}
-apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask)
+apr_status_t h2_mplx_s_pop_task(h2_mplx *m, h2_task **ptask)
{
apr_status_t rv = APR_EOF;
rv = APR_EOF;
}
else {
- *ptask = next_stream_task(m);
+ *ptask = s_next_stream_task(m);
rv = (*ptask != NULL && !h2_iq_empty(m->q))? APR_EAGAIN : APR_SUCCESS;
}
if (APR_EAGAIN != rv) {
return rv;
}
-static void task_done(h2_mplx *m, h2_task *task)
+static void s_task_done(h2_mplx *m, h2_task *task)
{
h2_stream *stream;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
"h2_mplx(%ld): task(%s) done", m->id, task->id);
- out_close(m, task);
+ s_out_close(m, task);
task->worker_done = 1;
task->done_at = apr_time_now();
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
"h2_mplx(%s): request done, %f ms elapsed", task->id,
(task->done_at - task->started_at) / 1000.0);
if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) {
- mplx_be_happy(m);
+ s_mplx_be_happy(m, task);
}
ap_assert(task->done_done == 0);
/* reset and schedule again */
h2_task_redo(task);
h2_iq_add(m->q, stream->id, NULL, NULL);
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
H2_STRM_MSG(stream, "redo, added to q"));
}
else {
/* stream not cleaned up, stay around */
task->done_done = 1;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
H2_STRM_MSG(stream, "task_done, stream open"));
if (stream->input) {
h2_beam_leave(stream->input);
}
/* more data will not arrive, resume the stream */
- check_data_for(m, stream, 1);
+ mst_check_data_for(m, stream, 1);
}
}
else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) {
/* stream is done, was just waiting for this. */
task->done_done = 1;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
H2_STRM_MSG(stream, "task_done, in hold"));
if (stream->input) {
h2_beam_leave(stream->input);
}
- stream_joined(m, stream);
+ ms_stream_joined(m, stream);
}
else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c,
H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge"));
ap_assert("stream should not be in spurge" == NULL);
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03518)
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c, APLOGNO(03518)
"h2_mplx(%s): task_done, stream not found",
task->id);
ap_assert("stream should still be available" == NULL);
}
}
-void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
+void h2_mplx_s_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
{
H2_MPLX_ENTER_ALWAYS(m);
--m->tasks_active;
- task_done(m, task);
+ s_task_done(m, task);
if (m->join_wait) {
apr_thread_cond_signal(m->join_wait);
}
if (ptask) {
/* caller wants another task */
- *ptask = next_stream_task(m);
+ *ptask = s_next_stream_task(m);
}
- register_if_needed(m);
+ ms_register_if_needed(m, 0);
H2_MPLX_LEAVE(m);
}
* h2_mplx DoS protection
******************************************************************************/
-static int timed_out_busy_iter(void *data, void *val)
+static int m_timed_out_busy_iter(void *data, void *val)
{
stream_iter_ctx *ctx = data;
h2_stream *stream = val;
return 1;
}
-static h2_stream *get_timed_out_busy_stream(h2_mplx *m)
+static h2_stream *m_get_timed_out_busy_stream(h2_mplx *m)
{
stream_iter_ctx ctx;
ctx.m = m;
ctx.stream = NULL;
ctx.now = apr_time_now();
- h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
+ h2_ihash_iter(m->streams, m_timed_out_busy_iter, &ctx);
return ctx.stream;
}
-static int latest_repeatable_unsubmitted_iter(void *data, void *val)
+static int m_latest_repeatable_unsubmitted_iter(void *data, void *val)
{
stream_iter_ctx *ctx = data;
h2_stream *stream = val;
return 1;
}
-static apr_status_t assess_task_to_throttle(h2_task **ptask, h2_mplx *m)
+static apr_status_t m_assess_task_to_throttle(h2_task **ptask, h2_mplx *m)
{
stream_iter_ctx ctx;
ctx.m = m;
ctx.stream = NULL;
ctx.count = 0;
- h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
+ h2_ihash_iter(m->streams, m_latest_repeatable_unsubmitted_iter, &ctx);
if (m->tasks_active - ctx.count > m->limit_active) {
/* we are above the limit of running tasks, accounting for the ones
* already throttled. */
return APR_EAGAIN;
}
/* above the limit, but seeing no candidate for easy throttling */
- if (get_timed_out_busy_stream(m)) {
+ if (m_get_timed_out_busy_stream(m)) {
/* Too many busy workers, unable to cancel enough streams
* and with a busy, timed out stream, we tell the client
* to go away... */
return APR_SUCCESS;
}
-static apr_status_t unschedule_slow_tasks(h2_mplx *m)
+static apr_status_t m_unschedule_slow_tasks(h2_mplx *m)
{
h2_task *task;
apr_status_t rv;
/* Try to get rid of streams that occupy workers. Look for safe requests
* that are repeatable. If none found, fail the connection.
*/
- while (APR_EAGAIN == (rv = assess_task_to_throttle(&task, m))) {
+ while (APR_EAGAIN == (rv = m_assess_task_to_throttle(&task, m))) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
"h2_mplx(%s): unschedule, resetting task for redo later",
task->id);
return rv;
}
-static apr_status_t mplx_be_happy(h2_mplx *m)
+static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task)
{
apr_time_t now;
m->limit_active = H2MIN(m->limit_active * 2, m->max_active);
m->last_mood_change = now;
m->irritations_since = 0;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
"h2_mplx(%ld): mood update, increasing worker limit to %d",
m->id, m->limit_active);
}
return APR_SUCCESS;
}
-static apr_status_t mplx_be_annoyed(h2_mplx *m)
+static apr_status_t m_be_annoyed(h2_mplx *m)
{
apr_status_t status = APR_SUCCESS;
apr_time_t now;
}
if (m->tasks_active > m->limit_active) {
- status = unschedule_slow_tasks(m);
+ status = m_unschedule_slow_tasks(m);
}
return status;
}
-apr_status_t h2_mplx_idle(h2_mplx *m)
+apr_status_t h2_mplx_m_idle(h2_mplx *m)
{
apr_status_t status = APR_SUCCESS;
apr_size_t scount;
* of busy workers we allow for this connection until it
* behaves well.
*/
- status = mplx_be_annoyed(m);
+ status = m_be_annoyed(m);
}
else if (!h2_iq_empty(m->q)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
h2_beam_is_closed(stream->output),
(long)h2_beam_get_buffered(stream->output));
h2_ihash_add(m->streams, stream);
- check_data_for(m, stream, 1);
+ mst_check_data_for(m, stream, 1);
stream->out_checked = 1;
status = APR_EAGAIN;
}
}
}
}
- register_if_needed(m);
+ ms_register_if_needed(m, 1);
H2_MPLX_LEAVE(m);
return status;
* mplx master events dispatching
******************************************************************************/
-int h2_mplx_has_master_events(h2_mplx *m)
+int h2_mplx_m_has_master_events(h2_mplx *m)
{
return apr_atomic_read32(&m->event_pending) > 0;
}
-apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
- stream_ev_callback *on_resume,
- void *on_ctx)
+apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume,
+ void *on_ctx)
{
h2_stream *stream;
int n, id;
apr_atomic_set32(&m->event_pending, 0);
/* update input windows for streams */
- h2_ihash_iter(m->streams, report_consumption_iter, m);
- purge_streams(m, 1);
+ h2_ihash_iter(m->streams, m_report_consumption_iter, m);
+ m_purge_streams(m, 1);
n = h2_ififo_count(m->readyq);
while (n > 0
return APR_SUCCESS;
}
-apr_status_t h2_mplx_keep_active(h2_mplx *m, h2_stream *stream)
+apr_status_t h2_mplx_m_keep_active(h2_mplx *m, h2_stream *stream)
{
- check_data_for(m, stream, 0);
+ mst_check_data_for(m, stream, 0);
return APR_SUCCESS;
}
-int h2_mplx_awaits_data(h2_mplx *m)
+int h2_mplx_m_awaits_data(h2_mplx *m)
{
int waiting = 1;
return waiting;
}
-apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id)
+apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id)
{
h2_stream *stream;
apr_status_t status = APR_SUCCESS;
H2_MPLX_ENTER_ALWAYS(m);
stream = h2_ihash_get(m->streams, stream_id);
if (stream && stream->task) {
- status = mplx_be_annoyed(m);
+ status = m_be_annoyed(m);
}
H2_MPLX_LEAVE(m);
return status;
* queued in the multiplexer. If a task thread tries to write more
* data, it is blocked until space becomes available.
*
- * Writing input is never blocked. In order to use flow control on the input,
- * the mplx can be polled for input data consumption.
+ * Naming Convention:
+ * "h2_mplx_m_" are methods only to be called by the main connection
+ * "h2_mplx_s_" are method only to be called by a secondary connection
+ * "h2_mplx_t_" are method only to be called by a task handler (can be master or secondary)
*/
struct apr_pool_t;
apr_size_t stream_max_mem;
apr_pool_t *spare_io_pool;
- apr_array_header_t *spare_slaves; /* spare slave connections */
+ apr_array_header_t *spare_secondary; /* spare secondary connections */
struct h2_workers *workers;
};
-
-
/*******************************************************************************
- * Object lifecycle and information.
+ * From the main connection processing: h2_mplx_m_*
******************************************************************************/
-apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s);
+apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s);
/**
* Create the multiplexer for the given HTTP2 session.
* Implicitly has reference count 1.
*/
-h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *master,
- struct h2_workers *workers);
+h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *master,
+ struct h2_workers *workers);
/**
* Decreases the reference counter of this mplx and waits for it
* @param m the mplx to be released and destroyed
* @param wait condition var to wait on for ref counter == 0
*/
-void h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
-
-apr_status_t h2_mplx_pop_task(h2_mplx *m, struct h2_task **ptask);
-
-void h2_mplx_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
+void h2_mplx_m_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
/**
* Shut down the multiplexer gracefully. Will no longer schedule new streams
* but let the ongoing ones finish normally.
* @return the highest stream id being/been processed
*/
-int h2_mplx_shutdown(h2_mplx *m);
-
-int h2_mplx_is_busy(h2_mplx *m);
-
-/*******************************************************************************
- * IO lifetime of streams.
- ******************************************************************************/
-
-struct h2_stream *h2_mplx_stream_get(h2_mplx *m, int id);
+int h2_mplx_m_shutdown(h2_mplx *m);
/**
* Notifies mplx that a stream has been completely handled on the main
* @param m the mplx itself
* @param stream the stream ready for cleanup
*/
-apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, struct h2_stream *stream);
+apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, struct h2_stream *stream);
/**
* Waits on output data from any stream in this session to become available.
* Returns APR_TIMEUP if no data arrived in the given time.
*/
-apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
- struct apr_thread_cond_t *iowait);
-
-apr_status_t h2_mplx_keep_active(h2_mplx *m, struct h2_stream *stream);
+apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
+ struct apr_thread_cond_t *iowait);
-/*******************************************************************************
- * Stream processing.
- ******************************************************************************/
+apr_status_t h2_mplx_m_keep_active(h2_mplx *m, struct h2_stream *stream);
/**
* Process a stream request.
* @param cmp the stream priority compare function
* @param ctx context data for the compare function
*/
-apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
- h2_stream_pri_cmp *cmp, void *ctx);
+apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream,
+ h2_stream_pri_cmp *cmp, void *ctx);
/**
* Stream priorities have changed, reschedule pending requests.
* @param cmp the stream priority compare function
* @param ctx context data for the compare function
*/
-apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
+apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream);
* Check if the multiplexer has events for the master connection pending.
* @return != 0 iff there are events pending
*/
-int h2_mplx_has_master_events(h2_mplx *m);
+int h2_mplx_m_has_master_events(h2_mplx *m);
/**
* Dispatch events for the master connection, such as
* @param on_resume new output data has arrived for a suspended stream
* @param ctx user supplied argument to invocation.
*/
-apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
- stream_ev_callback *on_resume,
- void *ctx);
+apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume,
+ void *ctx);
-int h2_mplx_awaits_data(h2_mplx *m);
+int h2_mplx_m_awaits_data(h2_mplx *m);
typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx);
-apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
+apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
-apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id);
-
-/*******************************************************************************
- * Output handling of streams.
- ******************************************************************************/
+apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id);
/**
- * Opens the output for the given stream with the specified response.
+ * Master connection has entered idle mode.
+ * @param m the mplx instance of the master connection
+ * @return != SUCCESS iff connection should be terminated
*/
-apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id,
- struct h2_bucket_beam *beam);
+apr_status_t h2_mplx_m_idle(h2_mplx *m);
/*******************************************************************************
- * h2_mplx list Manipulation.
+ * From a secondary connection processing: h2_mplx_s_*
******************************************************************************/
+apr_status_t h2_mplx_s_pop_task(h2_mplx *m, struct h2_task **ptask);
+void h2_mplx_s_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
-/**
- * The magic pointer value that indicates the head of a h2_mplx list
- * @param b The mplx list
- * @return The magic pointer value
- */
-#define H2_MPLX_LIST_SENTINEL(b) APR_RING_SENTINEL((b), h2_mplx, link)
-
-/**
- * Determine if the mplx list is empty
- * @param b The list to check
- * @return true or false
- */
-#define H2_MPLX_LIST_EMPTY(b) APR_RING_EMPTY((b), h2_mplx, link)
-
-/**
- * Return the first mplx in a list
- * @param b The list to query
- * @return The first mplx in the list
- */
-#define H2_MPLX_LIST_FIRST(b) APR_RING_FIRST(b)
-
-/**
- * Return the last mplx in a list
- * @param b The list to query
- * @return The last mplx int he list
- */
-#define H2_MPLX_LIST_LAST(b) APR_RING_LAST(b)
-
-/**
- * Insert a single mplx at the front of a list
- * @param b The list to add to
- * @param e The mplx to insert
- */
-#define H2_MPLX_LIST_INSERT_HEAD(b, e) do { \
-h2_mplx *ap__b = (e); \
-APR_RING_INSERT_HEAD((b), ap__b, h2_mplx, link); \
-} while (0)
-
-/**
- * Insert a single mplx at the end of a list
- * @param b The list to add to
- * @param e The mplx to insert
- */
-#define H2_MPLX_LIST_INSERT_TAIL(b, e) do { \
-h2_mplx *ap__b = (e); \
-APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link); \
-} while (0)
+/*******************************************************************************
+ * From a h2_task owner: h2_mplx_t_*
+ * (a task is transferred from the master to a secondary connection and back in
+ * its normal lifetime).
+ ******************************************************************************/
/**
- * Get the next mplx in the list
- * @param e The current mplx
- * @return The next mplx
- */
-#define H2_MPLX_NEXT(e) APR_RING_NEXT((e), link)
-/**
- * Get the previous mplx in the list
- * @param e The current mplx
- * @return The previous mplx
+ * Opens the output for the given stream with the specified response.
*/
-#define H2_MPLX_PREV(e) APR_RING_PREV((e), link)
+apr_status_t h2_mplx_t_out_open(h2_mplx *mplx, int stream_id,
+ struct h2_bucket_beam *beam);
/**
- * Remove a mplx from its list
- * @param e The mplx to remove
+ * Get the stream that belongs to the given task.
*/
-#define H2_MPLX_REMOVE(e) APR_RING_REMOVE((e), link)
-
-/*******************************************************************************
- * h2_mplx DoS protection
- ******************************************************************************/
+struct h2_stream *h2_mplx_t_stream_get(h2_mplx *m, struct h2_task *task);
-/**
- * Master connection has entered idle mode.
- * @param m the mplx instance of the master connection
- * @return != SUCCESS iff connection should be terminated
- */
-apr_status_t h2_mplx_idle(h2_mplx *m);
#endif /* defined(__mod_h2__h2_mplx__) */
if (r->method_number == M_GET && r->method[0] == 'H') {
r->header_only = 1;
}
+ r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0",
+ req->method, req->path ? req->path : "");
+ r->headers_in = apr_table_clone(r->pool, req->headers);
rpath = (req->path ? req->path : "");
ap_parse_uri(r, rpath);
*/
r->hostname = NULL;
ap_update_vhost_from_headers(r);
-
+ r->protocol = "HTTP/2.0";
+ r->proto_num = HTTP_VERSION(2, 0);
+
/* we may have switched to another server */
r->per_dir_config = r->server->lookup_defaults;
static void cleanup_unprocessed_streams(h2_session *session)
{
- h2_mplx_stream_do(session->mplx, rst_unprocessed_stream, session);
+ h2_mplx_m_stream_do(session->mplx, rst_unprocessed_stream, session);
}
static h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
else {
/* A stream reset on a request it sent us. Could happen in a browser
* when the user navigates away or cancels loading - maybe. */
- h2_mplx_client_rst(session->mplx, frame->hd.stream_id);
+ h2_mplx_m_client_rst(session->mplx, frame->hd.stream_id);
++session->streams_reset;
}
break;
}
static int h2_session_continue_data(h2_session *session) {
- if (h2_mplx_has_master_events(session->mplx)) {
+ if (h2_mplx_m_has_master_events(session->mplx)) {
return 0;
}
if (h2_conn_io_needs_flush(&session->io)) {
* Remove all streams greater than this number without submitting
* a RST_STREAM frame, since that should be clear from the GOAWAY
* we send. */
- session->local.accepted_max = h2_mplx_shutdown(session->mplx);
+ session->local.accepted_max = h2_mplx_m_shutdown(session->mplx);
session->local.error = error;
}
else {
}
transit(session, trigger, H2_SESSION_ST_CLEANUP);
- h2_mplx_release_and_join(session->mplx, session->iowait);
+ h2_mplx_m_release_and_join(session->mplx, session->iowait);
session->mplx = NULL;
ap_assert(session->ngh2);
/* if the session is still there, now is the last chance
* to perform cleanup. Normally, cleanup should have happened
* earlier in the connection pre_close. Main reason is that
- * any ongoing requests on slave connections might still access
+ * any ongoing requests on secondary connections might still access
* data which has, at this time, already been freed. An example
* is mod_ssl that uses request hooks. */
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c,
session->monitor->on_state_event = on_stream_state_event;
session->monitor->on_event = on_stream_event;
- session->mplx = h2_mplx_create(c, s, session->pool, workers);
+ session->mplx = h2_mplx_m_create(c, s, session->pool, workers);
/* connection input filter that feeds the session */
session->cin = h2_filter_cin_create(session);
if (stream) {
ap_assert(!stream->scheduled);
if (h2_stream_prep_processing(stream) == APR_SUCCESS) {
- h2_mplx_process(session->mplx, stream, stream_pri_cmp, session);
+ h2_mplx_m_process(session->mplx, stream, stream_pri_cmp, session);
}
else {
h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
session->open_streams);
h2_conn_io_flush(&session->io);
if (session->open_streams > 0) {
- if (h2_mplx_awaits_data(session->mplx)) {
+ if (h2_mplx_m_awaits_data(session->mplx)) {
/* waiting for at least one stream to produce data */
transit(session, "no io", H2_SESSION_ST_WAIT);
}
break;
case H2_SS_CLEANUP:
nghttp2_session_set_stream_user_data(session->ngh2, stream->id, NULL);
- h2_mplx_stream_cleanup(session->mplx, stream);
+ h2_mplx_m_stream_cleanup(session->mplx, stream);
break;
default:
break;
static apr_status_t dispatch_master(h2_session *session) {
apr_status_t status;
- status = h2_mplx_dispatch_master_events(session->mplx,
+ status = h2_mplx_m_dispatch_master_events(session->mplx,
on_stream_resume, session);
if (status == APR_EAGAIN) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
session->have_read = 1;
}
else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
- status = h2_mplx_idle(session->mplx);
+ status = h2_mplx_m_idle(session->mplx);
if (status == APR_EAGAIN) {
break;
}
/* We wait in smaller increments, using a 1 second timeout.
* That gives us the chance to check for MPMQ_STOPPING often.
*/
- status = h2_mplx_idle(session->mplx);
+ status = h2_mplx_m_idle(session->mplx);
if (status == APR_EAGAIN) {
break;
}
"h2_session: wait for data, %ld micros",
(long)session->wait_us);
}
- status = h2_mplx_out_trywait(session->mplx, session->wait_us,
+ status = h2_mplx_m_out_trywait(session->mplx, session->wait_us,
session->iowait);
if (status == APR_SUCCESS) {
session->wait_us = 0;
dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL);
}
if (session->reprioritize) {
- h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session);
+ h2_mplx_m_reprioritize(session->mplx, stream_pri_cmp, session);
session->reprioritize = 0;
}
}
const char *last_status_msg; /* the one already reported */
struct h2_iqueue *in_pending; /* all streams with input pending */
- struct h2_iqueue *in_process; /* all streams ready for processing on slave */
+ struct h2_iqueue *in_process; /* all streams ready for processing on a secondary */
} h2_session;
if (status == APR_EAGAIN) {
/* TODO: ugly, someone needs to retrieve the response first */
- h2_mplx_keep_active(stream->session->mplx, stream);
+ h2_mplx_m_keep_active(stream->session->mplx, stream);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
H2_STRM_MSG(stream, "prep, response eagain"));
return status;
task->request->authority,
task->request->path);
task->output.opened = 1;
- return h2_mplx_out_open(task->mplx, task->stream_id, task->output.beam);
+ return h2_mplx_t_out_open(task->mplx, task->stream_id, task->output.beam);
}
static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block)
* request_rec out filter chain) into the h2_mplx for further sending
* on the master connection.
*/
-static apr_status_t slave_out(h2_task *task, ap_filter_t* f,
- apr_bucket_brigade* bb)
+static apr_status_t secondary_out(h2_task *task, ap_filter_t* f,
+ apr_bucket_brigade* bb)
{
apr_bucket *b;
apr_status_t rv = APR_SUCCESS;
if (APR_SUCCESS == rv) {
/* could not write all, buffer the rest */
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, task->c, APLOGNO(03405)
- "h2_slave_out(%s): saving brigade", task->id);
+ "h2_secondary_out(%s): saving brigade", task->id);
ap_assert(NULL);
rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool);
flush = 1;
}
out:
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, task->c,
- "h2_slave_out(%s): slave_out leave", task->id);
+ "h2_secondary_out(%s): secondary_out leave", task->id);
return rv;
}
}
/*******************************************************************************
- * task slave connection filters
+ * task secondary connection filters
******************************************************************************/
-static apr_status_t h2_filter_slave_in(ap_filter_t* f,
- apr_bucket_brigade* bb,
- ap_input_mode_t mode,
- apr_read_type_e block,
- apr_off_t readbytes)
+static apr_status_t h2_filter_secondary_in(ap_filter_t* f,
+ apr_bucket_brigade* bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
{
h2_task *task;
apr_status_t status = APR_SUCCESS;
if (trace1) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_slave_in(%s): read, mode=%d, block=%d, readbytes=%ld",
+ "h2_secondary_in(%s): read, mode=%d, block=%d, readbytes=%ld",
task->id, mode, block, (long)readbytes);
}
/* Get more input data for our request. */
if (trace1) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_slave_in(%s): get more data from mplx, block=%d, "
+ "h2_secondary_in(%s): get more data from mplx, block=%d, "
"readbytes=%ld", task->id, block, (long)readbytes);
}
if (task->input.beam) {
if (trace1) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
- "h2_slave_in(%s): read returned", task->id);
+ "h2_secondary_in(%s): read returned", task->id);
}
if (APR_STATUS_IS_EAGAIN(status)
&& (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
if (APR_BRIGADE_EMPTY(task->input.bb)) {
if (trace1) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_slave_in(%s): no data", task->id);
+ "h2_secondary_in(%s): no data", task->id);
}
return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
}
buffer[len] = 0;
if (trace1) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_slave_in(%s): getline: %s",
+ "h2_secondary_in(%s): getline: %s",
task->id, buffer);
}
}
* to support it. Seems to work. */
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
APLOGNO(03472)
- "h2_slave_in(%s), unsupported READ mode %d",
+ "h2_secondary_in(%s), unsupported READ mode %d",
task->id, mode);
status = APR_ENOTIMPL;
}
if (trace1) {
apr_brigade_length(bb, 0, &bblen);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_slave_in(%s): %ld data bytes", task->id, (long)bblen);
+ "h2_secondary_in(%s): %ld data bytes", task->id, (long)bblen);
}
return status;
}
-static apr_status_t h2_filter_slave_output(ap_filter_t* filter,
- apr_bucket_brigade* brigade)
+static apr_status_t h2_filter_secondary_output(ap_filter_t* filter,
+ apr_bucket_brigade* brigade)
{
h2_task *task = h2_ctx_get_task(filter->c);
apr_status_t status;
ap_assert(task);
- status = slave_out(task, filter, brigade);
+ status = secondary_out(task, filter, brigade);
if (status != APR_SUCCESS) {
h2_task_rst(task, H2_ERR_INTERNAL_ERROR);
}
ap_hook_process_connection(h2_task_process_conn,
NULL, NULL, APR_HOOK_FIRST);
- ap_register_input_filter("H2_SLAVE_IN", h2_filter_slave_in,
+ ap_register_input_filter("H2_SECONDARY_IN", h2_filter_secondary_in,
NULL, AP_FTYPE_NETWORK);
- ap_register_output_filter("H2_SLAVE_OUT", h2_filter_slave_output,
+ ap_register_output_filter("H2_SECONDARY_OUT", h2_filter_secondary_output,
NULL, AP_FTYPE_NETWORK);
ap_register_output_filter("H2_PARSE_H1", h2_filter_parse_h1,
NULL, AP_FTYPE_NETWORK);
(void)arg;
if (ctx->task) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
- "h2_slave(%s), pre_connection, adding filters", c->log_id);
- ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c);
+ "h2_secondary(%s), pre_connection, adding filters", c->log_id);
+ ap_add_input_filter("H2_SECONDARY_IN", NULL, NULL, c);
ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c);
- ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c);
+ ap_add_output_filter("H2_SECONDARY_OUT", NULL, NULL, c);
}
return OK;
}
-h2_task *h2_task_create(conn_rec *slave, int stream_id,
+h2_task *h2_task_create(conn_rec *secondary, int stream_id,
const h2_request *req, h2_mplx *m,
h2_bucket_beam *input,
apr_interval_time_t timeout,
apr_pool_t *pool;
h2_task *task;
- ap_assert(slave);
+ ap_assert(secondary);
ap_assert(req);
- apr_pool_create(&pool, slave->pool);
+ apr_pool_create(&pool, secondary->pool);
apr_pool_tag(pool, "h2_task");
task = apr_pcalloc(pool, sizeof(h2_task));
if (task == NULL) {
}
task->id = "000";
task->stream_id = stream_id;
- task->c = slave;
+ task->c = secondary;
task->mplx = m;
task->pool = pool;
task->request = req;
*
* Each conn_rec->id is supposed to be unique at a point in time. Since
* some modules (and maybe external code) use this id as an identifier
- * for the request_rec they handle, it needs to be unique for slave
+ * for the request_rec they handle, it needs to be unique for secondary
* connections also.
*
* The MPM module assigns the connection ids and mod_unique_id is using
* works for HTTP/1.x, the parallel execution of several requests per
* connection will generate duplicate identifiers on load.
*
- * The original implementation for slave connection identifiers used
+ * The original implementation for secondary connection identifiers used
* to shift the master connection id up and assign the stream id to the
* lower bits. This was cramped on 32 bit systems, but on 64bit there was
* enough space.
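/* Editor's sketch, not part of the patch: the former packing described above.
 * The shift width here is a made-up illustration, not the value the old code used. */
static long sketch_old_secondary_id(long master_id, int stream_id)
{
    /* master id in the high bits, stream id in the low bits: quickly runs out of
     * room when long is only 32 bits wide. */
    return (master_id << 24) | (long)stream_id;
}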
h2_ctx_create_for(c, task);
apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id);
- h2_slave_run_pre_connection(c, ap_get_conn_socket(c));
+ h2_secondary_run_pre_connection(c, ap_get_conn_socket(c));
task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc);
if (task->request->serialize) {
}
else {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "slave_conn(%ld): has no task", c->id);
+ "secondary_conn(%ld): has no task", c->id);
}
return DECLINED;
}
apr_bucket *eor;
};
-h2_task *h2_task_create(conn_rec *slave, int stream_id,
+h2_task *h2_task_create(conn_rec *secondary, int stream_id,
const h2_request *req, struct h2_mplx *m,
struct h2_bucket_beam *input,
apr_interval_time_t timeout,
{
apr_status_t rv;
- rv = h2_mplx_pop_task(m, &slot->task);
+ rv = h2_mplx_s_pop_task(m, &slot->task);
if (slot->task) {
/* Ok, we got something to give back to the worker for execution.
* If we still have idle workers, we let the worker be sticky,
* mplx the opportunity to give us back a new task right away.
*/
if (!slot->aborted && (--slot->sticks > 0)) {
- h2_mplx_task_done(slot->task->mplx, slot->task, &slot->task);
+ h2_mplx_s_task_done(slot->task->mplx, slot->task, &slot->task);
}
else {
- h2_mplx_task_done(slot->task->mplx, slot->task, NULL);
+ h2_mplx_s_task_done(slot->task->mplx, slot->task, NULL);
slot->task = NULL;
}
}
if (ctx) {
if (r) {
if (ctx->task) {
- h2_stream *stream = h2_mplx_stream_get(ctx->task->mplx, ctx->task->stream_id);
+ h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task);
if (stream && stream->push_policy != H2_PUSH_NONE) {
return "on";
}
{
if (ctx) {
if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) {
- h2_stream *stream = h2_mplx_stream_get(ctx->task->mplx, ctx->task->stream_id);
+ h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task);
if (stream) {
return apr_itoa(p, stream->initiated_on);
}