     credentials could be refused in case of concurrent accesses from
     different users. PR 63124. [Simon Kappel <simon.kappel axis.com>]
+  *) mod_http2: Enable re-use of slave connections again and fix the slave
+     connection keepalives counter. [Stefan Eissing]
+
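For context, the "re-use of slave connections" and the keepalives counter above amount to per-connection bookkeeping on the slave conn_rec. A minimal sketch of that kind of bookkeeping, assuming a hypothetical helper (the helper name is made up; c->keepalives and AP_CONN_CLOSE appear in the hunks further down):

#include <httpd.h>

/* Hypothetical helper, for illustration only: when a spare slave conn_rec
 * is handed out for another request, its keepalives counter has to keep
 * counting, otherwise code that inspects c->keepalives treats every
 * request as the first one on a fresh connection. */
static void slave_prepare_for_reuse(conn_rec *slave)
{
    slave->keepalives++;              /* one more request served on this slave */
    slave->keepalive = AP_CONN_CLOSE; /* per-request state is discarded even
                                         when the conn_rec itself is reused */
}
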
  *) mod_proxy_wstunnel: Fix websocket proxy over UDS.
     PR 62932 <pavel dcmsys.com>
     (trunk works, modulo CHANGES)
     +1: ylavic, icing, jim
-  *) mod_http2: enable re-use of slave connections again. Fixed slave connection
-     keepalives counter. [Stefan Eissing]
-     trunk patch: http://svn.apache.org/r1852038
-                  http://svn.apache.org/r1852101
-     2.4.x patch: https://svn.apache.org/repos/asf/httpd/httpd/patches/2.4.x/h2-slave-keepalives.patch
-     +1: icing, ylavic, jim
-
-
-
PATCHES PROPOSED TO BACKPORT FROM TRUNK:
[ New proposals should be added at the end of the list ]
    c->notes = apr_table_make(pool, 5);
    c->input_filters = NULL;
    c->output_filters = NULL;
+    c->keepalives = 0;
+#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
+    c->filter_conn_ctx = NULL;
+#endif
    c->bucket_alloc = apr_bucket_alloc_create(pool);
    c->data_in_input_filters = 0;
    c->data_in_output_filters = 0;
        ap_set_module_config(c->conn_config, mpm, cfg);
    }
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
-                  "h2_stream(%ld-%d): created slave", master->id, slave_id);
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
+                  "h2_slave(%s): created", c->log_id);
    return c;
}
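
The AP_MODULE_MAGIC_AT_LEAST guard in the hunk above is the usual compile-time check for conn_rec members that only newer core headers declare. A small sketch of the same pattern, assuming a made-up initializer (the field and version number simply mirror the hunk):

#include <httpd.h>
#include <ap_mmn.h>    /* AP_MODULE_MAGIC_AT_LEAST */

/* Hypothetical initializer: the assignment to c->filter_conn_ctx is only
 * compiled when the headers we build against are new enough to declare
 * that member at all. */
static void demo_init_slave_fields(conn_rec *c)
{
    c->keepalives = 0;
#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
    c->filter_conn_ctx = NULL;
#endif
}
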
void h2_slave_destroy(conn_rec *slave)
{
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, slave,
-                  "h2_stream(%s): destroy slave",
-                  apr_table_get(slave->notes, H2_TASK_ID_NOTE));
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, slave,
+                  "h2_slave(%s): destroy", slave->log_id);
    slave->sbh = NULL;
    apr_pool_destroy(slave->pool);
}
        slave->keepalive = AP_CONN_CLOSE;
        return ap_run_pre_connection(slave, csd);
    }
+    ap_assert(slave->output_filters);
    return APR_SUCCESS;
}
                        && !task->rst_error);
    }
-    if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) {
+    task->c = NULL;
+    if (reuse_slave) {
        h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
                    APLOGNO(03385) "h2_task_destroy, reuse slave");
        h2_task_destroy(task);
    apr_status_t status;
    int i, wait_secs = 60;
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+                  "h2_mplx(%ld): start release", m->id);
    /* How to shut down a h2 connection:
     * 0. abort and tell the workers that no more tasks will come from us */
    m->aborted = 1;
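
The flag set here is what the rest of the shutdown sequence keys off: once the mplx is marked aborted, workers must not pick up further tasks from it. A tiny sketch of the consumer side of that flag, assuming a made-up helper (only m->aborted comes from the hunk above):

#include "h2_mplx.h"

/* Hypothetical check a worker could make before pulling another task off
 * this mplx; after step 0 above no new tasks are handed out and the ones
 * still running are drained. */
static int mplx_accepts_new_tasks(h2_mplx *m)
{
    return !m->aborted;
}
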
     */
    n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo));
    while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+                      "h2_mplx(%s): unschedule, resetting task for redo later",
+                      stream->task->id);
        h2_task_rst(stream->task, H2_ERR_CANCEL);
        h2_ihash_add(m->sredo, stream);
        --n;
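
To make the arithmetic above concrete (numbers purely illustrative): with m->tasks_active = 8, m->limit_active = 4 and one stream already tracked in m->sredo, n = 8 - 4 - 1 = 3, so up to three of the latest repeatable, not yet submitted streams are reset with H2_ERR_CANCEL and remembered in m->sredo for a later redo.
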
    (void)arg;
    if (h2_ctx_is_task(ctx)) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
-                      "h2_h2, pre_connection, found stream task");
+                      "h2_slave(%s), pre_connection, adding filters", c->log_id);
        ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c);
        ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c);
        ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c);
void h2_task_destroy(h2_task *task)
{
    if (task->output.beam) {
-        h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "task_destroy");
        h2_beam_destroy(task->output.beam);
        task->output.beam = NULL;
    }