Changes with Apache 2.4.21
+ *) mod_http2: Fix async write issue that led to the wrong timeout (instead
+ of the keepalive timeout) being selected for idle sessions. [Stefan Eissing]
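   Illustration (editor's sketch, not part of the patch): the selection this
   entry describes, using a hypothetical demo_session type in place of the
   module's h2_session. While streams are open, the regular Timeout governs the
   idle wait; once the last stream closes, the KeepAliveTimeout does (see the
   h2_session_ev_stream_open/_done handlers further below).

       #include <apr_time.h>

       /* hypothetical stand-in for the relevant h2_session fields */
       typedef struct {
           apr_interval_time_t timeout;            /* server Timeout */
           apr_interval_time_t keep_alive_timeout; /* server KeepAliveTimeout */
           int open_streams;                       /* streams currently open */
           apr_time_t idle_until;                  /* deadline of the idle wait */
       } demo_session;

       static void demo_update_idle_timeout(demo_session *s, apr_time_t now)
       {
           s->idle_until = now + (s->open_streams > 0
                                  ? s->timeout : s->keep_alive_timeout);
       }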
+
+ *) mod_http2: checking configured LimitRequestLine, LimitRequestFields and
+ LimitRequestFieldSize values for incoming streams. Returning HTTP status
+ 431 for too long or too many header fields and 414 for a too long
+ pseudo header. [Stefan Eissing]
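   Illustration (editor's sketch, not part of the patch): a condensed version
   of the checks the patch adds in h2_stream_add_header() further below. It
   returns 0 when a header is within the configured limits, otherwise the HTTP
   status to answer with; the server_rec fields are the standard httpd limit
   settings behind the directives named above.

       #include <httpd.h>   /* server_rec, HTTP_* status codes */

       static int demo_check_header_limits(const server_rec *s, int is_pseudo,
                                           apr_size_t nlen, apr_size_t vlen,
                                           int headers_seen)
       {
           if (is_pseudo) {
               /* pseudo headers approximate the request line: LimitRequestLine */
               return (vlen > s->limit_req_line)? HTTP_REQUEST_URI_TOO_LARGE : 0;
           }
           if ((nlen + 2 + vlen) > s->limit_req_fieldsize) {
               return HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE; /* 431, LimitRequestFieldSize */
           }
           if (headers_seen + 1 > s->limit_req_fields) {
               return HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE; /* 431, LimitRequestFields */
           }
           return 0;
       }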
+
+ *) mod_http2: tracking conn_rec->current_thread on slave connections, so
+ that mod_lua finds the correct one. Fixes PR 59542. [Stefan Eissing]
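   Illustration (editor's sketch, not part of the patch): the gist of the fix
   as applied in h2_task_do() and the worker loop below, shown as a
   hypothetical helper. The worker's apr_thread_t is recorded on the slave
   conn_rec so that modules inspecting c->current_thread (e.g. mod_lua) see
   the thread that actually processes the request.

       #include <apr_thread_proc.h>
       #include <httpd.h>
       #include <http_connection.h>

       static void demo_run_on_slave(conn_rec *slave_c, apr_thread_t *worker_thread)
       {
           slave_c->current_thread = worker_thread;   /* what mod_lua looks at */
           ap_run_process_connection(slave_c);
       }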
+
*) mod_proxy_http2: new experimental http2 proxy module for h2: and h2c: proxy
urls. Part of the httpd mod_proxy framework, common settings apply.
Requests from the same HTTP/2 frontend connection against the same backend
@echo $(DL)GEN $@$(DL)
@echo $(DL) (HTTP2)$(DL) > $@
@echo $(DL) http2_module,$(DL) >> $@
+ @echo $(DL) h2_casecmpstrn,$(DL) >> $@
@echo $(DL) h2_ihash_add,$(DL) >> $@
@echo $(DL) h2_ihash_clear,$(DL) >> $@
@echo $(DL) h2_ihash_count,$(DL) >> $@
}
}
-static void set_basic_http_header(request_rec *r, apr_table_t *headers)
+void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r,
+ apr_pool_t *pool)
{
char *date = NULL;
const char *proxy_date = NULL;
* keep the set-by-proxy server and date headers, otherwise
* generate a new server header / date header
*/
- if (r->proxyreq != PROXYREQ_NONE) {
+ if (r && r->proxyreq != PROXYREQ_NONE) {
proxy_date = apr_table_get(r->headers_out, "Date");
if (!proxy_date) {
/*
* our own Date header and pass it over to proxy_date later to
* avoid a compiler warning.
*/
- date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
ap_recent_rfc822_date(date, r->request_time);
}
server = apr_table_get(r->headers_out, "Server");
}
else {
- date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
- ap_recent_rfc822_date(date, r->request_time);
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r? r->request_time : apr_time_now());
}
apr_table_setn(headers, "Date", proxy_date ? proxy_date : date );
- apr_table_unset(r->headers_out, "Date");
+ if (r) {
+ apr_table_unset(r->headers_out, "Date");
+ }
if (!server && *us) {
server = us;
}
if (server) {
apr_table_setn(headers, "Server", server);
- apr_table_unset(r->headers_out, "Server");
+ if (r) {
+ apr_table_unset(r->headers_out, "Server");
+ }
}
}
headers = apr_table_make(r->pool, 10);
- set_basic_http_header(r, headers);
+ h2_from_h1_set_basic_http_header(headers, r, r->pool);
if (r->status == HTTP_NOT_MODIFIED) {
apr_table_do((int (*)(void *, const char *, const char *)) copy_header,
(void *) headers, r->headers_out,
apr_status_t h2_response_trailers_filter(ap_filter_t *f, apr_bucket_brigade *bb);
+void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r,
+ apr_pool_t *pool);
+
#endif /* defined(__mod_h2__h2_from_h1__) */
m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->sready = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->q = h2_iq_create(m->pool, m->max_streams);
m->tasks = h2_ihash_create(m->pool, offsetof(h2_task,stream_id));
- m->ready_tasks = h2_ihash_create(m->pool, offsetof(h2_task,stream_id));
m->stream_timeout = stream_timeout;
m->workers = workers;
&& !task->rst_error);
h2_ihash_remove(m->tasks, task->stream_id);
- h2_ihash_remove(m->ready_tasks, task->stream_id);
if (m->redo_tasks) {
h2_ihash_remove(m->redo_tasks, task->stream_id);
}
* stream destruction until the task is done.
*/
h2_iq_remove(m->q, stream->id);
- h2_ihash_remove(m->ready_tasks, stream->id);
+ h2_ihash_remove(m->sready, stream->id);
h2_ihash_remove(m->streams, stream->id);
if (stream->input) {
m->tx_handles_reserved += h2_beam_get_files_beamed(stream->input);
return status;
}
-static int task_iter_first(void *ctx, void *val)
+static int stream_iter_first(void *ctx, void *val)
{
- task_iter_ctx *tctx = ctx;
- h2_task *task = val;
- tctx->task = task;
+ h2_stream **pstream = ctx;
+ *pstream = val;
return 0;
}
AP_DEBUG_ASSERT(m);
if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- task_iter_ctx ctx;
- ctx.m = m;
- ctx.task = NULL;
- h2_ihash_iter(m->ready_tasks, task_iter_first, &ctx);
-
- if (ctx.task && !m->aborted) {
- h2_task *task = ctx.task;
-
- h2_ihash_remove(m->ready_tasks, task->stream_id);
- stream = h2_ihash_get(m->streams, task->stream_id);
- if (stream && task) {
+ h2_ihash_iter(m->sready, stream_iter_first, &stream);
+ if (stream) {
+ h2_task *task = h2_ihash_get(m->tasks, stream->id);
+ h2_ihash_remove(m->sready, stream->id);
+ if (task) {
task->submitted = 1;
if (task->rst_error) {
h2_stream_rst(stream, task->rst_error);
task->output.beam);
}
}
- else if (task) {
- /* We have the io ready, but the stream has gone away, maybe
- * reset by the client. Should no longer happen since such
- * streams should clear io's from the ready queue.
- */
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03347)
- "h2_mplx(%s): stream for response closed, "
- "resetting io to close request processing",
- task->id);
- h2_task_rst(task, H2_ERR_STREAM_CLOSED);
- if (!task->worker_started || task->worker_done) {
- task_destroy(m, task, 1);
- }
- else {
- /* hang around until the h2_task is done, but
- * shutdown output */
- h2_task_shutdown(task, 0);
- }
+ else {
+ /* We have the stream ready without a task. This happens
+ * when we fail streams early. A response should already
+ * be present. */
+ AP_DEBUG_ASSERT(stream->response || stream->rst_error);
}
}
leave_mutex(m, acquired);
h2_beam_mutex_set(task->output.beam, beam_enter, task->cond, m);
}
- h2_ihash_add(m->ready_tasks, task);
+ h2_ihash_add(m->sready, stream);
if (response && response->http_status < 300) {
/* we might see some file buckets in the output, see
* if we have enough handles reserved. */
if (!task->response && !task->rst_error) {
/* In case a close comes before a response was created,
- * insert an error one so that our streams can properly
- * reset.
+ * insert an error one so that our streams can properly reset.
*/
- h2_response *r = h2_response_die(task->stream_id, APR_EGENERAL,
+ h2_response *r = h2_response_die(task->stream_id, 500,
task->request, m->pool);
status = out_open(m, task->stream_id, r);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c,
if (m->aborted) {
status = APR_ECONNABORTED;
}
+ else if (stream->response) {
+ /* already have a response, schedule for submit */
+ h2_ihash_add(m->sready, stream);
+ }
else {
h2_beam_create(&stream->input, stream->pool, stream->id,
"input", 0);
unsigned int need_registration : 1;
struct h2_ihash_t *streams; /* all streams currently processing */
+ struct h2_ihash_t *sready; /* all streams ready for response */
struct h2_ihash_t *shold; /* all streams done with task ongoing */
struct h2_ihash_t *spurge; /* all streams done, ready for destroy */
struct h2_iqueue *q; /* all stream ids that need to be started */
struct h2_ihash_t *tasks; /* all tasks started and not destroyed */
- struct h2_ihash_t *ready_tasks; /* all tasks ready for submit */
struct h2_ihash_t *redo_tasks; /* all tasks that need to be redone */
apr_uint32_t max_streams; /* max # of concurrent streams */
ngn->no_live++;
*ptask = entry->task;
entry->task->assigned = ngn;
+ /* task will now run in ngn's own thread. Modules like lua
+ * seem to require the correct thread set in the conn_rec.
+ * See PR 59542. */
+ if (entry->task->c && ngn->c) {
+ entry->task->c->current_thread = ngn->c->current_thread;
+ }
return APR_SUCCESS;
}
{
apr_table_t *headers = apr_table_make(pool, 5);
char *date = NULL;
+ int status = (type >= 200 && type < 600)? type : 500;
date = apr_palloc(pool, APR_RFC822_DATE_LEN);
ap_recent_rfc822_date(date, req->request_time);
apr_table_setn(headers, "Date", date);
apr_table_setn(headers, "Server", ap_get_server_banner());
- return h2_response_create_int(stream_id, 0, 500, headers, NULL, pool);
+ return h2_response_create_int(stream_id, 0, status, headers, NULL, pool);
}
h2_response *h2_response_clone(apr_pool_t *pool, h2_response *from)
stream = h2_stream_open(stream_id, stream_pool, session,
initiated_on, req);
- ++session->open_streams;
++session->unanswered_streams;
nghttp2_session_set_stream_user_data(session->ngh2, stream_id, stream);
h2_ihash_add(session->streams, stream);
session->remote.emitted_max = stream->id;
}
}
+ dispatch_event(session, H2_SESSION_EV_STREAM_OPEN, 0, NULL);
return stream;
}
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03063)
- "h2_session(%ld): recv unknown FRAME[%s], frames=%ld/%ld (r/s)",
+ "h2_session(%ld): recv invalid FRAME[%s], frames=%ld/%ld (r/s)",
session->id, buffer, (long)session->frames_received,
(long)session->frames_sent);
}
h2_stream * stream;
apr_status_t status;
- (void)ngh2;
(void)flags;
if (!is_accepting_streams(session)) {
/* just ignore */
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c,
APLOGNO(02920)
- "h2_session: stream(%ld-%d): on_header for unknown stream",
+ "h2_session: stream(%ld-%d): on_header unknown stream",
session->id, (int)frame->hd.stream_id);
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
}
status = h2_stream_add_header(stream, (const char *)name, namelen,
(const char *)value, valuelen);
-
- if (status != APR_SUCCESS) {
+ if (status != APR_SUCCESS && !stream->response) {
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
}
return 0;
stream->id, err);
}
+ stream->submitted = 1;
--session->unanswered_streams;
if (stream->request && stream->request->initiated_on) {
++session->pushes_submitted;
else {
++session->responses_submitted;
}
-
+
if (nghttp2_is_fatal(rv)) {
status = APR_EGENERAL;
dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
"h2_stream(%ld-%d): EOS bucket cleanup -> done",
session->id, stream->id);
h2_ihash_remove(session->streams, stream->id);
- --session->open_streams;
--session->unanswered_streams;
h2_mplx_stream_done(session->mplx, stream);
+ dispatch_event(session, H2_SESSION_EV_STREAM_DONE, 0, NULL);
return APR_SUCCESS;
}
* CPU cycles. Ideally, we'd like to do a blocking read, but that
* is not possible if we have scheduled tasks and wait
* for them to produce something. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_session(%ld): NO_IO event, %d streams open",
+ session->id, session->open_streams);
if (!session->open_streams) {
if (!is_accepting_streams(session)) {
/* We are no longer accepting new streams and have
apr_time_t now = apr_time_now();
/* When we have no streams, no task events are possible,
* switch to blocking reads */
- transit(session, "no io", H2_SESSION_ST_IDLE);
+ transit(session, "no io (keepalive)", H2_SESSION_ST_IDLE);
session->idle_until = (session->remote.emitted_count?
session->s->keep_alive_timeout :
session->s->timeout) + now;
}
else if (!has_unsubmitted_streams(session)
&& !has_suspended_streams(session)) {
+ transit(session, "no io (flow wait)", H2_SESSION_ST_IDLE);
+ session->idle_until = apr_time_now() + session->s->timeout;
+ session->keep_sync_until = session->idle_until;
/* none of our streams is waiting for a response or
* new output data from task processing,
* switch to blocking reads. We are probably waiting on
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
return;
}
- transit(session, "no io", H2_SESSION_ST_IDLE);
- session->idle_until = apr_time_now() + session->s->timeout;
- session->keep_sync_until = session->idle_until;
}
else {
/* Unable to do blocking reads, as we wait on events from
}
}
+static void h2_session_ev_stream_open(h2_session *session, int arg, const char *msg)
+{
+ ++session->open_streams;
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ if (session->open_streams == 1) {
+ /* enter normal timeout, since we have a stream again */
+ session->idle_until = (session->s->timeout + apr_time_now());
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void h2_session_ev_stream_done(h2_session *session, int arg, const char *msg)
+{
+ --session->open_streams;
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ if (session->open_streams == 0) {
+ /* enter keepalive timeout, since we no longer have streams */
+ session->idle_until = (session->s->keep_alive_timeout
+ + apr_time_now());
+ }
+ break;
+ default:
+ break;
+ }
+}
+
static void dispatch_event(h2_session *session, h2_session_event_t ev,
int arg, const char *msg)
{
case H2_SESSION_EV_PRE_CLOSE:
h2_session_ev_pre_close(session, arg, msg);
break;
+ case H2_SESSION_EV_STREAM_OPEN:
+ h2_session_ev_stream_open(session, arg, msg);
+ break;
+ case H2_SESSION_EV_STREAM_DONE:
+ h2_session_ev_stream_done(session, arg, msg);
+ break;
default:
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"h2_session(%ld): unknown event %d",
/* make certain, we send everything before we idle */
if (!session->keep_sync_until && async && !session->open_streams
&& !session->r && session->remote.emitted_count) {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
- "h2_session(%ld): async idle, nonblock read", session->id);
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): async idle, nonblock read, "
+ "%d streams open", session->id,
+ session->open_streams);
/* We do not return to the async mpm immediately, since under
* load, mpms show the tendency to throw keep_alive connections
* away very rapidly.
}
}
else {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): sync idle, stutter 1-sec, "
+ "%d streams open", session->id,
+ session->open_streams);
/* We wait in smaller increments, using a 1 second timeout.
* That gives us the chance to check for MPMQ_STOPPING often.
*/
session->keep_sync_until = 0;
}
if (now > session->idle_until) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): keepalive timeout",
+ session->id);
dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
}
+ else {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): keepalive, %f sec left",
+ session->id, (session->idle_until - now) / 1000000.0f);
+ }
/* continue reading handling */
}
else {
H2_SESSION_EV_NGH2_DONE, /* nghttp2 wants neither read nor write anything */
H2_SESSION_EV_MPM_STOPPING, /* the process is stopping */
H2_SESSION_EV_PRE_CLOSE, /* connection will close after this */
+ H2_SESSION_EV_STREAM_OPEN, /* stream has been opened */
+ H2_SESSION_EV_STREAM_DONE, /* stream has been handled completely */
} h2_session_event_t;
typedef struct h2_session {
const char *value, size_t vlen)
{
AP_DEBUG_ASSERT(stream);
+ if (!stream->response) {
+ if (name[0] == ':') {
+ if ((vlen) > stream->session->s->limit_req_line) {
+ /* pseudo header: approximation of request line size check */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): pseudo header %s too long",
+ stream->session->id, stream->id, name);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_URI_TOO_LARGE);
+ }
+ }
+ else if ((nlen + 2 + vlen) > stream->session->s->limit_req_fieldsize) {
+ /* header too long */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): header %s too long",
+ stream->session->id, stream->id, name);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+ }
+
+ if (name[0] != ':') {
+ ++stream->request_headers_added;
+ if (stream->request_headers_added
+ > stream->session->s->limit_req_fields) {
+ /* too many header lines */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): too many header lines",
+ stream->session->id, stream->id);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+ }
+ }
+ }
+
if (h2_stream_is_scheduled(stream)) {
return h2_request_add_trailer(stream->request, stream->pool,
name, nlen, value, vlen);
close_input(stream);
}
+ if (stream->response) {
+ /* already have a response, probably an HTTP error code */
+ return h2_mplx_process(stream->session->mplx, stream, cmp, ctx);
+ }
+
/* Seeing the end-of-headers, we have everything we need to
* start processing it.
*/
return status;
}
+apr_status_t h2_stream_set_error(h2_stream *stream, int http_status)
+{
+ h2_response *response;
+
+ if (stream->submitted) {
+ return APR_EINVAL;
+ }
+ response = h2_response_die(stream->id, http_status, stream->request,
+ stream->pool);
+ return h2_stream_set_response(stream, response, NULL);
+}
+
static const apr_size_t DATA_CHUNK_SIZE = ((16*1024) - 100 - 9);
apr_status_t h2_stream_out_prepare(h2_stream *stream,
apr_pool_t *pool; /* the memory pool for this stream */
struct h2_request *request; /* the request made in this stream */
struct h2_bucket_beam *input;
-
+ int request_headers_added; /* number of request headers added */
+
struct h2_response *response;
struct h2_bucket_beam *output;
apr_bucket_brigade *buffer;
struct h2_response *response,
struct h2_bucket_beam *output);
+/**
+ * Set the HTTP error status as response.
+ */
+apr_status_t h2_stream_set_error(h2_stream *stream, int http_status);
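   Usage sketch (editor's illustration, mirroring the calls added in
   h2_stream_add_header() above): reject a stream whose header block exceeds
   the configured limits before it is scheduled:

       return h2_stream_set_error(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);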
+
/**
* Do a speculative read on the stream output to determine the
* amount of data that can be read.
task->blocking = blocking;
}
-apr_status_t h2_task_do(h2_task *task)
+apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread)
{
AP_DEBUG_ASSERT(task);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
"h2_task(%s): process connection", task->id);
+ task->c->current_thread = thread;
ap_run_process_connection(task->c);
if (task->frozen) {
void h2_task_destroy(h2_task *task);
-apr_status_t h2_task_do(h2_task *task);
+apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread);
void h2_task_set_response(h2_task *task, struct h2_response *response);
* @macro
* Version number of the http2 module as c string
*/
-#define MOD_HTTP2_VERSION "1.5.4"
+#define MOD_HTTP2_VERSION "1.5.5"
/**
* @macro
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_HTTP2_VERSION_NUM 0x010504
+#define MOD_HTTP2_VERSION_NUM 0x010505
#endif /* mod_h2_h2_version_h */
worker->get_next(worker, worker->ctx, &task, &sticky);
while (task) {
- h2_task_do(task);
+ h2_task_do(task, thread);
/* report the task done and maybe get another one from the same
* mplx (= master connection), if we can be sticky.
*/