From: Stefan Eissing
Date: Wed, 26 Oct 2016 15:14:11 +0000 (+0000)
Subject: mod_http2: AP_DEBUG_ASSERT changed to ap_assert
X-Git-Tag: 2.5.0-alpha~1064
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a7719a1eedfe008283cb7b172516d8b4fea08113;p=thirdparty%2Fapache%2Fhttpd.git

mod_http2: AP_DEBUG_ASSERT changed to ap_assert

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1766691 13f79535-47bb-0310-9956-ffa450edef68
---

diff --git a/CHANGES b/CHANGES
index 31b0fac848c..63fe4d4c8ca 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,6 +1,9 @@
  -*- coding: utf-8 -*-
 Changes with Apache 2.5.0
 
+  *) mod_http2: changed all AP_DEBUG_ASSERT to ap_assert to have them
+     available also in normal deployments. [Stefan Eissing]
+
   *) mpm_unix: Apache fails to start if previously crashed then restarted with
      the same PID (e.g. in container). PR 60261. [Val , Yann Ylavic]
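The macro swap named in the CHANGES entry above is the whole point of the commit: AP_DEBUG_ASSERT only has an effect in maintainer (AP_DEBUG) builds, while ap_assert is always compiled in and aborts the child process when its condition fails. A minimal sketch of that difference, using simplified stand-in macros rather than the literal definitions from httpd's httpd.h:

```c
/* Sketch only: simplified stand-ins for httpd's assertion macros, showing
 * why AP_DEBUG_ASSERT disappears in normal builds while ap_assert does not.
 * The real macros in httpd.h route through ap_log_assert(); these do not. */
#include <stdio.h>
#include <stdlib.h>

#define AP_ASSERT_SKETCH(exp) \
    ((exp) ? (void)0 \
           : (fprintf(stderr, "assertion \"%s\" failed at %s:%d\n", \
                      #exp, __FILE__, __LINE__), abort()))

#ifdef AP_DEBUG
#define AP_DEBUG_ASSERT_SKETCH(exp) AP_ASSERT_SKETCH(exp)
#else
#define AP_DEBUG_ASSERT_SKETCH(exp) ((void)0)  /* compiled away without -DAP_DEBUG */
#endif

int main(void)
{
    void *beam = NULL;
    AP_DEBUG_ASSERT_SKETCH(beam != NULL);  /* no-op in a normal deployment build */
    AP_ASSERT_SKETCH(beam != NULL);        /* always checked: aborts here */
    return 0;
}
```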
diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c
index 6c40687943a..22b2e909c97 100644
--- a/modules/http2/h2_bucket_beam.c
+++ b/modules/http2/h2_bucket_beam.c
@@ -377,7 +377,7 @@ static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
                           APLOGNO(03384) "h2_beam(%d-%s): emitted bucket not "
                           "in hold, n=%d", beam->id, beam->tag, (int)proxy->n);
-            AP_DEBUG_ASSERT(!proxy->bred);
+            ap_assert(!proxy->bred);
         }
     }
     /* notify anyone waiting on space to become available */
@@ -413,6 +413,32 @@ static apr_status_t beam_close(h2_bucket_beam *beam)
 }
 
 static void beam_set_red_pool(h2_bucket_beam *beam, apr_pool_t *pool);
+static void beam_set_green_pool(h2_bucket_beam *beam, apr_pool_t *pool);
+
+static apr_status_t beam_green_cleanup(void *data)
+{
+    h2_bucket_beam *beam = data;
+
+    if (beam->green) {
+        apr_brigade_destroy(beam->green);
+        beam->green = NULL;
+    }
+    beam->green_pool = NULL;
+    return APR_SUCCESS;
+}
+
+static void beam_set_green_pool(h2_bucket_beam *beam, apr_pool_t *pool)
+{
+    if (beam->green_pool != pool) {
+        if (beam->green_pool) {
+            apr_pool_cleanup_kill(beam->green_pool, beam, beam_green_cleanup);
+        }
+        beam->green_pool = pool;
+        if (beam->green_pool) {
+            apr_pool_pre_cleanup_register(beam->green_pool, beam, beam_green_cleanup);
+        }
+    }
+}
 
 static apr_status_t beam_red_cleanup(void *data)
 {
@@ -429,8 +455,7 @@ static apr_status_t beam_red_cleanup(void *data)
     }
     h2_blist_cleanup(&beam->purge);
     h2_blist_cleanup(&beam->hold);
-    beam_set_red_pool(beam, NULL);
-
+    beam->red_pool = NULL;
     return APR_SUCCESS;
 }
 
@@ -453,10 +478,16 @@ static apr_status_t beam_cleanup(void *data)
     apr_status_t status;
 
     beam_close(beam);
+    if (beam->green_pool) {
+        apr_pool_cleanup_kill(beam->green_pool, beam, beam_green_cleanup);
+        status = beam_green_cleanup(beam);
+    }
+    if (beam->red_pool) {
+        apr_pool_cleanup_kill(beam->red_pool, beam, beam_red_cleanup);
         status = beam_red_cleanup(beam);
     }
-    return APR_SUCCESS;
+    return status;
 }
 
 apr_status_t h2_beam_destroy(h2_bucket_beam *beam)
@@ -582,27 +613,15 @@ apr_status_t h2_beam_close(h2_bucket_beam *beam)
     return beam->aborted? APR_ECONNABORTED : APR_SUCCESS;
 }
 
-apr_status_t h2_beam_shutdown(h2_bucket_beam *beam, apr_read_type_e block,
-                              int clear_buffers)
+apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block)
 {
     apr_status_t status;
     h2_beam_lock bl;
 
     if ((status = enter_yellow(beam, &bl)) == APR_SUCCESS) {
-        if (clear_buffers) {
-            r_purge_reds(beam);
-            h2_blist_cleanup(&beam->red);
-            if (!bl.mutex && beam->green) {
-                /* not protected, may process green in red call */
-                apr_brigade_destroy(beam->green);
-                beam->green = NULL;
-            }
-        }
-        beam_close(beam);
-
-        while (status == APR_SUCCESS
-               && (!H2_BPROXY_LIST_EMPTY(&beam->proxies)
-                   || (beam->green && !APR_BRIGADE_EMPTY(beam->green)))) {
+        while (status == APR_SUCCESS
+               && !H2_BLIST_EMPTY(&beam->red)
+               && !H2_BPROXY_LIST_EMPTY(&beam->proxies)) {
             if (block == APR_NONBLOCK_READ || !bl.mutex) {
                 status = APR_EAGAIN;
                 break;
@@ -810,6 +829,7 @@ transfer:
         }
 
         /* transfer enough buckets from our green brigade, if we have one */
+        beam_set_green_pool(beam, bb->p);
         while (beam->green
                && !APR_BRIGADE_EMPTY(beam->green)
                && (readbytes <= 0 || remain >= 0)) {
@@ -1068,11 +1088,6 @@ int h2_beam_holds_proxies(h2_bucket_beam *beam)
     return has_proxies;
 }
 
-int h2_beam_closed(h2_bucket_beam *beam)
-{
-    return beam->closed;
-}
-
 int h2_beam_was_received(h2_bucket_beam *beam)
 {
     int happend = 0;
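The new beam_set_green_pool()/beam_green_cleanup() pair above follows a common APR idiom: as soon as the beam starts holding references into a pool it does not own, it registers a pre-cleanup on that pool, and re-registers whenever the pool changes, so the references are dropped before the pool's memory goes away. A self-contained sketch of that idiom under assumed names (tracker_t and its fields are illustrative, not part of mod_http2; only the APR calls are real):

```c
/* Sketch of the pool-cleanup registration idiom used by beam_set_green_pool():
 * track which pool we hold references into, and make sure a pre-cleanup on
 * that pool drops the references before the pool is destroyed. */
#include <apr_pools.h>

typedef struct {
    apr_pool_t *pool;     /* pool we currently hold references into */
    void       *resource; /* whatever must be released before pool death */
} tracker_t;

static apr_status_t tracker_cleanup(void *data)
{
    tracker_t *t = data;
    t->resource = NULL;   /* drop references before the pool goes away */
    t->pool = NULL;
    return APR_SUCCESS;
}

static void tracker_set_pool(tracker_t *t, apr_pool_t *pool)
{
    if (t->pool != pool) {
        if (t->pool) {
            /* forget the old registration so it does not fire later */
            apr_pool_cleanup_kill(t->pool, t, tracker_cleanup);
        }
        t->pool = pool;
        if (t->pool) {
            /* pre-cleanups run before the pool's memory is torn down */
            apr_pool_pre_cleanup_register(t->pool, t, tracker_cleanup);
        }
    }
}
```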
diff --git a/modules/http2/h2_bucket_beam.h b/modules/http2/h2_bucket_beam.h
index 655e03091b1..4c779d1f21a 100644
--- a/modules/http2/h2_bucket_beam.h
+++ b/modules/http2/h2_bucket_beam.h
@@ -88,7 +88,7 @@ apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
  * Care needs to be taken when terminating the beam. The beam registers at
  * the pool it was created with and will cleanup after itself. However, if
  * received buckets do still exist, already freed memory might be accessed.
- * The beam does a AP_DEBUG_ASSERT on this condition.
+ * The beam does an assertion on this condition.
  *
  * The proper way of shutting down a beam is to first make sure there are no
  * more green buckets out there, then cleanup the beam to purge eventually
@@ -179,6 +179,7 @@ struct h2_bucket_beam {
     apr_bucket_brigade *green;
     h2_bproxy_list proxies;
     apr_pool_t *red_pool;
+    apr_pool_t *green_pool;
 
     apr_size_t max_buf_size;
     apr_interval_time_t timeout;
@@ -259,13 +260,6 @@ apr_status_t h2_beam_receive(h2_bucket_beam *beam,
                              apr_read_type_e block,
                              apr_off_t readbytes);
 
-/**
- * Determine if beam is closed. May still contain buffered data.
- *
- * Call from red or green side.
- */
-int h2_beam_closed(h2_bucket_beam *beam);
-
 /**
  * Determine if beam is empty.
  *
@@ -305,8 +299,7 @@ apr_status_t h2_beam_close(h2_bucket_beam *beam);
  *
  * Call from the red side only.
  */
-apr_status_t h2_beam_shutdown(h2_bucket_beam *beam, apr_read_type_e block,
-                              int clear_buffers);
+apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block);
 
 void h2_beam_mutex_set(h2_bucket_beam *beam,
                        h2_beam_mutex_enter m_enter,
diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c
index 251c3f05d2f..5613e8a479e 100644
--- a/modules/http2/h2_config.c
+++ b/modules/http2/h2_config.c
@@ -198,7 +198,7 @@ const h2_config *h2_config_sget(server_rec *s)
 {
     h2_config *cfg = (h2_config *)ap_get_module_config(s->module_config,
                                                        &http2_module);
-    AP_DEBUG_ASSERT(cfg);
+    ap_assert(cfg);
     return cfg;
 }
 
diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c
index d4c139472a3..a0915c3eb44 100644
--- a/modules/http2/h2_conn.c
+++ b/modules/http2/h2_conn.c
@@ -248,7 +248,7 @@ conn_rec *h2_slave_create(conn_rec *master, int slave_id,
     conn_rec *c;
     void *cfg;
 
-    AP_DEBUG_ASSERT(master);
+    ap_assert(master);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
                   "h2_conn(%ld): create slave", master->id);
 
diff --git a/modules/http2/h2_conn_io.c b/modules/http2/h2_conn_io.c
index 6ba24faa6fb..303860eeb89 100644
--- a/modules/http2/h2_conn_io.c
+++ b/modules/http2/h2_conn_io.c
@@ -206,7 +206,7 @@ static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b)
         return APR_SUCCESS;
     }
 
-    AP_DEBUG_ASSERT(b->length <= (io->ssize - io->slen));
+    ap_assert(b->length <= (io->ssize - io->slen));
     if (APR_BUCKET_IS_FILE(b)) {
         apr_bucket_file *f = (apr_bucket_file *)b->data;
         apr_file_t *fd = f->fd;
diff --git a/modules/http2/h2_ctx.c b/modules/http2/h2_ctx.c
index 4b596a3d78f..e79b5f805d0 100644
--- a/modules/http2/h2_ctx.c
+++ b/modules/http2/h2_ctx.c
@@ -27,7 +27,7 @@ static h2_ctx *h2_ctx_create(const conn_rec *c)
 {
     h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx));
 
-    AP_DEBUG_ASSERT(ctx);
+    ap_assert(ctx);
     ap_set_module_config(c->conn_config, &http2_module, ctx);
     h2_ctx_server_set(ctx, c->base_server);
     return ctx;
@@ -35,7 +35,7 @@ static h2_ctx *h2_ctx_create(const conn_rec *c)
 
 void h2_ctx_clear(const conn_rec *c)
 {
-    AP_DEBUG_ASSERT(c);
+    ap_assert(c);
     ap_set_module_config(c->conn_config, &http2_module, NULL);
 }
 
diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c
index 7eb835fd903..2b4f79ac140 100644
--- a/modules/http2/h2_from_h1.c
+++ b/modules/http2/h2_from_h1.c
@@ -727,7 +727,7 @@ apr_status_t h2_filter_request_in(ap_filter_t* f,
     ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r,
                   "h2_task(%s): request filter, exp=%d", task->id, r->expecting_100);
-    if (!task->input.chunked) {
+    if (!task->request->chunked) {
         status = ap_get_brigade(f->next, bb, mode, block, readbytes);
         /* pipe data through, just take care of trailers */
         for (b = APR_BRIGADE_FIRST(bb);
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index 4edd4a61c15..705857b3a53 100644
--- a/modules/http2/h2_mplx.c
+++ b/modules/http2/h2_mplx.c
@@ -90,14 +90,14 @@ static apr_status_t enter_mutex(h2_mplx *m, int *pacquired)
      * This allow recursive entering of the mutex from the saem thread,
      * which is what we need in certain situations involving callbacks
      */
-    AP_DEBUG_ASSERT(m);
+    ap_assert(m);
     apr_threadkey_private_get(&mutex, thread_lock);
     if (mutex == m->lock) {
         *pacquired = 0;
         return APR_SUCCESS;
     }
-    AP_DEBUG_ASSERT(m->lock);
+    ap_assert(m->lock);
     status = apr_thread_mutex_lock(m->lock);
     *pacquired = (status == APR_SUCCESS);
     if (*pacquired) {
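The enter_mutex() hunk above also shows how h2_mplx tolerates re-entry from the same thread: the pointer of the lock currently held is remembered in a thread-private key, so a nested enter from that thread becomes a no-op instead of a deadlock. A hedged, self-contained sketch of that guard; only the APR calls are real, the surrounding names are illustrative:

```c
/* Sketch of the re-entry guard seen in enter_mutex()/leave_mutex() above:
 * remember which mutex this thread already holds in a thread-local key.
 * Names are illustrative; the APR functions exist with these signatures. */
#include <apr_thread_mutex.h>
#include <apr_thread_proc.h>

static apr_threadkey_t *held_lock_key;   /* created once, e.g. at child init */

static apr_status_t guard_init(apr_pool_t *pool)
{
    return apr_threadkey_private_create(&held_lock_key, NULL, pool);
}

static apr_status_t guarded_enter(apr_thread_mutex_t *lock, int *pacquired)
{
    void *held = NULL;
    apr_status_t rv;

    apr_threadkey_private_get(&held, held_lock_key);
    if (held == lock) {
        *pacquired = 0;                  /* this thread already holds it */
        return APR_SUCCESS;
    }
    rv = apr_thread_mutex_lock(lock);
    *pacquired = (rv == APR_SUCCESS);
    if (*pacquired) {
        apr_threadkey_private_set(lock, held_lock_key);
    }
    return rv;
}

static void guarded_leave(apr_thread_mutex_t *lock, int acquired)
{
    if (acquired) {
        apr_threadkey_private_set(NULL, held_lock_key);
        apr_thread_mutex_unlock(lock);
    }
}
```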
@@ -221,13 +221,13 @@ static void purge_streams(h2_mplx *m)
             /* repeat until empty */
         }
         h2_ihash_clear(m->spurge);
-        AP_DEBUG_ASSERT(h2_ihash_empty(m->spurge));
+        ap_assert(h2_ihash_empty(m->spurge));
     }
 }
 
 static void h2_mplx_destroy(h2_mplx *m)
 {
-    AP_DEBUG_ASSERT(m);
+    ap_assert(m);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
                   "h2_mplx(%ld): destroy, tasks=%d",
                   m->id, (int)h2_ihash_count(m->tasks));
@@ -256,7 +256,7 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
     apr_status_t status = APR_SUCCESS;
     apr_allocator_t *allocator = NULL;
     h2_mplx *m;
-    AP_DEBUG_ASSERT(conf);
+    ap_assert(conf);
 
     status = apr_allocator_create(&allocator);
     if (status != APR_SUCCESS) {
@@ -353,7 +353,6 @@ static void task_destroy(h2_mplx *m, h2_task *task, int called_from_master)
 {
     conn_rec *slave = NULL;
     int reuse_slave = 0;
-    apr_status_t status;
 
     ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
                   "h2_task(%s): destroy", task->id);
@@ -365,22 +364,14 @@
         }
     }
 
-    /* The pool is cleared/destroyed which also closes all
-     * allocated file handles. Give this count back to our
-     * file handle pool. */
     if (task->output.beam) {
-        m->tx_handles_reserved +=
-            h2_beam_get_files_beamed(task->output.beam);
         h2_beam_on_produced(task->output.beam, NULL, NULL);
-        status = h2_beam_shutdown(task->output.beam, APR_NONBLOCK_READ, 1);
-        if (status != APR_SUCCESS){
-            ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, m->c,
-                          APLOGNO(03385) "h2_task(%s): output shutdown "
-                          "incomplete, beam empty=%d, holds proxies=%d",
-                          task->id,
-                          h2_beam_empty(task->output.beam),
-                          h2_beam_holds_proxies(task->output.beam));
-        }
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
+                      APLOGNO(03385) "h2_task(%s): destroy "
+                      "output beam empty=%d, holds proxies=%d",
+                      task->id,
+                      h2_beam_empty(task->output.beam),
+                      h2_beam_holds_proxies(task->output.beam));
     }
 
     slave = task->c;
@@ -452,6 +443,9 @@ static void stream_done(h2_mplx *m, h2_stream *stream, int rst_error)
         /* Remove mutex after, so that abort still finds cond to signal */
         h2_beam_mutex_set(stream->input, NULL, NULL, NULL);
     }
+    if (stream->output) {
+        m->tx_handles_reserved += h2_beam_get_files_beamed(stream->output);
+    }
     h2_stream_cleanup(stream);
 
     task = h2_ihash_get(m->tasks, stream->id);
@@ -597,7 +591,7 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
     while (!h2_ihash_iter(m->streams, stream_done_iter, m)) {
         /* iterate until all streams have been removed */
     }
-    AP_DEBUG_ASSERT(h2_ihash_empty(m->streams));
+    ap_assert(h2_ihash_empty(m->streams));
 
     if (!h2_ihash_empty(m->shold)) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
@@ -652,7 +646,7 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
                       m->id, (int)h2_ihash_count(m->tasks));
         h2_ihash_iter(m->tasks, task_print, m);
     }
-    AP_DEBUG_ASSERT(h2_ihash_empty(m->shold));
+    ap_assert(h2_ihash_empty(m->shold));
 
     if (!h2_ihash_empty(m->spurge)) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
                      "h2_mplx(%ld): 3. release_join %d streams to purge",
@@ -677,7 +671,6 @@ void h2_mplx_abort(h2_mplx *m)
 {
     int acquired;
 
-    AP_DEBUG_ASSERT(m);
     if (!m->aborted && enter_mutex(m, &acquired) == APR_SUCCESS) {
         m->aborted = 1;
         h2_ngn_shed_abort(m->ngn_shed);
@@ -690,7 +683,6 @@ apr_status_t h2_mplx_stream_done(h2_mplx *m, h2_stream *stream)
     apr_status_t status = APR_SUCCESS;
     int acquired;
 
-    AP_DEBUG_ASSERT(m);
     if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
                       "h2_mplx(%ld-%d): marking stream as done.",
@@ -707,7 +699,6 @@ h2_stream *h2_mplx_stream_get(h2_mplx *m, int id)
     h2_stream *s = NULL;
     int acquired;
 
-    AP_DEBUG_ASSERT(m);
     if ((enter_mutex(m, &acquired)) == APR_SUCCESS) {
         s = h2_ihash_get(m->streams, id);
         leave_mutex(m, acquired);
@@ -728,7 +719,6 @@ static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
     h2_stream *stream;
     int acquired;
 
-    AP_DEBUG_ASSERT(m);
     if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
         stream = h2_ihash_get(m->streams, beam->id);
         if (stream) {
@@ -780,7 +770,6 @@ apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
     apr_status_t status;
     int acquired;
 
-    AP_DEBUG_ASSERT(m);
     if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
         if (m->aborted) {
             status = APR_ECONNABORTED;
@@ -825,7 +814,6 @@ apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
     apr_status_t status;
     int acquired;
 
-    AP_DEBUG_ASSERT(m);
     if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
         if (m->aborted) {
             status = APR_ECONNABORTED;
@@ -866,7 +854,6 @@ apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
     apr_status_t status;
     int acquired;
 
-    AP_DEBUG_ASSERT(m);
     if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
         if (m->aborted) {
             status = APR_ECONNABORTED;
@@ -888,7 +875,6 @@ apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
     int do_registration = 0;
     int acquired;
 
-    AP_DEBUG_ASSERT(m);
     if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
         if (m->aborted) {
             status = APR_ECONNABORTED;
@@ -981,7 +967,6 @@ h2_task *h2_mplx_pop_task(h2_mplx *m, int *has_more)
     apr_status_t status;
     int acquired;
 
-    AP_DEBUG_ASSERT(m);
     if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
         if (m->aborted) {
             *has_more = 0;
@@ -1411,7 +1396,6 @@ apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
     h2_stream *stream;
     size_t i, n;
 
-    AP_DEBUG_ASSERT(m);
     if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
                       "h2_mplx(%ld): dispatch events", m->id);
@@ -1442,7 +1426,6 @@ apr_status_t h2_mplx_keep_active(h2_mplx *m, int stream_id)
     apr_status_t status;
     int acquired;
 
-    AP_DEBUG_ASSERT(m);
     if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
         h2_stream *s = h2_ihash_get(m->streams, stream_id);
         if (s) {
@@ -1458,7 +1441,6 @@ int h2_mplx_awaits_data(h2_mplx *m)
     apr_status_t status;
     int acquired, waiting = 1;
 
-    AP_DEBUG_ASSERT(m);
    if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
         if (h2_ihash_empty(m->streams)) {
             waiting = 0;
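Most of the h2_mplx.c hunks above simply delete a leading AP_DEBUG_ASSERT(m) from functions whose first real action is enter_mutex(m, ...); since enter_mutex() itself now begins with ap_assert(m), the per-caller checks appear redundant. A sketch of the resulting caller shape; the names are illustrative, not the actual mod_http2 code:

```c
/* Illustration of the pattern behind the AP_DEBUG_ASSERT(m) removals above:
 * the NULL check lives in the shared lock helper, so each caller drops its
 * own assertion. Only the shape matches h2_mplx; names are assumed. */
#include <apr_errno.h>

struct h2_mplx;  /* opaque for this sketch */

apr_status_t enter_mutex(struct h2_mplx *m, int *pacquired);  /* asserts m != NULL */
void leave_mutex(struct h2_mplx *m, int acquired);

static apr_status_t mplx_do_something(struct h2_mplx *m)
{
    apr_status_t status;
    int acquired;

    /* no ap_assert(m) needed here any more: enter_mutex() performs it */
    if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
        /* ... work under the lock ... */
        leave_mutex(m, acquired);
    }
    return status;
}
```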
diff --git a/modules/http2/h2_ngn_shed.c b/modules/http2/h2_ngn_shed.c
index d65fa1b68d8..2b132f0a13f 100644
--- a/modules/http2/h2_ngn_shed.c
+++ b/modules/http2/h2_ngn_shed.c
@@ -215,7 +215,7 @@ apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type,
                       "h2_ngn_shed(%ld): create engine %s (%s)",
                       shed->c->id, newngn->id, newngn->type);
         if (status == APR_SUCCESS) {
-            AP_DEBUG_ASSERT(task->engine == NULL);
+            ap_assert(task->engine == NULL);
             newngn->task = task;
             task->engine = newngn;
             task->assigned = newngn;
@@ -252,7 +252,7 @@ apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed,
 {
     h2_ngn_entry *entry;
 
-    AP_DEBUG_ASSERT(ngn);
+    ap_assert(ngn);
     *pr = NULL;
     ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, shed->c, APLOGNO(03396)
                   "h2_ngn_shed(%ld): pull task for engine %s, shutdown=%d",
diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
index c5d00fe4978..a79c5da4795 100644
--- a/modules/http2/h2_proxy_session.c
+++ b/modules/http2/h2_proxy_session.c
@@ -926,7 +926,7 @@ static apr_status_t session_shutdown(h2_proxy_session *session, int reason,
     apr_status_t status = APR_SUCCESS;
     const char *err = msg;
 
-    AP_DEBUG_ASSERT(session);
+    ap_assert(session);
     if (!err && reason) {
         err = nghttp2_strerror(reason);
     }
diff --git a/modules/http2/h2_proxy_util.c b/modules/http2/h2_proxy_util.c
index 4c732788f3d..8089dde5e53 100644
--- a/modules/http2/h2_proxy_util.c
+++ b/modules/http2/h2_proxy_util.c
@@ -425,11 +425,11 @@ h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p,
     h2_proxy_ngheader *ngh;
     size_t n;
 
-    AP_DEBUG_ASSERT(req);
-    AP_DEBUG_ASSERT(req->scheme);
-    AP_DEBUG_ASSERT(req->authority);
-    AP_DEBUG_ASSERT(req->path);
-    AP_DEBUG_ASSERT(req->method);
+    ap_assert(req);
+    ap_assert(req->scheme);
+    ap_assert(req->authority);
+    ap_assert(req->path);
+    ap_assert(req->method);
 
     n = 4;
     apr_table_do(count_header, &n, req->headers, NULL);
@@ -608,10 +608,10 @@ apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool,
     req->authority = authority;
     req->path = path;
 
-    AP_DEBUG_ASSERT(req->scheme);
-    AP_DEBUG_ASSERT(req->authority);
-    AP_DEBUG_ASSERT(req->path);
-    AP_DEBUG_ASSERT(req->method);
+    ap_assert(req->scheme);
+    ap_assert(req->authority);
+    ap_assert(req->path);
+    ap_assert(req->method);
 
     x.pool = pool;
     x.headers = req->headers;
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
index 78bd7d72bdb..27ed9197b51 100644
--- a/modules/http2/h2_session.c
+++ b/modules/http2/h2_session.c
@@ -711,7 +711,7 @@ static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb)
 
 static void h2_session_destroy(h2_session *session)
 {
-    AP_DEBUG_ASSERT(session);
+    ap_assert(session);
     h2_ihash_clear(session->streams);
     if (session->mplx) {
@@ -743,7 +743,7 @@ static apr_status_t h2_session_shutdown_notice(h2_session *session)
 {
     apr_status_t status;
 
-    AP_DEBUG_ASSERT(session);
+    ap_assert(session);
     if (!session->local.accepting) {
         return APR_SUCCESS;
     }
@@ -764,7 +764,7 @@ static apr_status_t h2_session_shutdown(h2_session *session, int error,
 {
     apr_status_t status = APR_SUCCESS;
 
-    AP_DEBUG_ASSERT(session);
+    ap_assert(session);
     if (session->local.shutdown) {
         return APR_SUCCESS;
     }
@@ -1034,7 +1034,7 @@ static apr_status_t h2_session_start(h2_session *session, int *rv)
     size_t slen;
     int win_size;
 
-    AP_DEBUG_ASSERT(session);
+    ap_assert(session);
     /* Start the conversation by submitting our SETTINGS frame */
     *rv = 0;
     if (session->r) {
@@ -1154,7 +1154,7 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
     int eos = 0;
     apr_status_t status;
     h2_stream *stream;
-    AP_DEBUG_ASSERT(session);
+    ap_assert(session);
     /* The session wants to send more DATA for the stream. We need
      * to find out how much of the requested length we can send without
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
index 01c7b7c2973..3f177cf54e2 100644
--- a/modules/http2/h2_stream.c
+++ b/modules/http2/h2_stream.c
@@ -173,6 +173,7 @@ static apr_status_t stream_pool_cleanup(void *ctx)
     h2_stream *stream = ctx;
     apr_status_t status;
 
+    ap_assert(stream->can_be_cleaned);
     if (stream->files) {
         apr_file_t *file;
         int i;
@@ -213,31 +214,35 @@ h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session,
 
 void h2_stream_cleanup(h2_stream *stream)
 {
-    AP_DEBUG_ASSERT(stream);
+    apr_status_t status;
+
+    ap_assert(stream);
     if (stream->out_buffer) {
+        /* remove any left over output buckets that may still have
+         * references into request pools */
         apr_brigade_cleanup(stream->out_buffer);
     }
-    if (stream->input) {
-        apr_status_t status;
-        status = h2_beam_shutdown(stream->input, APR_NONBLOCK_READ, 1);
-        if (status == APR_EAGAIN) {
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
-                          "h2_stream(%ld-%d): wait on input shutdown",
-                          stream->session->id, stream->id);
-            status = h2_beam_shutdown(stream->input, APR_BLOCK_READ, 1);
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
-                          "h2_stream(%ld-%d): input shutdown returned",
-                          stream->session->id, stream->id);
-        }
+    h2_beam_abort(stream->input);
+    status = h2_beam_wait_empty(stream->input, APR_NONBLOCK_READ);
+    if (status == APR_EAGAIN) {
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+                      "h2_stream(%ld-%d): wait on input drain",
+                      stream->session->id, stream->id);
+        status = h2_beam_wait_empty(stream->input, APR_BLOCK_READ);
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
+                      "h2_stream(%ld-%d): input drain returned",
+                      stream->session->id, stream->id);
     }
 }
 
 void h2_stream_destroy(h2_stream *stream)
 {
-    AP_DEBUG_ASSERT(stream);
+    ap_assert(stream);
+    ap_assert(!h2_mplx_stream_get(stream->session->mplx, stream->id));
     ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c,
                   "h2_stream(%ld-%d): destroy", stream->session->id, stream->id);
+    stream->can_be_cleaned = 1;
     if (stream->pool) {
         apr_pool_destroy(stream->pool);
     }
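The h2_stream.c hunks above introduce a can_be_cleaned flag: the stream's pool cleanup asserts it, and only h2_stream_destroy() sets it immediately before apr_pool_destroy(), so any other destruction order (for example a parent pool dying first) trips the assertion. A generic sketch of that guard idiom under assumed names (thing_t/thing_destroy are not mod_http2 API):

```c
/* Sketch of the "can_be_cleaned" guard: a pool cleanup that must only run
 * from one controlled destroy path asserts a flag which that path sets just
 * before destroying the pool. */
#include <apr_pools.h>
#include <httpd.h>        /* ap_assert() */

typedef struct {
    apr_pool_t *pool;
    int can_be_cleaned;
} thing_t;

static apr_status_t thing_pool_cleanup(void *ctx)
{
    thing_t *t = ctx;
    ap_assert(t->can_be_cleaned);   /* fires if the pool dies unexpectedly */
    /* ... release external resources held by t ... */
    return APR_SUCCESS;
}

static thing_t *thing_create(apr_pool_t *pool)
{
    thing_t *t = apr_pcalloc(pool, sizeof(*t));
    t->pool = pool;
    apr_pool_cleanup_register(pool, t, thing_pool_cleanup,
                              apr_pool_cleanup_null);
    return t;
}

static void thing_destroy(thing_t *t)
{
    t->can_be_cleaned = 1;          /* the only sanctioned way in */
    apr_pool_destroy(t->pool);      /* runs thing_pool_cleanup() */
}
```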
@@ -327,7 +332,7 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
                                   const char *name, size_t nlen,
                                   const char *value, size_t vlen)
 {
-    AP_DEBUG_ASSERT(stream);
+    ap_assert(stream);
 
     if (!stream->has_response) {
         if (name[0] == ':') {
@@ -383,9 +388,9 @@ apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled,
                                 h2_stream_pri_cmp *cmp, void *ctx)
 {
     apr_status_t status = APR_EINVAL;
-    AP_DEBUG_ASSERT(stream);
-    AP_DEBUG_ASSERT(stream->session);
-    AP_DEBUG_ASSERT(stream->session->mplx);
+    ap_assert(stream);
+    ap_assert(stream->session);
+    ap_assert(stream->session->mplx);
 
     if (!stream->scheduled) {
         if (eos) {
@@ -444,7 +449,9 @@ int h2_stream_is_scheduled(const h2_stream *stream)
 apr_status_t h2_stream_close_input(h2_stream *stream)
 {
     conn_rec *c = stream->session->c;
-    apr_status_t status = APR_SUCCESS, rv;
+    apr_status_t status;
+    apr_bucket_brigade *tmp;
+    apr_bucket *b;
 
     ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
                   "h2_stream(%ld-%d): closing input",
@@ -453,27 +460,20 @@ apr_status_t h2_stream_close_input(h2_stream *stream)
         return APR_ECONNRESET;
     }
 
-    if (!stream->input) {
-        h2_beam_create(&stream->input, stream->pool, stream->id, "input", 0);
-    }
-
+    tmp = apr_brigade_create(stream->pool, c->bucket_alloc);
     if (stream->trailers && !apr_is_empty_table(stream->trailers)) {
         h2_headers *r = h2_headers_create(HTTP_OK, stream->trailers, NULL, stream->pool);
-        apr_bucket *b = h2_bucket_headers_create(c->bucket_alloc, r);
-        apr_bucket_brigade *tmp;
-
-        tmp = apr_brigade_create(stream->pool, c->bucket_alloc);
+        b = h2_bucket_headers_create(c->bucket_alloc, r);
         APR_BRIGADE_INSERT_TAIL(tmp, b);
-        status = h2_beam_send(stream->input, tmp, APR_BLOCK_READ);
-        apr_brigade_destroy(tmp);
-
         stream->trailers = NULL;
     }
-    close_input(stream);
-    rv = h2_beam_close(stream->input);
-    return status ? status : rv;
+    b = apr_bucket_eos_create(c->bucket_alloc);
+    APR_BRIGADE_INSERT_TAIL(tmp, b);
+    status = h2_beam_send(stream->input, tmp, APR_BLOCK_READ);
+    apr_brigade_destroy(tmp);
+    return status;
 }
 
 apr_status_t h2_stream_write_data(h2_stream *stream,
@@ -483,7 +483,7 @@ apr_status_t h2_stream_write_data(h2_stream *stream,
     apr_status_t status = APR_SUCCESS;
     apr_bucket_brigade *tmp;
 
-    AP_DEBUG_ASSERT(stream);
+    ap_assert(stream);
     if (!stream->input) {
         return APR_EOF;
     }
diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h
index ecb53ba56d3..57fdbba04c9 100644
--- a/modules/http2/h2_stream.h
+++ b/modules/http2/h2_stream.h
@@ -65,6 +65,7 @@ struct h2_stream {
     unsigned int started : 1;      /* stream has started processing */
     unsigned int has_response : 1; /* response headers are known */
     unsigned int push_policy;      /* which push policy to use for this request */
+    unsigned int can_be_cleaned : 1; /* stream pool can be cleaned */
 
     apr_off_t out_data_frames;     /* # of DATA frames sent */
     apr_off_t out_data_octets;     /* # of DATA octets (payload) sent */
@@ -98,7 +99,7 @@ void h2_stream_eos_destroy(h2_stream *stream);
 void h2_stream_destroy(h2_stream *stream);
 
 /**
- * Removes stream from h2_session and destroys it.
+ * Cleanup references into request processing.
  *
  * @param stream the stream to cleanup
  */
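The rewritten h2_stream_close_input() above now always funnels any pending trailers plus an EOS bucket through the input beam in a single brigade. A small sketch of that "collect, then hand off once" idiom using only the public APR bucket API; send_fn stands in for h2_beam_send() and the function name is illustrative:

```c
/* Sketch of the close-input hand-off: trailers (if any) and an EOS bucket
 * are gathered into a temporary brigade and passed to the consumer in one
 * call. The bucket/brigade calls are the real APR-util APIs. */
#include <apr_buckets.h>

typedef apr_status_t (*send_fn)(void *sink, apr_bucket_brigade *bb);

static apr_status_t close_input_sketch(void *sink, apr_pool_t *pool,
                                       apr_bucket_alloc_t *ba,
                                       apr_bucket *trailers, /* may be NULL */
                                       send_fn send)
{
    apr_status_t status;
    apr_bucket_brigade *tmp = apr_brigade_create(pool, ba);
    apr_bucket *b;

    if (trailers) {
        APR_BRIGADE_INSERT_TAIL(tmp, trailers);  /* trailers travel first */
    }
    b = apr_bucket_eos_create(ba);               /* then end-of-stream */
    APR_BRIGADE_INSERT_TAIL(tmp, b);

    status = send(sink, tmp);                    /* one blocking hand-off */
    apr_brigade_destroy(tmp);
    return status;
}
```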
diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c
index ee4b7bc69d3..3f70b3aa21d 100644
--- a/modules/http2/h2_task.c
+++ b/modules/http2/h2_task.c
@@ -546,7 +546,7 @@ void h2_task_destroy(h2_task *task)
 
 apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id)
 {
-    AP_DEBUG_ASSERT(task);
+    ap_assert(task);
 
     if (task->c->master) {
         /* Each conn_rec->id is supposed to be unique at a point in time. Since
@@ -580,7 +580,6 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id)
         task->c->id = (task->c->master->id << free_bits)^slave_id;
     }
 
-    task->input.chunked = task->request->chunked;
     task->input.bb = apr_brigade_create(task->pool, task->c->bucket_alloc);
     if (task->request->serialize) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h
index fe3dbf66a60..ad8f0565961 100644
--- a/modules/http2/h2_task.h
+++ b/modules/http2/h2_task.h
@@ -60,7 +60,6 @@ struct h2_task {
 
     struct {
         struct h2_bucket_beam *beam;
-        unsigned int chunked : 1;
         unsigned int eos : 1;
         apr_bucket_brigade *bb;
         apr_bucket_brigade *bbchunk;
diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c
index 095ba973287..81b94566c58 100644
--- a/modules/http2/h2_util.c
+++ b/modules/http2/h2_util.c
@@ -1130,11 +1130,11 @@ h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
     h2_ngheader *ngh;
     size_t n;
 
-    AP_DEBUG_ASSERT(req);
-    AP_DEBUG_ASSERT(req->scheme);
-    AP_DEBUG_ASSERT(req->authority);
-    AP_DEBUG_ASSERT(req->path);
-    AP_DEBUG_ASSERT(req->method);
+    ap_assert(req);
+    ap_assert(req->scheme);
+    ap_assert(req->authority);
+    ap_assert(req->path);
+    ap_assert(req->method);
 
     n = 4;
     apr_table_do(count_header, &n, req->headers, NULL);
diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c
index 2a1599914c6..1dcfb2fcd75 100644
--- a/modules/http2/h2_workers.c
+++ b/modules/http2/h2_workers.c
@@ -243,8 +243,8 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool,
     h2_workers *workers;
     apr_pool_t *pool;
 
-    AP_DEBUG_ASSERT(s);
-    AP_DEBUG_ASSERT(server_pool);
+    ap_assert(s);
+    ap_assert(server_pool);
 
     /* let's have our own pool that will be parent to all h2_worker
      * instances we create. This happens in various threads, but always