From: Jim Jagielski
Date: Wed, 13 Mar 2019 12:32:42 +0000 (+0000)
Subject: Merge r1853874, r1853938 from trunk:
X-Git-Tag: 2.4.39~50
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=3faceab3d6c38092275bbf4024357b1bcc3d4d53;p=thirdparty%2Fapache%2Fhttpd.git

Merge r1853874, r1853938 from trunk:

mod_cache_socache: avoid pool to heap reallocation.

Below some threshold, the previous code tried to free (sub-)pooled memory
ASAP by moving small buffers (< capacity / 2) to a heap bucket. But this is
not really an optimization: first, it requires allocating more than the
configured capacity at some point, and second, since this happens during
response handling, the pool is about to be destroyed soon anyway.

This commit simply keeps the data in the subpool and uses a pool bucket
for the output brigade to take care of the lifetime until it's consumed
(or not).

Follow up to r1853874: CHANGES entry.

Submitted by: ylavic
Reviewed by: ylavic, icing, jim


git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855407 13f79535-47bb-0310-9956-ffa450edef68
---

diff --git a/CHANGES b/CHANGES
index 7ca26783cfe..791f419a835 100644
--- a/CHANGES
+++ b/CHANGES
@@ -4,6 +4,9 @@ Changes with Apache 2.4.39
   *) http: Fix possible empty response with mod_ratelimit for HEAD requests.
      PR 63192. [Yann Ylavic]
 
+  *) mod_cache_socache: Avoid reallocations and be safe with outgoing data
+     lifetime. [Yann Ylavic]
+
   *) MPMs unix: bind the bucket number of each child to its slot number,
      for a more efficient per bucket maintenance. [Yann Ylavic]
 
diff --git a/STATUS b/STATUS
index b911055265a..33bb6ff21ff 100644
--- a/STATUS
+++ b/STATUS
@@ -137,14 +137,6 @@ PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
      +1: ylavic, icing, jim
      icing: please add r1853967 to this to keep h2 working with this change.
             if added, +1
 
-  *) mod_cache_socache: Avoid reallocations and be safe with outgoing data
-     lifetime.
-     trunk patch: http://svn.apache.org/r1853874
-                  http://svn.apache.org/r1853938 (CHANGES entry)
-     2.4.x patch: svn merge -c 1853874,1853938 ^/httpd/httpd/trunk .
-                  (trunk works, modulo CHANGES)
-     +1: ylavic, icing, jim
-
 PATCHES PROPOSED TO BACKPORT FROM TRUNK:
   [ New proposals should be added at the end of the list ]
 
diff --git a/modules/cache/mod_cache_socache.c b/modules/cache/mod_cache_socache.c
index 0d76760c5ba..ecaf3763b1d 100644
--- a/modules/cache/mod_cache_socache.c
+++ b/modules/cache/mod_cache_socache.c
@@ -429,6 +429,14 @@ static int create_entity(cache_handle_t *h, request_rec *r, const char *key,
     return OK;
 }
 
+static apr_status_t sobj_body_pre_cleanup(void *baton)
+{
+    cache_socache_object_t *sobj = baton;
+    apr_brigade_cleanup(sobj->body);
+    sobj->body = NULL;
+    return APR_SUCCESS;
+}
+
 static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
 {
     cache_socache_dir_conf *dconf =
@@ -648,36 +656,25 @@ static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
     }
 
     /* Retrieve the body if we have one */
-    sobj->body = apr_brigade_create(r->pool, r->connection->bucket_alloc);
     len = buffer_len - slider;
-
-    /*
-     * Optimisation: if the body is small, we want to make a
-     * copy of the body and free the temporary pool, as we
-     * don't want large blocks of unused memory hanging around
-     * to the end of the response. In contrast, if the body is
-     * large, we would rather leave the body where it is in the
-     * temporary pool, and save ourselves the copy.
-     */
-    if (len * 2 > dconf->max) {
+    if (len > 0) {
         apr_bucket *e;
-
-        /* large - use the brigade as is, we're done */
-        e = apr_bucket_immortal_create((const char *) sobj->buffer + slider,
-                                       len, r->connection->bucket_alloc);
-
+        /* Create the body brigade later concatenated to the output filters'
+         * brigade by recall_body(). Since sobj->buffer (the data) point to
+         * sobj->pool (a subpool of r->pool), be safe by using a pool bucket
+         * which can morph to heap if sobj->pool is destroyed while the bucket
+         * is still alive. But if sobj->pool gets destroyed while the bucket is
+         * still in sobj->body (i.e. recall_body() was never called), we don't
+         * need to morph to something just about to be freed, so a pre_cleanup
+         * will take care of cleaning up sobj->body before this happens (and is
+         * a noop otherwise).
+         */
+        sobj->body = apr_brigade_create(sobj->pool, r->connection->bucket_alloc);
+        apr_pool_pre_cleanup_register(sobj->pool, sobj, sobj_body_pre_cleanup);
+        e = apr_bucket_pool_create((const char *) sobj->buffer + slider, len,
+                                   sobj->pool, r->connection->bucket_alloc);
         APR_BRIGADE_INSERT_TAIL(sobj->body, e);
     }
-    else {
-
-        /* small - make a copy of the data... */
-        apr_brigade_write(sobj->body, NULL, NULL, (const char *) sobj->buffer
-                + slider, len);
-
-        /* ...and get rid of the large memory buffer */
-        apr_pool_destroy(sobj->pool);
-        sobj->pool = NULL;
-    }
 
     /* make the configuration stick */
     h->cache_obj = obj;
@@ -766,13 +763,9 @@ static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p,
                                 apr_bucket_brigade *bb)
 {
     cache_socache_object_t *sobj = (cache_socache_object_t*) h->cache_obj->vobj;
-    apr_bucket *e;
 
-    e = APR_BRIGADE_FIRST(sobj->body);
-
-    if (e != APR_BRIGADE_SENTINEL(sobj->body)) {
-        APR_BUCKET_REMOVE(e);
-        APR_BRIGADE_INSERT_TAIL(bb, e);
+    if (sobj->body) {
+        APR_BRIGADE_CONCAT(bb, sobj->body);
     }
 
     return APR_SUCCESS;
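For readers less familiar with the APR idioms above, here is a minimal standalone
sketch of the same pool-bucket / pre_cleanup pattern in isolation. It is not part
of the patch; the demo_* names and the main() driver are invented for
illustration, and it would need to be built against APR and APR-util. The point
it shows: the cached data lives in a subpool, a pool bucket exposes it without a
copy, and a pre_cleanup registered on the subpool drops the brigade if the pool
is destroyed before the data has been handed off, so the bucket never has to
morph to heap for memory that is about to be freed anyway.

/* Hypothetical demo only -- not httpd code. Build against APR and APR-util. */
#include <string.h>

#include <apr_general.h>
#include <apr_pools.h>
#include <apr_strings.h>
#include <apr_buckets.h>

/* Stand-in for cache_socache_object_t: a subpool owning the cached
 * buffer, and the brigade holding the pool bucket that exposes it. */
typedef struct {
    apr_pool_t *pool;
    apr_bucket_brigade *body;
} demo_obj_t;

/* Same idea as sobj_body_pre_cleanup() above: if the subpool goes away
 * while the bucket still sits in ->body (i.e. it was never handed off),
 * drop it rather than letting it morph to heap for data about to be freed. */
static apr_status_t demo_body_pre_cleanup(void *baton)
{
    demo_obj_t *obj = baton;
    apr_brigade_cleanup(obj->body);
    obj->body = NULL;
    return APR_SUCCESS;
}

int main(void)
{
    apr_pool_t *root, *sub;
    apr_bucket_alloc_t *ba;
    apr_bucket_brigade *out;
    apr_bucket *e;
    demo_obj_t obj;
    const char *data;

    apr_initialize();
    apr_pool_create(&root, NULL);
    apr_pool_create(&sub, root);                /* plays the role of sobj->pool */
    ba = apr_bucket_alloc_create(root);

    obj.pool = sub;
    data = apr_pstrdup(sub, "cached response body");

    /* Pool bucket: no copy now; it would morph to heap only if 'sub'
     * were destroyed while the bucket is still in use elsewhere. */
    obj.body = apr_brigade_create(sub, ba);
    apr_pool_pre_cleanup_register(sub, &obj, demo_body_pre_cleanup);
    e = apr_bucket_pool_create(data, strlen(data), sub, ba);
    APR_BRIGADE_INSERT_TAIL(obj.body, e);

    /* recall_body() analogue: move the buckets to an output brigade. */
    out = apr_brigade_create(root, ba);
    if (obj.body) {
        APR_BRIGADE_CONCAT(out, obj.body);
    }
    /* ... the output brigade would be sent down the filter chain here ... */
    apr_brigade_destroy(out);

    apr_pool_destroy(sub);   /* pre_cleanup finds an already-empty ->body: harmless */
    apr_pool_destroy(root);
    apr_terminate();
    return 0;
}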