From: Yann Ylavic Date: Thu, 3 Dec 2020 14:06:17 +0000 (+0000) Subject: mod_proxy: provide prefetching and spooling mechanisms to all proxy modules. X-Git-Tag: 2.5.0-alpha2-ci-test-only~1118 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b288fac3b5bf3e288dfe1aaff2ae8089cff6b468;p=thirdparty%2Fapache%2Fhttpd.git mod_proxy: provide prefetching and spooling mechanisms to all proxy modules. Export ap_proxy_prefetch_input(), ap_proxy_spool_input() and ap_proxy_read_input() from mod_proxy_http to mod_proxy.h/proxy_util.c so that they are usable by all proxy modules. mod_proxy_fcgi will use them in a following commit. git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1884067 13f79535-47bb-0310-9956-ffa450edef68 --- diff --git a/include/ap_mmn.h b/include/ap_mmn.h index 9e7b870c1a8..b25fafba896 100644 --- a/include/ap_mmn.h +++ b/include/ap_mmn.h @@ -659,6 +659,9 @@ * dav_find_attr(). * 20200705.2 (2.5.1-dev) Add dav_liveprop_elem structure and * DAV_PROP_ELEMENT key. + * 20200705.3 (2.5.1-dev) Add ap_proxy_prefetch_input(), + * ap_proxy_spool_input() and + * ap_proxy_read_input(). */ #define MODULE_MAGIC_COOKIE 0x41503235UL /* "AP25" */ @@ -666,7 +669,7 @@ #ifndef MODULE_MAGIC_NUMBER_MAJOR #define MODULE_MAGIC_NUMBER_MAJOR 20200705 #endif -#define MODULE_MAGIC_NUMBER_MINOR 2 /* 0...n */ +#define MODULE_MAGIC_NUMBER_MINOR 3 /* 0...n */ /** * Determine if the server's current MODULE_MAGIC_NUMBER is at least a diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h index 664ee0b0d9d..0fc5b1e3824 100644 --- a/modules/proxy/mod_proxy.h +++ b/modules/proxy/mod_proxy.h @@ -1227,6 +1227,55 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, char **old_cl_val, char **old_te_val); +/** + * Prefetch the client request body (in memory), up to a limit. + * Read what's in the client pipe. If nonblocking is set and read is EAGAIN, + * pass a FLUSH bucket to the backend and read again in blocking mode. + * @param r client request + * @param backend backend connection + * @param input_brigade input brigade to use/fill + * @param block blocking or non-blocking mode + * @param bytes_read number of bytes read + * @param max_read maximum number of bytes to read + * @return OK or HTTP_* error code + * @note max_read is rounded up to APR_BUCKET_BUFF_SIZE + */ +PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *input_brigade, + apr_read_type_e block, + apr_off_t *bytes_read, + apr_off_t max_read); + +/** + * Spool the client request body to memory, or disk above given limit. + * @param r client request + * @param backend backend connection + * @param input_brigade input brigade to use/fill + * @param bytes_spooled number of bytes spooled + * @param max_mem_spool maximum number of in-memory bytes + * @return OK or HTTP_* error code + */ +PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *input_brigade, + apr_off_t *bytes_spooled, + apr_off_t max_mem_spool); + +/** + * Read what's in the client pipe. If the read would block (EAGAIN), + * pass a FLUSH bucket to the backend and read again in blocking mode. 
+ * @param r client request + * @param backend backend connection + * @param input_brigade brigade to use/fill + * @param max_read maximum number of bytes to read + * @return OK or HTTP_* error code + */ +PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *input_brigade, + apr_off_t max_read); + /** * @param bucket_alloc bucket allocator * @param r request diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c index 432fb1764df..751878cd8e2 100644 --- a/modules/proxy/mod_proxy_http.c +++ b/modules/proxy/mod_proxy_http.c @@ -356,50 +356,6 @@ static void proxy_http_async_cb(void *baton) } } -/* Read what's in the client pipe. If nonblocking is set and read is EAGAIN, - * pass a FLUSH bucket to the backend and read again in blocking mode. - */ -static int stream_reqbody_read(proxy_http_req_t *req, apr_bucket_brigade *bb, - int nonblocking) -{ - request_rec *r = req->r; - proxy_conn_rec *p_conn = req->backend; - apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; - apr_read_type_e block = nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ; - apr_status_t status; - int rv; - - for (;;) { - status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES, - block, HUGE_STRING_LEN); - if (block == APR_BLOCK_READ - || (!APR_STATUS_IS_EAGAIN(status) - && (status != APR_SUCCESS || !APR_BRIGADE_EMPTY(bb)))) { - break; - } - - /* Flush and retry (blocking) */ - apr_brigade_cleanup(bb); - rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, bb, 1); - if (rv != OK) { - return rv; - } - block = APR_BLOCK_READ; - } - - if (status != APR_SUCCESS) { - conn_rec *c = r->connection; - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608) - "read request body failed to %pI (%s)" - " from %s (%s)", p_conn->addr, - p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? c->remote_host: ""); - return ap_map_http_request_error(status, HTTP_BAD_REQUEST); - } - - return OK; -} - static int stream_reqbody(proxy_http_req_t *req) { request_rec *r = req->r; @@ -418,7 +374,8 @@ static int stream_reqbody(proxy_http_req_t *req) do { if (APR_BRIGADE_EMPTY(input_brigade) && APR_BRIGADE_EMPTY(header_brigade)) { - rv = stream_reqbody_read(req, input_brigade, 1); + rv = ap_proxy_read_input(r, p_conn, input_brigade, + HUGE_STRING_LEN); if (rv != OK) { return rv; } @@ -505,7 +462,7 @@ static int stream_reqbody(proxy_http_req_t *req) */ APR_BRIGADE_PREPEND(input_brigade, header_brigade); - /* Flush here on EOS because we won't stream_reqbody_read() again */ + /* Flush here on EOS because we won't ap_proxy_read_input() again. 
*/ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, input_brigade, seen_eos); if (rv != OK) { @@ -516,138 +473,6 @@ static int stream_reqbody(proxy_http_req_t *req) return OK; } -static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled) -{ - apr_pool_t *p = req->p; - request_rec *r = req->r; - int seen_eos = 0, rv = OK; - apr_status_t status = APR_SUCCESS; - apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; - apr_bucket_brigade *input_brigade = req->input_brigade; - apr_bucket_brigade *body_brigade; - apr_bucket *e; - apr_off_t bytes, fsize = 0; - apr_file_t *tmpfile = NULL; - apr_off_t limit; - - body_brigade = apr_brigade_create(p, bucket_alloc); - *bytes_spooled = 0; - - limit = ap_get_limit_req_body(r); - - do { - if (APR_BRIGADE_EMPTY(input_brigade)) { - rv = stream_reqbody_read(req, input_brigade, 0); - if (rv != OK) { - return rv; - } - } - - /* If this brigade contains EOS, either stop or remove it. */ - if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { - seen_eos = 1; - } - - apr_brigade_length(input_brigade, 1, &bytes); - - if (*bytes_spooled + bytes > MAX_MEM_SPOOL) { - /* - * LimitRequestBody does not affect Proxy requests (Should it?). - * Let it take effect if we decide to store the body in a - * temporary file on disk. - */ - if (limit && (*bytes_spooled + bytes > limit)) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088) - "Request body is larger than the configured " - "limit of %" APR_OFF_T_FMT, limit); - return HTTP_REQUEST_ENTITY_TOO_LARGE; - } - /* can't spool any more in memory; write latest brigade to disk */ - if (tmpfile == NULL) { - const char *temp_dir; - char *template; - - status = apr_temp_dir_get(&temp_dir, p); - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089) - "search for temporary directory failed"); - return HTTP_INTERNAL_SERVER_ERROR; - } - apr_filepath_merge(&template, temp_dir, - "modproxy.tmp.XXXXXX", - APR_FILEPATH_NATIVE, p); - status = apr_file_mktemp(&tmpfile, template, 0, p); - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090) - "creation of temporary file in directory " - "%s failed", temp_dir); - return HTTP_INTERNAL_SERVER_ERROR; - } - } - for (e = APR_BRIGADE_FIRST(input_brigade); - e != APR_BRIGADE_SENTINEL(input_brigade); - e = APR_BUCKET_NEXT(e)) { - const char *data; - apr_size_t bytes_read, bytes_written; - - apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ); - status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written); - if (status != APR_SUCCESS) { - const char *tmpfile_name; - - if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) { - tmpfile_name = "(unknown)"; - } - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091) - "write to temporary file %s failed", - tmpfile_name); - return HTTP_INTERNAL_SERVER_ERROR; - } - AP_DEBUG_ASSERT(bytes_read == bytes_written); - fsize += bytes_written; - } - apr_brigade_cleanup(input_brigade); - } - else { - - /* - * Save input_brigade in body_brigade. (At least) in the SSL case - * input_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * body_brigade already has been created and does not need to get - * created by ap_save_brigade. 
- */ - status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p); - if (status != APR_SUCCESS) { - return HTTP_INTERNAL_SERVER_ERROR; - } - - } - - *bytes_spooled += bytes; - } while (!seen_eos); - - APR_BRIGADE_CONCAT(input_brigade, body_brigade); - if (tmpfile) { - apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p); - } - if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { - e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(input_brigade, e); - } - if (tmpfile) { - /* We dropped metadata buckets when spooling to tmpfile, - * terminate with EOS for stream_reqbody() to flush the - * whole in one go. - */ - e = apr_bucket_eos_create(bucket_alloc); - APR_BRIGADE_INSERT_TAIL(input_brigade, e); - } - return OK; -} - static void terminate_headers(proxy_http_req_t *req) { apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; @@ -695,12 +520,9 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; apr_bucket_brigade *header_brigade = req->header_brigade; apr_bucket_brigade *input_brigade = req->input_brigade; - apr_bucket_brigade *temp_brigade; apr_bucket *e; - apr_status_t status; apr_off_t bytes_read = 0; apr_off_t bytes; - apr_read_type_e block; int rv; if (req->force10 && r->expecting_100) { @@ -759,70 +581,12 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, p_conn->close = 1; } - /* Prefetch MAX_MEM_SPOOL bytes - * - * This helps us avoid any election of C-L v.s. T-E - * request bodies, since we are willing to keep in - * memory this much data, in any case. This gives - * us an instant C-L election if the body is of some - * reasonable size. - */ - temp_brigade = apr_brigade_create(p, bucket_alloc); - block = req->prefetch_nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ; - - /* Account for saved input, if any. */ - apr_brigade_length(input_brigade, 0, &bytes_read); - - /* Ensure we don't hit a wall where we have a buffer too small - * for ap_get_brigade's filters to fetch us another bucket, - * surrender once we hit 80 bytes less than MAX_MEM_SPOOL - * (an arbitrary value). - */ - while (bytes_read < MAX_MEM_SPOOL - 80 - && (APR_BRIGADE_EMPTY(input_brigade) - || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) { - status = ap_get_brigade(r->input_filters, temp_brigade, - AP_MODE_READBYTES, block, - MAX_MEM_SPOOL - bytes_read); - /* ap_get_brigade may return success with an empty brigade - * for a non-blocking read which would block - */ - if (block == APR_NONBLOCK_READ - && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade)) - || APR_STATUS_IS_EAGAIN(status))) { - break; - } - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095) - "prefetch request body failed to %pI<>%pI (%s)" - " from %s (%s)", - p_conn->connection->local_addr, - p_conn->addr, p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? c->remote_host: ""); - return ap_map_http_request_error(status, HTTP_BAD_REQUEST); - } - - apr_brigade_length(temp_brigade, 1, &bytes); - bytes_read += bytes; - - /* - * Save temp_brigade in input_brigade. (At least) in the SSL case - * temp_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * input_brigade already has been created and does not need to get - * created by ap_save_brigade. 
- */ - status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p); - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096) - "processing prefetched request body failed" - " to %pI (%s) from %s (%s)", - p_conn->addr, p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? c->remote_host: ""); - return HTTP_INTERNAL_SERVER_ERROR; - } + rv = ap_proxy_prefetch_input(r, req->backend, input_brigade, + req->prefetch_nonblocking ? APR_NONBLOCK_READ + : APR_BLOCK_READ, + &bytes_read, MAX_MEM_SPOOL); + if (rv != OK) { + return rv; } /* @@ -860,7 +624,7 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, else if (req->old_cl_val && r->input_filters == r->proto_input_filters) { /* Streaming is possible by preserving the existing C-L */ if (!ap_parse_strict_length(&req->cl_val, req->old_cl_val)) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085) + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01085) "could not parse request Content-Length (%s)", req->old_cl_val); return HTTP_INTERNAL_SERVER_ERROR; @@ -892,7 +656,8 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, /* Spool now, before connecting or reusing the backend connection * which could expire and be closed in the meantime. */ - rv = spool_reqbody_cl(req, &bytes); + rv = ap_proxy_spool_input(r, p_conn, input_brigade, + &bytes, MAX_MEM_SPOOL); if (rv != OK) { return rv; } diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c index 0b342242e59..a699201610b 100644 --- a/modules/proxy/proxy_util.c +++ b/modules/proxy/proxy_util.c @@ -4000,6 +4000,268 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, return OK; } +PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *input_brigade, + apr_read_type_e block, + apr_off_t *bytes_read, + apr_off_t max_read) +{ + apr_pool_t *p = r->pool; + conn_rec *c = r->connection; + apr_bucket_brigade *temp_brigade; + apr_status_t status; + apr_off_t bytes; + + *bytes_read = 0; + if (max_read < APR_BUCKET_BUFF_SIZE) { + max_read = APR_BUCKET_BUFF_SIZE; + } + + /* Prefetch max_read bytes + * + * This helps us avoid any election of C-L v.s. T-E + * request bodies, since we are willing to keep in + * memory this much data, in any case. This gives + * us an instant C-L election if the body is of some + * reasonable size. + */ + temp_brigade = apr_brigade_create(p, input_brigade->bucket_alloc); + + /* Account for saved input, if any. */ + apr_brigade_length(input_brigade, 0, bytes_read); + + /* Ensure we don't hit a wall where we have a buffer too small for + * ap_get_brigade's filters to fetch us another bucket, surrender + * once we hit 80 bytes (an arbitrary value) less than max_read. + */ + while (*bytes_read < max_read - 80 + && (APR_BRIGADE_EMPTY(input_brigade) + || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) { + status = ap_get_brigade(r->input_filters, temp_brigade, + AP_MODE_READBYTES, block, + max_read - *bytes_read); + /* ap_get_brigade may return success with an empty brigade + * for a non-blocking read which would block + */ + if (block == APR_NONBLOCK_READ + && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade)) + || APR_STATUS_IS_EAGAIN(status))) { + break; + } + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095) + "prefetch request body failed to %pI (%s)" + " from %s (%s)", backend->addr, + backend->hostname ? backend->hostname : "", + c->client_ip, c->remote_host ? 
c->remote_host : ""); + return ap_map_http_request_error(status, HTTP_BAD_REQUEST); + } + + apr_brigade_length(temp_brigade, 1, &bytes); + *bytes_read += bytes; + + /* + * Save temp_brigade in input_brigade. (At least) in the SSL case + * temp_brigade contains transient buckets whose data would get + * overwritten during the next call of ap_get_brigade in the loop. + * ap_save_brigade ensures these buckets to be set aside. + * Calling ap_save_brigade with NULL as filter is OK, because + * input_brigade already has been created and does not need to get + * created by ap_save_brigade. + */ + status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p); + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096) + "processing prefetched request body failed" + " to %pI (%s) from %s (%s)", backend->addr, + backend->hostname ? backend->hostname : "", + c->client_ip, c->remote_host ? c->remote_host : ""); + return HTTP_INTERNAL_SERVER_ERROR; + } + } + + return OK; +} + +PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *bb, + apr_off_t max_read) +{ + apr_bucket_alloc_t *bucket_alloc = bb->bucket_alloc; + apr_read_type_e block = (backend->connection) ? APR_NONBLOCK_READ + : APR_BLOCK_READ; + apr_status_t status; + int rv; + + for (;;) { + apr_brigade_cleanup(bb); + status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES, + block, max_read); + if (block == APR_BLOCK_READ + || (!(status == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)) + && !APR_STATUS_IS_EAGAIN(status))) { + break; + } + + /* Flush and retry (blocking) */ + apr_brigade_cleanup(bb); + rv = ap_proxy_pass_brigade(bucket_alloc, r, backend, + backend->connection, bb, 1); + if (rv != OK) { + return rv; + } + block = APR_BLOCK_READ; + } + + if (status != APR_SUCCESS) { + conn_rec *c = r->connection; + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608) + "read request body failed to %pI (%s)" + " from %s (%s)", backend->addr, + backend->hostname ? backend->hostname : "", + c->client_ip, c->remote_host ? c->remote_host : ""); + return ap_map_http_request_error(status, HTTP_BAD_REQUEST); + } + + return OK; +} + +PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *input_brigade, + apr_off_t *bytes_spooled, + apr_off_t max_mem_spool) +{ + apr_pool_t *p = r->pool; + int seen_eos = 0, rv = OK; + apr_status_t status = APR_SUCCESS; + apr_bucket_alloc_t *bucket_alloc = input_brigade->bucket_alloc; + apr_bucket_brigade *body_brigade; + apr_bucket *e; + apr_off_t bytes, fsize = 0; + apr_file_t *tmpfile = NULL; + apr_off_t limit; + + *bytes_spooled = 0; + body_brigade = apr_brigade_create(p, bucket_alloc); + + limit = ap_get_limit_req_body(r); + + do { + if (APR_BRIGADE_EMPTY(input_brigade)) { + rv = ap_proxy_read_input(r, backend, input_brigade, + HUGE_STRING_LEN); + if (rv != OK) { + return rv; + } + } + + /* If this brigade contains EOS, either stop or remove it. */ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + seen_eos = 1; + } + + apr_brigade_length(input_brigade, 1, &bytes); + + if (*bytes_spooled + bytes > max_mem_spool) { + /* + * LimitRequestBody does not affect Proxy requests (Should it?). + * Let it take effect if we decide to store the body in a + * temporary file on disk. 
+ */ + if (limit && (*bytes_spooled + bytes > limit)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088) + "Request body is larger than the configured " + "limit of %" APR_OFF_T_FMT, limit); + return HTTP_REQUEST_ENTITY_TOO_LARGE; + } + /* can't spool any more in memory; write latest brigade to disk */ + if (tmpfile == NULL) { + const char *temp_dir; + char *template; + + status = apr_temp_dir_get(&temp_dir, p); + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089) + "search for temporary directory failed"); + return HTTP_INTERNAL_SERVER_ERROR; + } + apr_filepath_merge(&template, temp_dir, + "modproxy.tmp.XXXXXX", + APR_FILEPATH_NATIVE, p); + status = apr_file_mktemp(&tmpfile, template, 0, p); + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090) + "creation of temporary file in directory " + "%s failed", temp_dir); + return HTTP_INTERNAL_SERVER_ERROR; + } + } + for (e = APR_BRIGADE_FIRST(input_brigade); + e != APR_BRIGADE_SENTINEL(input_brigade); + e = APR_BUCKET_NEXT(e)) { + const char *data; + apr_size_t bytes_read, bytes_written; + + apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ); + status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written); + if (status != APR_SUCCESS) { + const char *tmpfile_name; + + if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) { + tmpfile_name = "(unknown)"; + } + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091) + "write to temporary file %s failed", + tmpfile_name); + return HTTP_INTERNAL_SERVER_ERROR; + } + AP_DEBUG_ASSERT(bytes_read == bytes_written); + fsize += bytes_written; + } + apr_brigade_cleanup(input_brigade); + } + else { + + /* + * Save input_brigade in body_brigade. (At least) in the SSL case + * input_brigade contains transient buckets whose data would get + * overwritten during the next call of ap_get_brigade in the loop. + * ap_save_brigade ensures these buckets to be set aside. + * Calling ap_save_brigade with NULL as filter is OK, because + * body_brigade already has been created and does not need to get + * created by ap_save_brigade. + */ + status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p); + if (status != APR_SUCCESS) { + return HTTP_INTERNAL_SERVER_ERROR; + } + + } + + *bytes_spooled += bytes; + } while (!seen_eos); + + APR_BRIGADE_CONCAT(input_brigade, body_brigade); + if (tmpfile) { + apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p); + } + if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { + e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } + if (tmpfile) { + /* We dropped metadata buckets when spooling to tmpfile, + * terminate with EOS to allow for flushing in a one go. + */ + e = apr_bucket_eos_create(bucket_alloc); + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } + return OK; +} + PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc, request_rec *r, proxy_conn_rec *p_conn, conn_rec *origin, apr_bucket_brigade *bb,