From: Joe Orton
Date: Tue, 25 Feb 2020 12:34:00 +0000 (+0000)
Subject: * modules/generators/cgi_common.h (cgi_handle_request): Factor out
X-Git-Tag: 2.5.0-alpha2-ci-test-only~1622
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=82b87850eed88c3d44c74b6b0265bc9acb138b3e;p=thirdparty%2Fapache%2Fhttpd.git

* modules/generators/cgi_common.h (cgi_handle_request): Factor out
  near-identical common code from mod_cgid, mod_cgi.

* modules/generators/mod_cgid.c (cgid_handler),
  modules/generators/mod_cgi.c (cgi_handler): Adjust to use
  cgi_handle_request.

Github: closes #97

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1874491 13f79535-47bb-0310-9956-ffa450edef68
---

diff --git a/modules/generators/cgi_common.h b/modules/generators/cgi_common.h
index 79970253f69..1c80b0e85e2 100644
--- a/modules/generators/cgi_common.h
+++ b/modules/generators/cgi_common.h
@@ -361,3 +361,87 @@ static int cgi_handle_response(request_rec *r, int nph, apr_bucket_brigade *bb,
 
     return OK; /* NOT r->status, even if it has changed. */
 }
+
+/* Read the request body and write it to fd 'script_out', using 'bb'
+ * as temporary bucket brigade. If 'logbuf' is non-NULL, the first
+ * logbufbytes of stdout are stored in logbuf. */
+static apr_status_t cgi_handle_request(request_rec *r, apr_file_t *script_out,
+                                       apr_bucket_brigade *bb,
+                                       char *logbuf, apr_size_t logbufbytes)
+{
+    int seen_eos = 0;
+    int child_stopped_reading = 0;
+    apr_status_t rv;
+    int dbpos = 0;
+
+    do {
+        apr_bucket *bucket;
+
+        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+                            APR_BLOCK_READ, HUGE_STRING_LEN);
+
+        if (rv != APR_SUCCESS) {
+            return rv;
+        }
+
+        for (bucket = APR_BRIGADE_FIRST(bb);
+             bucket != APR_BRIGADE_SENTINEL(bb);
+             bucket = APR_BUCKET_NEXT(bucket))
+        {
+            const char *data;
+            apr_size_t len;
+
+            if (APR_BUCKET_IS_EOS(bucket)) {
+                seen_eos = 1;
+                break;
+            }
+
+            /* We can't do much with this. */
+            if (APR_BUCKET_IS_FLUSH(bucket)) {
+                continue;
+            }
+
+            /* If the child stopped, we still must read to EOS. */
+            if (child_stopped_reading) {
+                continue;
+            }
+
+            /* read */
+            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
+
+            if (logbufbytes && dbpos < logbufbytes) {
+                int cursize;
+
+                if ((dbpos + len) > logbufbytes) {
+                    cursize = logbufbytes - dbpos;
+                }
+                else {
+                    cursize = len;
+                }
+                memcpy(logbuf + dbpos, data, cursize);
+                dbpos += cursize;
+            }
+
+            /* Keep writing data to the child until done or too much time
+             * elapses with no progress or an error occurs.
+             */
+            rv = apr_file_write_full(script_out, data, len, NULL);
+
+            if (rv != APR_SUCCESS) {
+                /* silly script stopped reading, soak up remaining message */
+                child_stopped_reading = 1;
+                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02651)
+                              "Error writing request body to script %s",
+                              r->filename);
+            }
+        }
+        apr_brigade_cleanup(bb);
+    }
+    while (!seen_eos);
+
+    if (logbuf) {
+        logbuf[dbpos] = '\0';
+    }
+
+    return APR_SUCCESS;
+}
diff --git a/modules/generators/mod_cgi.c b/modules/generators/mod_cgi.c
index 8b96fd5190a..d8fc7b7a55e 100644
--- a/modules/generators/mod_cgi.c
+++ b/modules/generators/mod_cgi.c
@@ -576,21 +576,20 @@ static apr_status_t default_build_command(const char **cmd, const char ***argv,
 static int cgi_handler(request_rec *r)
 {
     int nph;
-    apr_size_t dbpos = 0;
+    apr_size_t dbufsize;
     const char *argv0;
     const char *command;
     const char **argv;
     char *dbuf = NULL;
     apr_file_t *script_out = NULL, *script_in = NULL, *script_err = NULL;
-    apr_bucket_brigade *bb;
+    conn_rec *c = r->connection;
+    apr_bucket_brigade *bb = apr_brigade_create(r->pool, c->bucket_alloc);
     apr_bucket *b;
     int is_included;
-    int seen_eos, child_stopped_reading;
     apr_pool_t *p;
     cgi_server_conf *conf;
     apr_status_t rv;
     cgi_exec_info_t e_info;
-    conn_rec *c;
     cgi_dirconf *dc = ap_get_module_config(r->per_dir_config, &cgi_module);
     apr_interval_time_t timeout = dc->timeout > 0 ? dc->timeout : r->server->timeout;
 
@@ -598,8 +597,6 @@ static int cgi_handler(request_rec *r)
         return DECLINED;
     }
 
-    c = r->connection;
-
     is_included = !strcmp(r->protocol, "INCLUDED");
 
     p = r->main ? r->main->pool : r->pool;
@@ -668,83 +665,24 @@ static int cgi_handler(request_rec *r)
         return HTTP_INTERNAL_SERVER_ERROR;
     }
 
-    /* Transfer any put/post args, CERN style...
-     * Note that we already ignore SIGPIPE in the core server.
-     */
-    bb = apr_brigade_create(r->pool, c->bucket_alloc);
-    seen_eos = 0;
-    child_stopped_reading = 0;
+    /* Buffer for logging script stdout. */
     if (conf->logname) {
-        dbuf = apr_palloc(r->pool, conf->bufbytes + 1);
-        dbpos = 0;
+        dbufsize = conf->bufbytes;
+        dbuf = apr_palloc(r->pool, dbufsize + 1);
     }
-    do {
-        apr_bucket *bucket;
-
-        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
-                            APR_BLOCK_READ, HUGE_STRING_LEN);
-
-        if (rv != APR_SUCCESS) {
-            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01225)
-                          "Error reading request entity data");
-            return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
-        }
-
-        for (bucket = APR_BRIGADE_FIRST(bb);
-             bucket != APR_BRIGADE_SENTINEL(bb);
-             bucket = APR_BUCKET_NEXT(bucket))
-        {
-            const char *data;
-            apr_size_t len;
-
-            if (APR_BUCKET_IS_EOS(bucket)) {
-                seen_eos = 1;
-                break;
-            }
-
-            /* We can't do much with this. */
-            if (APR_BUCKET_IS_FLUSH(bucket)) {
-                continue;
-            }
-
-            /* If the child stopped, we still must read to EOS. */
-            if (child_stopped_reading) {
-                continue;
-            }
-
-            /* read */
-            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
-
-            if (conf->logname && dbpos < conf->bufbytes) {
-                int cursize;
-
-                if ((dbpos + len) > conf->bufbytes) {
-                    cursize = conf->bufbytes - dbpos;
-                }
-                else {
-                    cursize = len;
-                }
-                memcpy(dbuf + dbpos, data, cursize);
-                dbpos += cursize;
-            }
-
-            /* Keep writing data to the child until done or too much time
-             * elapses with no progress or an error occurs.
-             */
-            rv = apr_file_write_full(script_out, data, len, NULL);
-
-            if (rv != APR_SUCCESS) {
-                /* silly script stopped reading, soak up remaining message */
-                child_stopped_reading = 1;
-            }
-        }
-        apr_brigade_cleanup(bb);
+    else {
+        dbufsize = 0;
+        dbuf = NULL;
     }
-    while (!seen_eos);
 
-    if (conf->logname) {
-        dbuf[dbpos] = '\0';
+    /* Read the request body. */
+    rv = cgi_handle_request(r, script_out, bb, dbuf, dbufsize);
+    if (rv) {
+        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01225)
+                      "Error reading request entity data");
+        return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
     }
 
+    /* Is this flush really needed? */
     apr_file_flush(script_out);
     apr_file_close(script_out);
diff --git a/modules/generators/mod_cgid.c b/modules/generators/mod_cgid.c
index 102d2b3193c..2778430ee43 100644
--- a/modules/generators/mod_cgid.c
+++ b/modules/generators/mod_cgid.c
@@ -1527,13 +1527,13 @@ static apr_status_t cleanup_script(void *vptr)
 static int cgid_handler(request_rec *r)
 {
     conn_rec *c = r->connection;
-    int retval, nph, dbpos;
+    int retval, nph;
     char *argv0, *dbuf;
-    apr_bucket_brigade *bb;
+    apr_size_t dbufsize;
+    apr_bucket_brigade *bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
     apr_bucket *b;
     cgid_server_conf *conf;
     int is_included;
-    int seen_eos, child_stopped_reading;
     int sd;
     char **env;
     apr_file_t *tempsock, *script_err, *errpipe_out;
@@ -1659,87 +1659,22 @@ static int cgid_handler(request_rec *r)
     apr_file_pipe_timeout_set(tempsock, timeout);
     apr_pool_cleanup_kill(r->pool, (void *)((long)sd), close_unix_socket);
 
-    /* Transfer any put/post args, CERN style...
-     * Note that we already ignore SIGPIPE in the core server.
-     */
-    bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
-    seen_eos = 0;
-    child_stopped_reading = 0;
-    dbuf = NULL;
-    dbpos = 0;
+    /* Buffer for logging script stdout. */
     if (conf->logname) {
-        dbuf = apr_palloc(r->pool, conf->bufbytes + 1);
+        dbufsize = conf->bufbytes;
+        dbuf = apr_palloc(r->pool, dbufsize + 1);
    }
-    do {
-        apr_bucket *bucket;
-
-        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
-                            APR_BLOCK_READ, HUGE_STRING_LEN);
-
-        if (rv != APR_SUCCESS) {
-            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01270)
-                          "Error reading request entity data");
-            return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
-        }
-
-        for (bucket = APR_BRIGADE_FIRST(bb);
-             bucket != APR_BRIGADE_SENTINEL(bb);
-             bucket = APR_BUCKET_NEXT(bucket))
-        {
-            const char *data;
-            apr_size_t len;
-
-            if (APR_BUCKET_IS_EOS(bucket)) {
-                seen_eos = 1;
-                break;
-            }
-
-            /* We can't do much with this. */
-            if (APR_BUCKET_IS_FLUSH(bucket)) {
-                continue;
-            }
-
-            /* If the child stopped, we still must read to EOS. */
-            if (child_stopped_reading) {
-                continue;
-            }
-
-            /* read */
-            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
-
-            if (conf->logname && dbpos < conf->bufbytes) {
-                int cursize;
-
-                if ((dbpos + len) > conf->bufbytes) {
-                    cursize = conf->bufbytes - dbpos;
-                }
-                else {
-                    cursize = len;
-                }
-                memcpy(dbuf + dbpos, data, cursize);
-                dbpos += cursize;
-            }
-
-            /* Keep writing data to the child until done or too much time
-             * elapses with no progress or an error occurs.
-             */
-            rv = apr_file_write_full(tempsock, data, len, NULL);
-
-            if (rv != APR_SUCCESS) {
-                /* silly script stopped reading, soak up remaining message */
-                child_stopped_reading = 1;
-                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02651)
-                              "Error writing request body to script %s",
-                              r->filename);
-
-            }
-        }
-        apr_brigade_cleanup(bb);
+    else {
+        dbuf = NULL;
+        dbufsize = 0;
     }
-    while (!seen_eos);
 
-    if (conf->logname) {
-        dbuf[dbpos] = '\0';
+    /* Read the request body. */
+    rv = cgi_handle_request(r, tempsock, bb, dbuf, dbufsize);
+    if (rv) {
+        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01270)
+                      "Error reading request entity data");
+        return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
     }
 
     /* we're done writing, or maybe we didn't write at all;
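
For reference, both cgi_handler and cgid_handler now feed the request body to the script through the same helper; the sketch below condenses that call pattern from the hunks above. It is an outline only: the wrapper name pass_body_to_script is hypothetical, the APLOGNO tag is omitted, and cgi_handle_request() itself is the static function this patch adds to cgi_common.h, which the two modules pull in by including that header.

```c
#include "httpd.h"
#include "http_log.h"
#include "http_protocol.h"
#include "apr_buckets.h"
#include "apr_file_io.h"

/* Hypothetical wrapper, for illustration only: mirrors how the handlers
 * invoke the factored-out helper after this change.  'script_out' is the
 * pipe (mod_cgi) or Unix-socket file (mod_cgid) connected to the script;
 * 'dbuf'/'dbufsize' describe the optional ScriptLog capture buffer.
 * cgi_handle_request() comes from cgi_common.h as added by this patch. */
static int pass_body_to_script(request_rec *r, apr_file_t *script_out,
                               apr_bucket_brigade *bb,
                               char *dbuf, apr_size_t dbufsize)
{
    /* Reads the request body, copies up to dbufsize bytes into dbuf for
     * script logging, and writes everything to script_out. */
    apr_status_t rv = cgi_handle_request(r, script_out, bb, dbuf, dbufsize);

    if (rv) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "Error reading request entity data");
        return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
    }
    return OK;
}
```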