Changes with Apache 2.0.47
+ *) Prevent the server from crashing when entering infinite loops. The
+ new LimitInternalRecursion directive configures limits of subsequent
+ internal redirects and nested subrequests, after which the request
+ will be aborted. PR 19753 (and probably others).
+ [William Rowe, Jeff Trawick, André Malo]
+
*) core_output_filter: don't split the brigade after a FLUSH bucket if
it's the last bucket. This prevents creating unneccessary empty
brigades which may not be destroyed until the end of a keepalive
APACHE 2.0 STATUS: -*-text-*-
-Last modified at [$Date: 2003/07/01 00:13:28 $]
+Last modified at [$Date: 2003/07/01 01:25:04 $]
Release:
http://cvs.apache.org/viewcvs.cgi/httpd-2.0/server/protocol.c.diff?r1=1.131&r2=1.132&diff_format=h
+1: brianp, jerenkrantz
- * Backport LimitInteralRecursion to 2.0 and 1.3.
- (1.3 patch is here: <http://cvs.apache.org/~nd/recursion13.patch>)
- (2.0 patch is here: <http://cvs.apache.org/~nd/recursion20.patch>)
- PR 19753.
- include/http_core.h r1.75, r1.76
- modules/http/http_request.c r1.156, r1.158
- server/core.c r1.236, r1.237
- server/request.c r1.126, r1.127
- +1: nd, brianp (for 2.0), jerenkrantz (both)
-
* Replace some of the mutex locking in the worker MPM with
atomic operations for higher concurrency.
server/mpm/worker/fdqueue.c 1.24, 1.25
+++ /dev/null
-#!/bin/sh
-
-export CVSROOT=cvs.apache.org:/home/cvs
-
-if [ "x$1" = "xhelp" -o "x$2" = "x" ]; then
- echo "Usage: ./httpd_roll_release tag log_name [user]"
- echo "tag the tag to use when checking out the repository"
- echo "log_name the name of a file to log the results to."
- echo "user An optional user name to use when siging the release"
- exit
-else
- TAG=$1
-fi
-
-LOG_NAME=`pwd`/$2
-
-USER=$3
-
-REPO="httpd-2.0"
-WORKING_DIR=`echo "$REPO" | sed -e 's/[\-\.]/./g'`
-WORKING_TAG=`echo "$TAG" | sed -e 's/APACHE_2_0_/./'`
-WORKING_DIR="$WORKING_DIR$WORKING_TAG"
-
-START_DIR=`echo "$PWD"`
-
-# Check out the correct repositories.
-echo "Checking out repository $REPO into $WORKING_DIR using tag $TAG"
-
-umask 022
-echo Checking out httpd-2.0 > $LOG_NAME
-cvs checkout -r $TAG -d $WORKING_DIR $REPO >> $LOG_NAME
-cd $WORKING_DIR/srclib
-echo "Checking out apr, and apr-util" >> $LOG_NAME
-cvs checkout -r $TAG apr apr-util >> $LOG_NAME
-cd $START_DIR/$WORKING_DIR
-
-# Make sure the master site's FAQ is up-to-date. It doesn't hurt to do this
-# all the time. :-)
-echo "REMEMBER TO UPDATE THE SITE'S FAQ!!"
-#(cd /www/httpd.apache.org/docs-2.0/faq/; cvs update)
-
-# Now update the FAQ in the tarball via a download from the master site.
-# The FAQ contains SSI tags too complex for the expand.pl script to handle.
-rm -f docs/manual/faq/*.html
-links -source http://httpd.apache.org/docs-2.0/faq/index.html?ONEPAGE \
- > docs/manual/faq/index.html
-
-# Create the configure scripts
-echo "Creating the configure script"
-cd $START_DIR/$WORKING_DIR
-
-echo >> $LOG_NAME
-echo "Running ./buildconf" >> $LOG_NAME
-./buildconf >> $LOG_NAME
-
-echo >> $LOG_NAME
-echo "Fixup the timestamps preventing remake of generated files." >> $LOG_NAME
-touch modules/ssl/ssl_expr_parse.c >> $LOG_NAME
-touch modules/ssl/ssl_expr_parse.h >> $LOG_NAME
-touch modules/ssl/ssl_expr_scan.c >> $LOG_NAME
-
-# Remove any files we don't distribute with our code
-rm -f STATUS
-
-echo >> $LOG_NAME
-echo "Removing files that we don't distribute"
-echo "Removing files that we don't distribute" >> $LOG_NAME
-find . -name ".cvsignore" -exec rm {} \; >> $LOG_NAME
-find . -type d -name "CVS" | xargs rm -rf >> $LOG_NAME
-find . -type d -name "autom4te.cache" | xargs rm -rf >> $LOG_NAME
-
-# expand SSI directives in the manual
-echo "Making sure people can read the manual (expanding SSI's)"
-
-echo >> $LOG_NAME
-echo "Making sure people can read the manual (expanding SSI's)" >> $LOG_NAME
-( cd docs/manual ; chmod +x expand.pl ; ./expand.pl ; rm ./expand.pl ) >> $LOG_NAME
-
-# Time to roll the tarball
-echo "Rolling the tarballs"
-
-cd $START_DIR
-echo >> $LOG_NAME
-echo "Rolling the tarball" >> $LOG_NAME
-tar cvf $WORKING_DIR-alpha.tar $WORKING_DIR >> $LOG_NAME
-cp -p $WORKING_DIR-alpha.tar x$WORKING_DIR-alpha.tar
-gzip -9 $WORKING_DIR-alpha.tar
-mv x$WORKING_DIR-alpha.tar httpd.tar
-compress httpd.tar
-mv httpd.tar.Z $WORKING_DIR-alpha.tar.Z
-
-# Test the tarballs
-echo "Testing the tarball"
-
-echo >> $LOG_NAME
-echo "Testing the tarball $WORKING_DIR-alpha.tar.gz" >> $LOG_NAME
-gunzip -c $WORKING_DIR-alpha.tar.gz | tar tvf - >> $LOG_NAME
-zcat $WORKING_DIR-alpha.tar.Z | tar tvf - >> $LOG_NAME
-
-# remember the CHANGES file
-echo "Copying the CHANGES file to this directory"
-cp $WORKING_DIR/CHANGES .
-
-# cleanup
-echo "Cleaning up my workspace"
-rm -fr $WORKING_DIR
-
-if [ "x$USER" != "x" ]; then
- USER="-u $USER"
-fi
-
-echo Signing the tarballs
-
-echo "Signing the tarballs" >> $LOG_NAME
-pgp -sba $WORKING_DIR-alpha.tar.gz $USER
-pgp -sba $WORKING_DIR-alpha.tar.Z $USER
-
-pgp $WORKING_DIR-alpha.tar.gz.asc $WORKING_DIR-alpha.tar.gz >> $LOG_NAME
-pgp $WORKING_DIR-alpha.tar.Z.asc $WORKING_DIR-alpha.tar.Z >> $LOG_NAME
-
-echo "Don't forget to make the tarballs available by copying them to the"
-echo "/www/httpd.apache.org/dev/dist directory."
*/
#define AP_MIN_BYTES_TO_WRITE 8000
+/* default maximum of internal redirects */
+# define AP_DEFAULT_MAX_INTERNAL_REDIRECTS 10
+
+/* default maximum subrequest nesting level */
+# define AP_DEFAULT_MAX_SUBREQ_DEPTH 10
+
/**
* Retrieve the value of Options for this request
* @param r The current request
*/
AP_DECLARE(void) ap_custom_response(request_rec *r, int status, const char *string);
+/**
+ * Check if the current request is beyond the configured maximum number of redirects or subrequests
+ * @param r The current request
+ * @return true (is exceeded) or false
+ * @deffunc int ap_is_recursion_limit_exceeded(const request_rec *r)
+ */
+AP_DECLARE(int) ap_is_recursion_limit_exceeded(const request_rec *r);
+
/**
* Check for a definition from the server command line
* @param name The define to check for
char *access_name;
apr_array_header_t *sec_dir;
apr_array_header_t *sec_url;
+
+ /* recursion backstopper */
+ int redirect_limit; /* maximum number of internal redirects */
+ int subreq_limit; /* maximum nesting level of subrequests */
} core_server_config;
/* for AddOutputFiltersByType in core.c */
static request_rec *internal_internal_redirect(const char *new_uri,
request_rec *r) {
int access_status;
- request_rec *new = (request_rec *) apr_pcalloc(r->pool,
- sizeof(request_rec));
+ request_rec *new;
+
+ if (ap_is_recursion_limit_exceeded(r)) {
+ ap_die(HTTP_INTERNAL_SERVER_ERROR, r);
+ return NULL;
+ }
+
+ new = (request_rec *) apr_pcalloc(r->pool, sizeof(request_rec));
new->connection = r->connection;
new->server = r->server;
AP_DECLARE(void) ap_internal_redirect(const char *new_uri, request_rec *r)
{
request_rec *new = internal_internal_redirect(new_uri, r);
- int access_status = ap_process_request_internal(new);
+ int access_status;
+
+ /* ap_die was already called if an error occurred */
+ if (!new) {
+ return;
+ }
+
+ access_status = ap_process_request_internal(new);
if (access_status == OK) {
if ((access_status = ap_invoke_handler(new)) != 0) {
ap_die(access_status, new);
{
int access_status;
request_rec *new = internal_internal_redirect(new_uri, r);
+
+ /* ap_die was already called if an error occurred */
+ if (!new) {
+ return;
+ }
+
if (r->handler)
ap_set_content_type(new, r->content_type);
access_status = ap_process_request_internal(new);
conf->sec_dir = apr_array_make(a, 40, sizeof(ap_conf_vector_t *));
conf->sec_url = apr_array_make(a, 40, sizeof(ap_conf_vector_t *));
+ /* recursion stopper */
+ conf->redirect_limit = 0; /* 0 == unset */
+ conf->subreq_limit = 0;
+
return (void *)conf;
}
conf->sec_dir = apr_array_append(p, base->sec_dir, virt->sec_dir);
conf->sec_url = apr_array_append(p, base->sec_url, virt->sec_url);
+ conf->redirect_limit = virt->redirect_limit
+ ? virt->redirect_limit
+ : base->redirect_limit;
+
+ conf->subreq_limit = virt->subreq_limit
+ ? virt->subreq_limit
+ : base->subreq_limit;
+
return conf;
}
}
#endif
+static const char *set_recursion_limit(cmd_parms *cmd, void *dummy,
+ const char *arg1, const char *arg2)
+{
+ core_server_config *conf = ap_get_module_config(cmd->server->module_config,
+ &core_module);
+ int limit = atoi(arg1);
+
+ if (limit <= 0) {
+ return "The recursion limit must be greater than zero.";
+ }
+ if (limit < 4) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
+ "Limiting internal redirects to very low numbers may "
+ "cause normal requests to fail.");
+ }
+
+ conf->redirect_limit = limit;
+
+ if (arg2) {
+ limit = atoi(arg2);
+
+ if (limit <= 0) {
+ return "The recursion limit must be greater than zero.";
+ }
+ if (limit < 4) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
+ "Limiting the subrequest depth to a very low level may"
+ " cause normal requests to fail.");
+ }
+ }
+
+ conf->subreq_limit = limit;
+
+ return NULL;
+}
+
+static void log_backtrace(const request_rec *r)
+{
+ const request_rec *top = r;
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "r->uri = %s", r->uri ? r->uri : "(unexpectedly NULL)");
+
+ while (top && (top->prev || top->main)) {
+ if (top->prev) {
+ top = top->prev;
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "redirected from r->uri = %s",
+ top->uri ? top->uri : "(unexpectedly NULL)");
+ }
+
+ if (!top->prev && top->main) {
+ top = top->main;
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "subrequested from r->uri = %s",
+ top->uri ? top->uri : "(unexpectedly NULL)");
+ }
+ }
+}
+
+/*
+ * check whether redirect limit is reached
+ */
+AP_DECLARE(int) ap_is_recursion_limit_exceeded(const request_rec *r)
+{
+ core_server_config *conf = ap_get_module_config(r->server->module_config,
+ &core_module);
+ const request_rec *top = r;
+ int redirects = 0, subreqs = 0;
+ int rlimit = conf->redirect_limit
+ ? conf->redirect_limit
+ : AP_DEFAULT_MAX_INTERNAL_REDIRECTS;
+ int slimit = conf->subreq_limit
+ ? conf->subreq_limit
+ : AP_DEFAULT_MAX_SUBREQ_DEPTH;
+
+
+ while (top->prev || top->main) {
+ if (top->prev) {
+ if (++redirects >= rlimit) {
+ /* uuh, too much. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Request exceeded the limit of %d internal "
+ "redirects due to probable configuration error. "
+ "Use 'LimitInternalRecursion' to increase the "
+ "limit if necessary. Use 'LogLevel debug' to get "
+ "a backtrace.", rlimit);
+
+ /* post backtrace */
+ log_backtrace(r);
+
+ /* return failure */
+ return 1;
+ }
+
+ top = top->prev;
+ }
+
+ if (!top->prev && top->main) {
+ if (++subreqs >= slimit) {
+ /* uuh, too much. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Request exceeded the limit of %d subrequest "
+ "nesting levels due to probable configuration "
+ "error. Use 'LimitInternalRecursion' to increase "
+ "the limit if necessary. Use 'LogLevel debug' to "
+ "get a backtrace.", slimit);
+
+ /* post backtrace */
+ log_backtrace(r);
+
+ /* return failure */
+ return 1;
+ }
+
+ top = top->main;
+ }
+ }
+
+ /* recursion state: ok */
+ return 0;
+}
+
static const char *add_ct_output_filters(cmd_parms *cmd, void *conf_,
const char *arg, const char *arg2)
{
OR_ALL, "soft/hard limits for max number of processes per uid"),
#endif
+/* internal recursion stopper */
+AP_INIT_TAKE12("LimitInternalRecursion", set_recursion_limit, NULL, RSRC_CONF,
+ "maximum recursion depth of internal redirects and subrequests"),
+
AP_INIT_TAKE1("ForceType", ap_set_string_slot_lower,
(void *)APR_OFFSETOF(core_dir_config, mime_type), OR_FILEINFO,
"a mime type that overrides other configured type"),
udir = ap_escape_uri(rnew->pool, udir); /* re-escape it */
ap_parse_uri(rnew, ap_make_full_path(rnew->pool, udir, new_file));
}
+
+ /* We cannot return NULL without violating the API. So just turn this
+ * subrequest into a 500 to indicate the failure. */
+ if (ap_is_recursion_limit_exceeded(r)) {
+ rnew->status = HTTP_INTERNAL_SERVER_ERROR;
+ return rnew;
+ }
+
/* lookup_uri
* If the content can be served by the quick_handler, we can
* safely bypass request_internal processing.
ap_parse_uri(rnew, rnew->uri);
}
+ /* We cannot return NULL without violating the API. So just turn this
+ * subrequest into a 500. */
+ if (ap_is_recursion_limit_exceeded(r)) {
+ rnew->status = HTTP_INTERNAL_SERVER_ERROR;
+ return rnew;
+ }
+
if ((res = ap_process_request_internal(rnew))) {
rnew->status = res;
}
rnew->uri = apr_pstrdup(rnew->pool, "");
}
+ /* We cannot return NULL without violating the API. So just turn this
+ * subrequest into a 500. */
+ if (ap_is_recursion_limit_exceeded(r)) {
+ rnew->status = HTTP_INTERNAL_SERVER_ERROR;
+ return rnew;
+ }
+
if ((res = ap_process_request_internal(rnew))) {
rnew->status = res;
}