-*- coding: utf-8 -*-
Changes with Apache 2.2.1
+ *) mod_proxy: If we get an error reading the upstream response,
+ close the connection. [Justin Erenkrantz, Roy T. Fielding,
+ Jim Jagielski, Ruediger Pluem]
+
*) mod_proxy_ajp: Support common headers of the AJP protocol in responses.
PR 38340. [Aleksey Pesternikov <apesternikov yahoo.com>]
+1: rpluem, niq, jerenkrantz
NOTE: this also supersedes the previous fix for PR 37790
- * mod_proxy: Correctly error out if we get an error from the upstream
- response. Otherwise, we'll leave the client in a bad state.
- http://svn.apache.org/viewcvs.cgi?rev=354628&view=rev
- http://svn.apache.org/viewcvs.cgi?rev=354636&view=rev (comment only delta)
- Message-ID: <4395A056.2070000@web.turner.com>
- +1: jerenkrantz, jim, mturk
- -1: roy
- rpluem says: The patch mentioned above was vetoed by Roy on the list
- (http://mail-archives.apache.org/mod_mbox/httpd-dev/200512.mbox/ajax/%3cA1E95672-E36F-40FB-B906-41A447D72504@gbiv.com%3e)
- Several changes have been made to this patch to fix the
- problem and to address Roy's veto (see also
- http://mail-archives.apache.org/mod_mbox/httpd-dev/200601.mbox/%3c43BF83EB.8090703@apache.org%3e)
- The patch now consists of the following revisions on
- trunk:
-
- http://svn.apache.org/viewcvs.cgi?rev=354628&view=rev
- http://svn.apache.org/viewcvs.cgi?rev=354636&view=rev
- http://svn.apache.org/viewcvs.cgi?rev=357461&view=rev
- http://svn.apache.org/viewcvs.cgi?rev=357519&view=rev
- http://svn.apache.org/viewcvs.cgi?rev=358022&view=rev
- http://svn.apache.org/viewcvs.cgi?rev=365374&view=rev
- http://svn.apache.org/viewcvs.cgi?rev=366181&view=rev
- http://svn.apache.org/viewcvs.cgi?rev=366554&view=rev
- http://svn.apache.org/viewcvs.cgi?rev=366558&view=rev
-
- A consolidated version of these revisions can be found at
- http://people.apache.org/~rpluem/patches/partial_2.2.diff
-
- As it is fundamentally different from the first approach, I
- would suggest restarting the vote.
-
- http://people.apache.org/~rpluem/patches/partial_2.2.diff
- +1: jim, rpluem, jerenkrantz
-
PATCHES PROPOSED TO BACKPORT FROM TRUNK:
* mod_dbd: When threaded, create a private pool in child_init
* sanity checks.
*/
if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
- if (r->connection->aborted) {
+ if (r->connection->aborted || r->no_cache) {
ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
"disk_cache: Discarding body for URL %s "
"because connection has been aborted.",
#include "mod_core.h"
+/*
+ * A pointer to this is stored in the filter context to remember that a bad
+ * gateway error bucket has been seen. Only its unique address matters; the
+ * value itself is never read.
+ */
+static char bad_gateway_seen;
+
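The f->ctx assignment below uses a common C idiom: the address of a file-scope
static is unique within the process, so it can serve as an out-of-band flag
without allocating anything. A minimal standalone sketch of the idiom
(hypothetical names, independent of httpd):

    #include <stdio.h>

    /* Only the unique address of this static matters; it is never read. */
    static char error_seen_sentinel;

    static void mark_error_seen(void **ctx)
    {
        *ctx = &error_seen_sentinel; /* remember that an error was seen */
    }

    int main(void)
    {
        void *ctx = NULL;
        mark_error_seen(&ctx);
        if (ctx == &error_seen_sentinel) {
            puts("an error was seen earlier in the stream");
        }
        return 0;
    }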
apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
#define ASCII_CRLF "\015\012"
eos = e;
break;
}
+ if (AP_BUCKET_IS_ERROR(e)
+ && (((ap_bucket_error *)(e->data))->status
+ == HTTP_BAD_GATEWAY)) {
+ /*
+ * The backend connection broke. Remember this in the filter
+ * context.
+ */
+ f->ctx = &bad_gateway_seen;
+ continue;
+ }
if (APR_BUCKET_IS_FLUSH(e)) {
flush = e;
more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
* 2) the trailer
* 3) the end-of-chunked body CRLF
*
- * If there is no EOS bucket, then do nothing.
+ * We only do this if we have not seen an error bucket with status
+ * HTTP_BAD_GATEWAY; such a bucket would have been remembered in the
+ * filter context above. An error bucket with status HTTP_BAD_GATEWAY
+ * indicates that the connection to the backend (mod_proxy) broke in
+ * the middle of the response. To signal to the client that something
+ * went wrong, we do not create the last-chunk marker; the HTTP_OUTERROR
+ * filter sets c->keepalive to AP_CONN_CLOSE so that the core output
+ * filter closes the connection.
*
* XXX: it would be nice to combine this with the end-of-chunk
* marker above, but this is a bit more straight-forward for
* now.
*/
- if (eos != NULL) {
+ if (eos && !f->ctx) {
/* XXX: (2) trailers ... does not yet exist */
e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
/* <trailers> */
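For reference, a chunked body (RFC 2616, section 3.6.1) is terminated by the
zero-size last-chunk marker plus the CRLF that ends the (empty) trailer;
withholding that terminator, as above, leaves the client with a detectably
truncated response. A minimal sketch of the framing (illustration only, not
httpd code):

    #include <stdio.h>

    int main(void)
    {
        /* One 5-byte data chunk, then the terminator the chunk filter
         * withholds after seeing an HTTP_BAD_GATEWAY error bucket. */
        const char chunk[]      = "5\r\nhello\r\n";
        const char terminator[] = "0\r\n\r\n";

        fwrite(chunk, 1, sizeof(chunk) - 1, stdout);
        fwrite(terminator, 1, sizeof(terminator) - 1, stdout);
        return 0;
    }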
AP_DECLARE_DATA ap_filter_rec_t *ap_http_input_filter_handle;
AP_DECLARE_DATA ap_filter_rec_t *ap_http_header_filter_handle;
AP_DECLARE_DATA ap_filter_rec_t *ap_chunk_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_outerror_filter_handle;
AP_DECLARE_DATA ap_filter_rec_t *ap_byterange_filter_handle;
static const char *set_keep_alive_timeout(cmd_parms *cmd, void *dummy,
NULL, r, r->connection);
ap_add_output_filter_handle(ap_http_header_filter_handle,
NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_http_outerror_filter_handle,
+ NULL, r, r->connection);
}
return OK;
ap_chunk_filter_handle =
ap_register_output_filter("CHUNK", ap_http_chunk_filter,
NULL, AP_FTYPE_TRANSCODE);
+ ap_http_outerror_filter_handle =
+ ap_register_output_filter("HTTP_OUTERROR", ap_http_outerror_filter,
+ NULL, AP_FTYPE_PROTOCOL);
ap_byterange_filter_handle =
ap_register_output_filter("BYTERANGE", ap_byterange_filter,
NULL, AP_FTYPE_PROTOCOL);
return bufsiz;
}
+/* Filter to handle any error buckets on output */
+apr_status_t ap_http_outerror_filter(ap_filter_t *f,
+ apr_bucket_brigade *b)
+{
+ request_rec *r = f->r;
+ apr_bucket *e;
+
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (AP_BUCKET_IS_ERROR(e)) {
+ /*
+ * Start of error handling state tree. Just one condition
+ * right now :)
+ */
+ if (((ap_bucket_error *)(e->data))->status == HTTP_BAD_GATEWAY) {
+ /* stream aborted and we have not ended it yet */
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+ }
+ }
+
+ return ap_pass_brigade(f->next, b);
+}
+
extern AP_DECLARE_DATA ap_filter_rec_t *ap_http_input_filter_handle;
extern AP_DECLARE_DATA ap_filter_rec_t *ap_http_header_filter_handle;
extern AP_DECLARE_DATA ap_filter_rec_t *ap_chunk_filter_handle;
+extern AP_DECLARE_DATA ap_filter_rec_t *ap_http_outerror_filter_handle;
extern AP_DECLARE_DATA ap_filter_rec_t *ap_byterange_filter_handle;
/*
/* HTTP/1.1 chunked transfer encoding filter. */
apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b);
+/* Filter to handle any error buckets on output */
+apr_status_t ap_http_outerror_filter(ap_filter_t *f,
+ apr_bucket_brigade *b);
+
char *ap_response_code_string(request_rec *r, int error_index);
/**
PROXY_DECLARE(int) ap_proxy_connection_create(const char *proxy_function,
proxy_conn_rec *conn,
conn_rec *c, server_rec *s);
+/**
+ * Signal the output filter chain that the connection to the backend broke
+ * in the middle of the response. This is done by sending an error bucket
+ * with status HTTP_BAD_GATEWAY and an EOS bucket up the filter chain.
+ * @param r current request record of the client request
+ * @param brigade the brigade that is sent through the output filter chain
+ */
+PROXY_DECLARE(void) ap_proxy_backend_broke(request_rec *r,
+ apr_bucket_brigade *brigade);
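A condensed sketch of the intended call pattern (simplified from the
mod_proxy hunks below; read_or_break and backend_in are hypothetical
stand-ins, the latter for the backend connection's input filter chain):

    static void read_or_break(request_rec *r, ap_filter_t *backend_in,
                              apr_bucket_brigade *bb)
    {
        apr_status_t rv = ap_get_brigade(backend_in, bb, AP_MODE_READBYTES,
                                         APR_BLOCK_READ, AP_IOBUFSIZE);
        if (rv != APR_SUCCESS) {
            /* Append the HTTP_BAD_GATEWAY error bucket plus EOS, then pass
             * them on so HTTP_OUTERROR and the CHUNK filter can react. */
            ap_proxy_backend_broke(r, bb);
            ap_pass_brigade(r->output_filters, bb);
        }
    }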
/* Scoreboard */
#if MODULE_MAGIC_NUMBER_MAJOR > 20020903
int havebody = 1;
int isok = 1;
apr_off_t bb_len;
+ int data_sent = 0;
+ int rv = 0;
#ifdef FLUSHING_BANDAID
apr_int32_t conn_poll_fd;
apr_pollfd_t *conn_poll;
"proxy: error processing body");
isok = 0;
}
+ data_sent = 1;
apr_brigade_cleanup(output_brigade);
}
else {
"proxy: error processing body");
isok = 0;
}
+ data_sent = 1;
break;
default:
isok = 0;
}
apr_brigade_destroy(input_brigade);
- apr_brigade_destroy(output_brigade);
+ /*
+ * Clear the output_brigade to remove any buckets that may have remained
+ * there after an error.
+ */
+ apr_brigade_cleanup(output_brigade);
if (status != APR_SUCCESS) {
/* We had a failure: Close connection to backend */
"proxy: send body failed to %pI (%s)",
conn->worker->cp->addr,
conn->worker->hostname);
- return HTTP_SERVICE_UNAVAILABLE;
+ /*
+ * If we have already sent data, signal a broken backend connection
+ * upwards in the filter chain.
+ */
+ if (data_sent) {
+ ap_proxy_backend_broke(r, output_brigade);
+ /* Return DONE to avoid error messages being added to the stream */
+ rv = DONE;
+ }
+ else {
+ rv = HTTP_SERVICE_UNAVAILABLE;
+ }
+ }
+
+ /*
+ * Ensure that we send an EOS bucket through the filter chain if we have
+ * already sent some data. ap_proxy_backend_broke may already have been
+ * called and added one to the brigade (in which case the brigade is no
+ * longer empty); we must not add a second one in that case.
+ */
+ if (data_sent && !r->eos_sent && APR_BRIGADE_EMPTY(output_brigade)) {
+ e = apr_bucket_eos_create(r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(output_brigade, e);
}
+ /* If we have added something to the brigade above, send it */
+ if (!APR_BRIGADE_EMPTY(output_brigade)) {
+ ap_pass_brigade(r->output_filters, output_brigade);
+ }
+
+ apr_brigade_destroy(output_brigade);
+
+ if (rv) {
+ return rv;
+ }
+
/* Nice we have answer to send to the client */
if (result == CMD_AJP13_END_RESPONSE && isok) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
* are being read. */
int pread_len = 0;
apr_table_t *save_table;
+ int backend_broke = 0;
bb = apr_brigade_create(p, c->bucket_alloc);
break;
}
else if (rv != APR_SUCCESS) {
+ /* In this case, we are in real trouble because
+ * our backend bailed on us. Pass along a 502
+ * error bucket.
+ */
ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c,
"proxy: error reading response");
+ ap_proxy_backend_broke(r, bb);
+ ap_pass_brigade(r->output_filters, bb);
+ backend_broke = 1;
+ backend->close = 1;
break;
}
/* next time try a non-blocking read */
}
} while (interim_response);
+ /* If the connection with the client is aborted or the backend broke,
+ * return DONE. */
+ if (c->aborted || backend_broke) {
+ return DONE;
+ }
+
if (conf->error_override) {
/* the code above this checks for 'OK' which is what the hook expects */
if (ap_is_HTTP_SUCCESS(r->status))
lb_workers_limit = proxy_lb_workers + PROXY_DYNAMIC_BALANCER_LIMIT;
return lb_workers_limit;
}
+
+PROXY_DECLARE(void) ap_proxy_backend_broke(request_rec *r,
+ apr_bucket_brigade *brigade)
+{
+ apr_bucket *e;
+ conn_rec *c = r->connection;
+
+ r->no_cache = 1;
+ /*
+ * If this is a subrequest, also prevent caching of the main request.
+ */
+ if (r->main) {
+ r->main->no_cache = 1;
+ }
+ e = ap_bucket_error_create(HTTP_BAD_GATEWAY, NULL, c->pool,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(brigade, e);
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(brigade, e);
+}