Backport to v2.4:

author    Graham Leggett <minfrin@apache.org>
          Sun, 17 Jan 2021 16:21:35 +0000 (16:21 +0000)
committer Graham Leggett <minfrin@apache.org>
          Sun, 17 Jan 2021 16:21:35 +0000 (16:21 +0000)

  *) mod_proxy_http: handle upgrade/tunneling protocols. BZ 61616 is about
                     mod_proxy_connect, but there have also been wstunnel
                     reports about this on dev@ lately.
     trunk patch: https://svn.apache.org/r1678771
                  https://svn.apache.org/r1832348
                  https://svn.apache.org/r1869338
                  https://svn.apache.org/r1869420
                  https://svn.apache.org/r1878367
                  https://svn.apache.org/r1877557
                  https://svn.apache.org/r1877558
                  https://svn.apache.org/r1877646
                  https://svn.apache.org/r1877695
                  https://svn.apache.org/r1879401
                  https://svn.apache.org/r1879402
                  https://svn.apache.org/r1880200
                  https://svn.apache.org/r1885239
                  https://svn.apache.org/r1885240
                  https://svn.apache.org/r1885244
     2.4.x patch: http://people.apache.org/~ylavic/patches/2.4.x-mod_proxy_http-upgrade-4on5-v2.patch
                  https://github.com/apache/httpd/pull/158
     +1: ylavic, covener, minfrin
     ylavic: All the corresponding trunk changes to mod_proxy_wstunnel (except
             r1885239) have been dropped from this backport proposal, the goal
             being to handle Upgrade in mod_proxy_http from now on, while
             r1885239 lets existing wstunnel configurations benefit from the
             Upgrade improvements made in proxy_http (provided the
             mod_proxy_http module is loaded).

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1885605 13f79535-47bb-0310-9956-ffa450edef68

CHANGES
STATUS
include/ap_mmn.h
modules/proxy/mod_proxy.c
modules/proxy/mod_proxy.h
modules/proxy/mod_proxy_connect.c
modules/proxy/mod_proxy_http.c
modules/proxy/mod_proxy_wstunnel.c
modules/proxy/proxy_util.c

diff --git a/CHANGES b/CHANGES
index f2d9638c784012eef0f8843988e82bdff6750e95..cf75068c73ea503bc844f2837a9c2bcf038b9f2c 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -1,6 +1,19 @@
                                                          -*- coding: utf-8 -*-
 Changes with Apache 2.4.47
 
+  *) mod_proxy: Put mod_proxy_{connect,wstunnel} tunneling code in common in
+     proxy_util.  [Yann Ylavic]
+
+  *) mod_proxy: Improve tunneling loop to support half closed connections and
+     pending data draining (for protocols like rsync). PR 61616. [Yann Ylavic]
+
+  *) mod_proxy_http: handle Upgrade request, 101 (Switching Protocol) response
+     and switched protocol forwarding.  [Yann Ylavic]
+
+  *) mod_proxy_wstunnel: Leave Upgrade requests handling to mod_proxy_http,
+     allowing for (non-)Upgrade negotiation with the origin server.
+     [Yann Ylavic]
+
   *) mod_proxy: Allow ProxyErrorOverride to be restricted to specific status 
      codes.  PR63628. [Martin Drößler <mail martindroessler.de>]
 
diff --git a/STATUS b/STATUS
index 10f31a8a363285eb9296f0e712d5eaf988f05218..67d7d35afd8b443c54efceb28bda5b48f9d7a1a3 100644 (file)
--- a/STATUS
+++ b/STATUS
@@ -138,34 +138,6 @@ RELEASE SHOWSTOPPERS:
 PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
   [ start all new proposals below, under PATCHES PROPOSED. ]
 
-  *) mod_proxy_http: handle upgrade/tunneling protocols. BZ 61616 is about
-                     mod_proxy_connect but there has been wstunnel reports
-                     on dev@ about that too lately.
-     trunk patch: https://svn.apache.org/r1678771
-                  https://svn.apache.org/r1832348
-                  https://svn.apache.org/r1869338
-                  https://svn.apache.org/r1869420
-                  https://svn.apache.org/r1878367
-                  https://svn.apache.org/r1877557
-                  https://svn.apache.org/r1877558
-                  https://svn.apache.org/r1877646
-                  https://svn.apache.org/r1877695
-                  https://svn.apache.org/r1879401
-                  https://svn.apache.org/r1879402
-                  https://svn.apache.org/r1880200
-                  https://svn.apache.org/r1885239
-                  https://svn.apache.org/r1885240
-                  https://svn.apache.org/r1885244
-     2.4.x patch: http://people.apache.org/~ylavic/patches/2.4.x-mod_proxy_http-upgrade-4on5-v2.patch
-                  https://github.com/apache/httpd/pull/158
-     +1: ylavic, covener, minfrin
-     ylavic: All the corresponding trunk changes to mod_proxy_wstunnel (but
-             r1885239) have been dropped for this backport proposal, the goal
-             being to handle upgrade in mod_proxy_http from now, while r1885239
-             allows to benefit from the Upgrade improvements done in proxy_http
-             with existing wstunnel configurations (provided mod_proxy_http
-             module is loaded).
-
 
 
 PATCHES PROPOSED TO BACKPORT FROM TRUNK:
diff --git a/include/ap_mmn.h b/include/ap_mmn.h
index f207b788c9ec6c4c683ee245015773c98f8ea242..7cef0379eacf7e4d07b523ff977f9f98feed550b 100644 (file)
--- a/include/ap_mmn.h
+++ b/include/ap_mmn.h
  *                          flush_max_threshold and flush_max_pipelined to
  *                          core_server_config, and ap_get_read_buf_size().
  * 20120211.98 (2.4.47-dev) Add ap_proxy_should_override to mod_proxy.h
+ * 20120211.99 (2.4.47-dev) Add proxy_tunnel_rec, ap_proxy_tunnel_create()
+ *                          and ap_proxy_tunnel_run() to proxy_util.
+ * 20120211.99 (2.4.47-dev) Add ap_proxy_worker_can_upgrade()
  */
 
 #define MODULE_MAGIC_COOKIE 0x41503234UL /* "AP24" */
 #ifndef MODULE_MAGIC_NUMBER_MAJOR
 #define MODULE_MAGIC_NUMBER_MAJOR 20120211
 #endif
-#define MODULE_MAGIC_NUMBER_MINOR 98                  /* 0...n */
+#define MODULE_MAGIC_NUMBER_MINOR 99                  /* 0...n */
 
 /**
  * Determine if the server's current MODULE_MAGIC_NUMBER is at least a
diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
index 4f6300fbf0851fba4f95fd94780d1c488d4b56e4..f9e3902e1a7f7ea8775bc94fd8dfbeba4a50d3f5 100644 (file)
--- a/modules/proxy/mod_proxy.c
+++ b/modules/proxy/mod_proxy.c
@@ -314,7 +314,8 @@ static const char *set_worker_param(apr_pool_t *p,
         }
     }
     else if (!strcasecmp(key, "upgrade")) {
-        if (PROXY_STRNCPY(worker->s->upgrade, val) != APR_SUCCESS) {
+        if (PROXY_STRNCPY(worker->s->upgrade,
+                          strcasecmp(val, "ANY") ? val : "*") != APR_SUCCESS) {
             return apr_psprintf(p, "upgrade protocol length must be < %d characters",
                                 (int)sizeof(worker->s->upgrade));
         }
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index 76d10d974ff7d934f97e280710a927266325a0ec..c9a10ea54ec7bdc4b322a0274a176e1ecd79d625 100644 (file)
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -728,6 +728,19 @@ typedef __declspec(dllimport) const char *
 PROXY_DECLARE(char *) ap_proxy_worker_name(apr_pool_t *p,
                                            proxy_worker *worker);
 
+/**
+ * Return whether a worker upgrade configuration matches Upgrade header
+ * @param p       memory pool used for displaying worker name
+ * @param worker  the worker
+ * @param upgrade the Upgrade header to match
+ * @param dflt    default protocol (NULL for none)
+ * @return        1 (true) or 0 (false)
+ */
+PROXY_DECLARE(int) ap_proxy_worker_can_upgrade(apr_pool_t *p,
+                                               const proxy_worker *worker,
+                                               const char *upgrade,
+                                               const char *dflt);
+
 /**
  * Get the worker from proxy configuration
  * @param p        memory pool used for finding worker
@@ -1203,6 +1216,40 @@ PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
                                          conn_rec *origin, apr_bucket_brigade *bb,
                                          int flush);
 
+struct proxy_tunnel_conn; /* opaque */
+typedef struct {
+    request_rec *r;
+    const char *scheme;
+    apr_pollset_t *pollset;
+    apr_array_header_t *pfds;
+    apr_interval_time_t timeout;
+    struct proxy_tunnel_conn *client,
+                             *origin;
+    apr_size_t read_buf_size;
+    int replied;
+} proxy_tunnel_rec;
+
+/**
+ * Create a tunnel, to be activated by ap_proxy_tunnel_run().
+ * @param tunnel   tunnel created
+ * @param r        client request
+ * @param c_o      connection to origin
+ * @param scheme   caller proxy scheme (connect, ws(s), http(s), ...)
+ * @return         APR_SUCCESS or error status
+ */
+PROXY_DECLARE(apr_status_t) ap_proxy_tunnel_create(proxy_tunnel_rec **tunnel,
+                                                   request_rec *r, conn_rec *c_o,
+                                                   const char *scheme);
+
+/**
+ * Forward anything from either side of the tunnel to the other,
+ * until one end aborts or a polling timeout/error occurs.
+ * @param tunnel  tunnel to run
+ * @return        OK if completion is full, HTTP_GATEWAY_TIME_OUT on timeout
+ *                or another HTTP_ error otherwise.
+ */
+PROXY_DECLARE(int) ap_proxy_tunnel_run(proxy_tunnel_rec *tunnel);
+
 /**
  * Clear the headers referenced by the Connection header from the given
  * table, and remove the Connection header.
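
The tunnel API declared above is consumed later in this patch by both
mod_proxy_connect and mod_proxy_http. A minimal sketch of the intended call
pattern, assuming a hypothetical handler with an already-connected backend
conn_rec *backconn (error handling elided):

    proxy_tunnel_rec *tunnel;
    apr_status_t rv;
    int rc;

    rv = ap_proxy_tunnel_create(&tunnel, r, backconn, "HTTP");
    if (rv != APR_SUCCESS) {
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Optionally tighten the poll timeout before running. */
    tunnel->timeout = apr_time_from_sec(60);

    /* Blocks, shuffling bytes both ways, until one side aborts/closes or
     * polling fails; returns HTTP_GATEWAY_TIME_OUT on poll timeout. */
    rc = ap_proxy_tunnel_run(tunnel);
    if (ap_is_HTTP_ERROR(rc)) {
        r->status = rc;  /* e.g. for custom logging, as mod_proxy_connect does */
    }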
@@ -1286,6 +1333,15 @@ PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r,
                                                       apr_bucket_brigade *from,
                                                       apr_bucket_brigade *to);
 
+/* 
+ * The flags for ap_proxy_transfer_between_connections(), where for legacy and
+ * compatibility reasons FLUSH_EACH and FLUSH_AFTER are boolean values.
+ */
+#define AP_PROXY_TRANSFER_FLUSH_EACH        (0x00)
+#define AP_PROXY_TRANSFER_FLUSH_AFTER       (0x01)
+#define AP_PROXY_TRANSFER_YIELD_PENDING     (0x02)
+#define AP_PROXY_TRANSFER_YIELD_MAX_READS   (0x04)
+
 /*
  * Sends all data that can be read non blocking from the input filter chain of
  * c_i and send it down the output filter chain of c_o. For reading it uses
@@ -1303,10 +1359,12 @@ PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r,
  * @param name  string for logging from where data was pulled
  * @param sent  if not NULL will be set to 1 if data was sent through c_o
  * @param bsize maximum amount of data pulled in one iteration from c_i
- * @param after if set flush data on c_o only once after the loop
+ * @param flags AP_PROXY_TRANSFER_* bitmask
  * @return      apr_status_t of the operation. Could be any error returned from
  *              either the input filter chain of c_i or the output filter chain
- *              of c_o. APR_EPIPE if the outgoing connection was aborted.
+ *              of c_o, APR_EPIPE if the outgoing connection was aborted, or
+ *              APR_INCOMPLETE if AP_PROXY_TRANSFER_YIELD_PENDING was set and
+ *              the output stack gets full before the input stack is exhausted.
  */
 PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections(
                                                        request_rec *r,
@@ -1317,7 +1375,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections(
                                                        const char *name,
                                                        int *sent,
                                                        apr_off_t bsize,
-                                                       int after);
+                                                       int flags);
 
 extern module PROXY_DECLARE_DATA proxy_module;
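
A hedged sketch of how the new flags combine in a single transfer pass
(hypothetical brigades bb_in/bb_out; AP_IOBUFSIZE is httpd's default I/O
buffer size):

    apr_status_t rv;

    /* Drain whatever the client has readable into the backend, flushing
     * once after the loop, and yield (APR_INCOMPLETE) rather than block
     * if the backend's output stack fills up first. */
    rv = ap_proxy_transfer_between_connections(r, c, backconn,
                                               bb_in, bb_out, "client", NULL,
                                               AP_IOBUFSIZE,
                                               AP_PROXY_TRANSFER_FLUSH_AFTER
                                               | AP_PROXY_TRANSFER_YIELD_PENDING);
    if (rv == APR_INCOMPLETE) {
        /* Output pending: poll the backend for writability before
         * reading more from the client. */
    }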
 
diff --git a/modules/proxy/mod_proxy_connect.c b/modules/proxy/mod_proxy_connect.c
index 84536ae51c7cd726f89aa62c9b8deb412dbe3eb4..eb6dd7e166d95314c090d061bf35bb01382caefa 100644 (file)
--- a/modules/proxy/mod_proxy_connect.c
+++ b/modules/proxy/mod_proxy_connect.c
@@ -156,25 +156,19 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
     apr_socket_t *sock;
     conn_rec *c = r->connection;
     conn_rec *backconn;
-    int done = 0;
 
-    apr_bucket_brigade *bb_front;
-    apr_bucket_brigade *bb_back;
     apr_status_t rv;
     apr_size_t nbytes;
     char buffer[HUGE_STRING_LEN];
-    apr_socket_t *client_socket = ap_get_conn_socket(c);
+
+    apr_bucket_brigade *bb;
+    proxy_tunnel_rec *tunnel;
     int failed, rc;
-    apr_pollset_t *pollset;
-    apr_pollfd_t pollfd;
-    const apr_pollfd_t *signalled;
-    apr_int32_t pollcnt, pi;
-    apr_int16_t pollevent;
-    apr_sockaddr_t *nexthop;
 
     apr_uri_t uri;
     const char *connectname;
     apr_port_t connectport = 0;
+    apr_sockaddr_t *nexthop;
 
     /* is this for us? */
     if (r->method_number != M_CONNECT) {
@@ -261,28 +255,6 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
         }
     }
 
-    /* setup polling for connection */
-    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()");
-
-    if ((rv = apr_pollset_create(&pollset, 2, r->pool, 0)) != APR_SUCCESS) {
-        apr_socket_close(sock);
-        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01020)
-                      "error apr_pollset_create()");
-        return HTTP_INTERNAL_SERVER_ERROR;
-    }
-
-    /* Add client side to the poll */
-    pollfd.p = r->pool;
-    pollfd.desc_type = APR_POLL_SOCKET;
-    pollfd.reqevents = APR_POLLIN | APR_POLLHUP;
-    pollfd.desc.s = client_socket;
-    pollfd.client_data = NULL;
-    apr_pollset_add(pollset, &pollfd);
-
-    /* Add the server side to the poll */
-    pollfd.desc.s = sock;
-    apr_pollset_add(pollset, &pollfd);
-
     /*
      * Step Three: Send the Request
      *
@@ -305,6 +277,7 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
         backconn->aborted = 1;
         ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01022)
                       "pre_connection setup failed (%d)", rc);
+        apr_socket_close(sock);
         return HTTP_INTERNAL_SERVER_ERROR;
     }
 
@@ -314,9 +287,7 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
     apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu",
                    backconn->local_addr->port));
 
-
-    bb_front = apr_brigade_create(p, c->bucket_alloc);
-    bb_back = apr_brigade_create(p, backconn->bucket_alloc);
+    bb = apr_brigade_create(p, c->bucket_alloc);
 
     /* If we are connecting through a remote proxy, we need to pass
      * the CONNECT request on to it.
@@ -326,24 +297,24 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
      */
         ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
                       "sending the CONNECT request to the remote proxy");
-        ap_fprintf(backconn->output_filters, bb_back,
+        ap_fprintf(backconn->output_filters, bb,
                    "CONNECT %s HTTP/1.0" CRLF, r->uri);
-        ap_fprintf(backconn->output_filters, bb_back,
+        ap_fprintf(backconn->output_filters, bb,
                    "Proxy-agent: %s" CRLF CRLF, ap_get_server_banner());
-        ap_fflush(backconn->output_filters, bb_back);
+        ap_fflush(backconn->output_filters, bb);
     }
     else {
         ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "Returning 200 OK");
         nbytes = apr_snprintf(buffer, sizeof(buffer),
                               "HTTP/1.0 200 Connection Established" CRLF);
         ap_xlate_proto_to_ascii(buffer, nbytes);
-        ap_fwrite(c->output_filters, bb_front, buffer, nbytes);
+        ap_fwrite(c->output_filters, bb, buffer, nbytes);
         nbytes = apr_snprintf(buffer, sizeof(buffer),
                               "Proxy-agent: %s" CRLF CRLF,
                               ap_get_server_banner());
         ap_xlate_proto_to_ascii(buffer, nbytes);
-        ap_fwrite(c->output_filters, bb_front, buffer, nbytes);
-        ap_fflush(c->output_filters, bb_front);
+        ap_fwrite(c->output_filters, bb, buffer, nbytes);
+        ap_fflush(c->output_filters, bb);
 #if 0
         /* This is safer code, but it doesn't work yet.  I'm leaving it
          * here so that I can fix it later.
@@ -354,8 +325,7 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
         ap_rflush(r);
 #endif
     }
-
-    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()");
+    apr_brigade_cleanup(bb);
 
     /*
      * Step Four: Handle Data Transfer
@@ -363,88 +333,30 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
      * Handle two way transfer of data over the socket (this is a tunnel).
      */
 
-    /* we are now acting as a tunnel - the input/output filter stacks should
-     * not contain any non-connection filters.
-     */
-    r->output_filters = c->output_filters;
-    r->proto_output_filters = c->output_filters;
-    r->input_filters = c->input_filters;
-    r->proto_input_filters = c->input_filters;
-/*    r->sent_bodyct = 1;*/
-
-    do { /* Loop until done (one side closes the connection, or an error) */
-        rv = apr_pollset_poll(pollset, -1, &pollcnt, &signalled);
-        if (rv != APR_SUCCESS) {
-            if (APR_STATUS_IS_EINTR(rv)) {
-                continue;
-            }
-            apr_socket_close(sock);
-            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01023) "error apr_poll()");
-            return HTTP_INTERNAL_SERVER_ERROR;
-        }
-#ifdef DEBUGGING
-        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01024)
-                      "woke from poll(), i=%d", pollcnt);
-#endif
-
-        for (pi = 0; pi < pollcnt; pi++) {
-            const apr_pollfd_t *cur = &signalled[pi];
+    /* r->sent_bodyct = 1; */
 
-            if (cur->desc.s == sock) {
-                pollevent = cur->rtnevents;
-                if (pollevent & (APR_POLLIN | APR_POLLHUP)) {
-#ifdef DEBUGGING
-                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01025)
-                                  "sock was readable");
-#endif
-                    done |= ap_proxy_transfer_between_connections(r, backconn,
-                                                                  c, bb_back,
-                                                                  bb_front,
-                                                                  "sock", NULL,
-                                                                  CONN_BLKSZ, 1)
-                                                                 != APR_SUCCESS;
-                }
-                else if (pollevent & APR_POLLERR) {
-                    ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(01026)
-                                  "err on backconn");
-                    backconn->aborted = 1;
-                    done = 1;
-                }
-            }
-            else if (cur->desc.s == client_socket) {
-                pollevent = cur->rtnevents;
-                if (pollevent & (APR_POLLIN | APR_POLLHUP)) {
-#ifdef DEBUGGING
-                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01027)
-                                  "client was readable");
-#endif
-                    done |= ap_proxy_transfer_between_connections(r, c,
-                                                                  backconn,
-                                                                  bb_front,
-                                                                  bb_back,
-                                                                  "client",
-                                                                  NULL,
-                                                                  CONN_BLKSZ, 1)
-                                                                 != APR_SUCCESS;
-                }
-                else if (pollevent & APR_POLLERR) {
-                    ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(02827)
-                                  "err on client");
-                    c->aborted = 1;
-                    done = 1;
-                }
-            }
-            else {
-                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01028)
-                              "unknown socket in pollset");
-                done = 1;
-            }
+    rv = ap_proxy_tunnel_create(&tunnel, r, backconn, "CONNECT");
+    if (rv != APR_SUCCESS) {
+        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10208)
+                      "can't create tunnel for %pI (%s)",
+                      nexthop, connectname);
+        return HTTP_INTERNAL_SERVER_ERROR;
+    }
 
+    rc = ap_proxy_tunnel_run(tunnel);
+    if (ap_is_HTTP_ERROR(rc)) {
+        if (rc == HTTP_GATEWAY_TIME_OUT) {
+            /* ap_proxy_tunnel_run() didn't log this */
+            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10224)
+                          "tunnel timed out");
         }
-    } while (!done);
-
-    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-                  "finished with poll() - cleaning up");
+        /* Don't send an error page if we sent data already */
+        if (proxyport && !tunnel->replied) {
+            return rc;
+        }
+        /* Custom log may need this, still */
+        r->status = rc;
+    }
 
     /*
      * Step Five: Clean Up
@@ -457,8 +369,6 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
     else
         ap_lingering_close(backconn);
 
-    c->keepalive = AP_CONN_CLOSE;
-
     return OK;
 }
 
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
index c6e2b7db034596ebf6dd5a9b682864001d9bcf77..a020438f2d923864e8af2b3467f7cb484032ed3d 100644 (file)
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -31,36 +31,71 @@ static apr_status_t ap_proxy_http_cleanup(const char *scheme,
 static apr_status_t ap_proxygetline(apr_bucket_brigade *bb, char *s, int n,
                                     request_rec *r, int flags, int *read);
 
+static const char *get_url_scheme(const char **url, int *is_ssl)
+{
+    const char *u = *url;
+
+    switch (u[0]) {
+    case 'h':
+    case 'H':
+        if (strncasecmp(u + 1, "ttp", 3) == 0) {
+            if (u[4] == ':') {
+                *is_ssl = 0;
+                *url = u + 5;
+                return "http";
+            }
+            if (apr_tolower(u[4]) == 's' && u[5] == ':') {
+                *is_ssl = 1;
+                *url = u + 6;
+                return "https";
+            }
+        }
+        break;
+
+    case 'w':
+    case 'W':
+        if (apr_tolower(u[1]) == 's') {
+            if (u[2] == ':') {
+                *is_ssl = 0;
+                *url = u + 3;
+                return "ws";
+            }
+            if (apr_tolower(u[2]) == 's' && u[3] == ':') {
+                *is_ssl = 1;
+                *url = u + 4;
+                return "wss";
+            }
+        }
+        break;
+    }
+
+    *is_ssl = 0;
+    return NULL;
+}
+
 /*
  * Canonicalise http-like URLs.
  *  scheme is the scheme for the URL
  *  url    is the URL starting with the first '/'
- *  def_port is the default port for this scheme.
  */
 static int proxy_http_canon(request_rec *r, char *url)
 {
+    const char *base_url = url;
     char *host, *path, sport[7];
     char *search = NULL;
     const char *err;
     const char *scheme;
     apr_port_t port, def_port;
+    int is_ssl = 0;
 
-    /* ap_port_of_scheme() */
-    if (strncasecmp(url, "http:", 5) == 0) {
-        url += 5;
-        scheme = "http";
-    }
-    else if (strncasecmp(url, "https:", 6) == 0) {
-        url += 6;
-        scheme = "https";
-    }
-    else {
+    scheme = get_url_scheme((const char **)&url, &is_ssl);
+    if (!scheme) {
         return DECLINED;
     }
-    port = def_port = ap_proxy_port_of_scheme(scheme);
+    port = def_port = (is_ssl) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT;
 
     ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
-                  "HTTP: canonicalising URL %s", url);
+                  "HTTP: canonicalising URL %s", base_url);
 
     /* do syntatic check.
      * We break the URL into host, port, path, search
@@ -68,7 +103,7 @@ static int proxy_http_canon(request_rec *r, char *url)
     err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port);
     if (err) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01083)
-                      "error parsing URL %s: %s", url, err);
+                      "error parsing URL %s: %s", base_url, err);
         return HTTP_BAD_REQUEST;
     }
 
@@ -108,8 +143,9 @@ static int proxy_http_canon(request_rec *r, char *url)
     if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */
         host = apr_pstrcat(r->pool, "[", host, "]", NULL);
     }
+
     r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "://", host, sport,
-            "/", path, (search) ? "?" : "", (search) ? search : "", NULL);
+                              "/", path, (search) ? "?" : "", search, NULL);
     return OK;
 }
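
For illustration, what the get_url_scheme() parser above yields for a couple
of hypothetical URLs:

    const char *u1 = "https://backend.example/app";   /* hypothetical */
    const char *u2 = "wss://backend.example/chat";    /* hypothetical */
    int is_ssl;

    get_url_scheme(&u1, &is_ssl);  /* returns "https", is_ssl = 1,
                                    * u1 now at "//backend.example/app" */
    get_url_scheme(&u2, &is_ssl);  /* returns "wss", is_ssl = 1,
                                    * u2 now at "//backend.example/chat" */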
 
@@ -223,17 +259,6 @@ static void add_cl(apr_pool_t *p,
 #define ZERO_ASCII  "\060"
 #endif
 
-static void terminate_headers(apr_bucket_alloc_t *bucket_alloc,
-                              apr_bucket_brigade *header_brigade)
-{
-    apr_bucket *e;
-
-    /* add empty line at the end of the headers */
-    e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc);
-    APR_BRIGADE_INSERT_TAIL(header_brigade, e);
-}
-
-
 #define MAX_MEM_SPOOL 16384
 
 typedef enum {
@@ -246,6 +271,7 @@ typedef enum {
 typedef struct {
     apr_pool_t *p;
     request_rec *r;
+    const char *proto;
     proxy_worker *worker;
     proxy_server_conf *sconf;
 
@@ -261,8 +287,11 @@ typedef struct {
 
     rb_methods rb_method;
 
-    unsigned int do_100_continue:1,
-                 prefetch_nonblocking:1;
+    const char *upgrade;
+
+    unsigned int do_100_continue        :1,
+                 prefetch_nonblocking   :1,
+                 force10                :1;
 } proxy_http_req_t;
 
 /* Read what's in the client pipe. If nonblocking is set and read is EAGAIN,
@@ -377,20 +406,26 @@ static int stream_reqbody(proxy_http_req_t *req)
                 }
             }
             else if (rb_method == RB_STREAM_CL
-                     && bytes_streamed > req->cl_val) {
-                /* C-L < bytes streamed?!?
-                 * We will error out after the body is completely
-                 * consumed, but we can't stream more bytes at the
-                 * back end since they would in part be interpreted
-                 * as another request!  If nothing is sent, then
-                 * just send nothing.
+                     && (bytes_streamed > req->cl_val
+                         || (seen_eos && bytes_streamed < req->cl_val))) {
+                /* C-L != bytes streamed?!?
+                 *
+                 * Prevent HTTP Request/Response Splitting.
+                 *
+                 * We can't stream more (or less) bytes at the back end since
+                 * they could be interpreted in separate requests (more bytes
+                 * now would start a new request, less bytes would make the
+                 * first bytes of the next request be part of the current one).
                  *
-                 * Prevents HTTP Response Splitting.
+                 * It can't happen from the client connection here thanks to
+                 * ap_http_filter(), but some module's filter may be playing
+                 * bad games, hence the HTTP_INTERNAL_SERVER_ERROR.
                  */
                 ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086)
-                              "read more bytes of request body than expected "
+                              "read %s bytes of request body than expected "
                               "(got %" APR_OFF_T_FMT ", expected "
                               "%" APR_OFF_T_FMT ")",
+                              bytes_streamed > req->cl_val ? "more" : "less",
                               bytes_streamed, req->cl_val);
                 return HTTP_INTERNAL_SERVER_ERROR;
             }
@@ -416,13 +451,6 @@ static int stream_reqbody(proxy_http_req_t *req)
         }
     } while (!seen_eos);
 
-    if (rb_method == RB_STREAM_CL && bytes_streamed != req->cl_val) {
-        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01087)
-                      "client %s given Content-Length did not match"
-                      " number of body bytes read", r->connection->client_ip);
-        return HTTP_BAD_REQUEST;
-    }
-
     return OK;
 }
 
@@ -558,6 +586,43 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled)
     return OK;
 }
 
+static void terminate_headers(proxy_http_req_t *req)
+{
+    apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
+    apr_bucket *e;
+    char *buf;
+
+    /*
+     * Handle Connection: header if we do HTTP/1.1 request:
+     * If we plan to close the backend connection sent Connection: close
+     * otherwise sent Connection: Keep-Alive.
+     */
+    if (!req->force10) {
+        if (req->upgrade) {
+            buf = apr_pstrdup(req->p, "Connection: Upgrade" CRLF);
+            ap_xlate_proto_to_ascii(buf, strlen(buf));
+            e = apr_bucket_pool_create(buf, strlen(buf), req->p, bucket_alloc);
+            APR_BRIGADE_INSERT_TAIL(req->header_brigade, e);
+
+            /* Tell the backend that it can upgrade the connection. */
+            buf = apr_pstrcat(req->p, "Upgrade: ", req->upgrade, CRLF, NULL);
+        }
+        else if (ap_proxy_connection_reusable(req->backend)) {
+            buf = apr_pstrdup(req->p, "Connection: Keep-Alive" CRLF);
+        }
+        else {
+            buf = apr_pstrdup(req->p, "Connection: close" CRLF);
+        }
+        ap_xlate_proto_to_ascii(buf, strlen(buf));
+        e = apr_bucket_pool_create(buf, strlen(buf), req->p, bucket_alloc);
+        APR_BRIGADE_INSERT_TAIL(req->header_brigade, e);
+    }
+
+    /* add empty line at the end of the headers */
+    e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc);
+    APR_BRIGADE_INSERT_TAIL(req->header_brigade, e);
+}
+
 static int ap_proxy_http_prefetch(proxy_http_req_t *req,
                                   apr_uri_t *uri, char *url)
 {
@@ -570,21 +635,14 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
     apr_bucket_brigade *input_brigade = req->input_brigade;
     apr_bucket_brigade *temp_brigade;
     apr_bucket *e;
-    char *buf;
     apr_status_t status;
     apr_off_t bytes_read = 0;
     apr_off_t bytes;
-    int force10, rv;
+    int rv;
     apr_read_type_e block;
-    conn_rec *origin = p_conn->connection;
 
-    if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
-        if (r->expecting_100) {
-            return HTTP_EXPECTATION_FAILED;
-        }
-        force10 = 1;
-    } else {
-        force10 = 0;
+    if (req->force10 && r->expecting_100) {
+        return HTTP_EXPECTATION_FAILED;
     }
 
     rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, p_conn,
@@ -636,7 +694,6 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
                       "chunked body with Content-Length (C-L ignored)",
                       c->client_ip, c->remote_host ? c->remote_host: "");
         req->old_cl_val = NULL;
-        origin->keepalive = AP_CONN_CLOSE;
         p_conn->close = 1;
     }
 
@@ -756,7 +813,7 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
         req->rb_method = RB_STREAM_CL;
     }
     else if (req->old_te_val) {
-        if (force10
+        if (req->force10
              || (apr_table_get(r->subprocess_env, "proxy-sendcl")
                   && !apr_table_get(r->subprocess_env, "proxy-sendchunks")
                   && !apr_table_get(r->subprocess_env, "proxy-sendchunked"))) {
@@ -776,7 +833,7 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
             }
             req->rb_method = RB_STREAM_CL;
         }
-        else if (!force10
+        else if (!req->force10
                   && (apr_table_get(r->subprocess_env, "proxy-sendchunks")
                       || apr_table_get(r->subprocess_env, "proxy-sendchunked"))
                   && !apr_table_get(r->subprocess_env, "proxy-sendcl")) {
@@ -820,23 +877,7 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
 
 /* Yes I hate gotos.  This is the subrequest shortcut */
 skip_body:
-    /*
-     * Handle Connection: header if we do HTTP/1.1 request:
-     * If we plan to close the backend connection sent Connection: close
-     * otherwise sent Connection: Keep-Alive.
-     */
-    if (!force10) {
-        if (!ap_proxy_connection_reusable(p_conn)) {
-            buf = apr_pstrdup(p, "Connection: close" CRLF);
-        }
-        else {
-            buf = apr_pstrdup(p, "Connection: Keep-Alive" CRLF);
-        }
-        ap_xlate_proto_to_ascii(buf, strlen(buf));
-        e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
-        APR_BRIGADE_INSERT_TAIL(header_brigade, e);
-    }
-    terminate_headers(bucket_alloc, header_brigade);
+    terminate_headers(req);
 
     return OK;
 }
@@ -1147,6 +1188,36 @@ static int add_trailers(void *data, const char *key, const char *val)
     return 1;
 }
 
+static int send_continue_body(proxy_http_req_t *req)
+{
+    int status;
+
+    /* Send the request body (fully). */
+    switch(req->rb_method) {
+    case RB_SPOOL_CL:
+    case RB_STREAM_CL:
+    case RB_STREAM_CHUNKED:
+        status = stream_reqbody(req);
+        break;
+    default:
+        /* Shouldn't happen */
+        status = HTTP_INTERNAL_SERVER_ERROR;
+        break;
+    }
+    if (status != OK) {
+        conn_rec *c = req->r->connection;
+        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req->r,
+                APLOGNO(10154) "pass request body failed "
+                "to %pI (%s) from %s (%s) with status %i",
+                req->backend->addr,
+                req->backend->hostname ? req->backend->hostname : "",
+                c->client_ip, c->remote_host ? c->remote_host : "",
+                status);
+        req->backend->close = 1;
+    }
+    return status;
+}
+
 static
 int ap_proxy_http_process_response(proxy_http_req_t *req)
 {
@@ -1157,6 +1228,7 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
     proxy_conn_rec *backend = req->backend;
     conn_rec *origin = req->origin;
     int do_100_continue = req->do_100_continue;
+    int status;
 
     char *buffer;
     char fixed_buffer[HUGE_STRING_LEN];
@@ -1228,6 +1300,7 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
                    origin->local_addr->port));
     do {
         apr_status_t rc;
+        const char *upgrade = NULL;
         int major = 0, minor = 0;
         int toclose = 0;
 
@@ -1248,7 +1321,8 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
                 apr_table_setn(r->notes, "proxy_timedout", "1");
                 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01103) "read timeout");
                 if (do_100_continue) {
-                    return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE, "Timeout on 100-Continue");
+                    return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE,
+                                         "Timeout on 100-Continue");
                 }
             }
             /*
@@ -1300,12 +1374,12 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
                 /* Need to return OK to avoid sending an error message */
                 return OK;
             }
-            else if (!c->keepalives) {
-                     ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01105)
-                                   "NOT Closing connection to client"
-                                   " although reading from backend server %s:%d"
-                                   " failed.",
-                                   backend->hostname, backend->port);
+            if (!c->keepalives) {
+                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01105)
+                              "NOT Closing connection to client"
+                              " although reading from backend server %s:%d"
+                              " failed.",
+                              backend->hostname, backend->port);
             }
             return ap_proxyerror(r, HTTP_BAD_GATEWAY,
                                  "Error reading from remote server");
@@ -1325,8 +1399,8 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
              */
             if ((major != 1) || (len >= response_field_size - 1)) {
                 return ap_proxyerror(r, HTTP_BAD_GATEWAY,
-                apr_pstrcat(p, "Corrupt status line returned by remote "
-                            "server: ", buffer, NULL));
+                            apr_pstrcat(p, "Corrupt status line returned "
+                                        "by remote server: ", buffer, NULL));
             }
             backasswards = 0;
 
@@ -1423,10 +1497,29 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
              */
             te = apr_table_get(r->headers_out, "Transfer-Encoding");
 
+            upgrade = apr_table_get(r->headers_out, "Upgrade");
+            if (proxy_status == HTTP_SWITCHING_PROTOCOLS) {
+                if (!upgrade || !req->upgrade || (strcasecmp(req->upgrade,
+                                                             upgrade) != 0)) {
+                    return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+                                         apr_pstrcat(p, "Unexpected Upgrade: ",
+                                                     upgrade ? upgrade : "n/a",
+                                                     " (expecting ",
+                                                     req->upgrade ? req->upgrade
+                                                                  : "n/a", ")",
+                                                     NULL));
+                }
+                backend->close = 1;
+            }
+
             /* strip connection listed hop-by-hop headers from response */
             toclose = ap_proxy_clear_connection_fn(r, r->headers_out);
             if (toclose) {
                 backend->close = 1;
+                if (toclose < 0) {
+                    return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+                                         "Malformed connection header");
+                }
             }
 
             if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
@@ -1486,6 +1579,8 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
         }
 
         if (ap_is_HTTP_INFO(proxy_status)) {
+            const char *policy = NULL;
+
             /* RFC2616 tells us to forward this.
              *
              * OTOH, an interim response here may mean the backend
@@ -1501,15 +1596,29 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
              * We need to force "r->expecting_100 = 1" for RFC behaviour
              * otherwise ap_send_interim_response() does nothing when
              * the client did not ask for 100-continue.
+             *
+             * 101 Switching Protocol has its own configuration which
+             * shouldn't be interfered by "proxy-interim-response".
              */
-            const char *policy = apr_table_get(r->subprocess_env,
-                                               "proxy-interim-response");
+            if (proxy_status != HTTP_SWITCHING_PROTOCOLS) {
+                policy = apr_table_get(r->subprocess_env,
+                                       "proxy-interim-response");
+            }
             ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-                          "HTTP: received interim %d response", r->status);
+                          "HTTP: received interim %d response (policy: %s)",
+                          r->status, policy ? policy : "n/a");
             if (!policy
                     || (!strcasecmp(policy, "RFC")
                         && (proxy_status != HTTP_CONTINUE
                             || (r->expecting_100 = 1)))) {
+                switch (proxy_status) {
+                case HTTP_SWITCHING_PROTOCOLS:
+                    AP_DEBUG_ASSERT(upgrade != NULL);
+                    apr_table_setn(r->headers_out, "Connection", "Upgrade");
+                    apr_table_setn(r->headers_out, "Upgrade",
+                                   apr_pstrdup(p, upgrade));
+                    break;
+                }
                 ap_send_interim_response(r, 1);
             }
             /* FIXME: refine this to be able to specify per-response-status
@@ -1563,30 +1672,8 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
                           major, minor, proxy_status_line);
 
             if (do_send_body) {
-                int status;
-
-                /* Send the request body (fully). */
-                switch(req->rb_method) {
-                case RB_SPOOL_CL:
-                case RB_STREAM_CL:
-                case RB_STREAM_CHUNKED:
-                    status = stream_reqbody(req);
-                    break;
-                default:
-                    /* Shouldn't happen */
-                    status = HTTP_INTERNAL_SERVER_ERROR;
-                    break;
-                }
+                status = send_continue_body(req);
                 if (status != OK) {
-                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
-                            APLOGNO(10154) "pass request body failed "
-                            "to %pI (%s) from %s (%s) with status %i",
-                            backend->addr,
-                            backend->hostname ? backend->hostname : "",
-                            c->client_ip,
-                            c->remote_host ? c->remote_host : "",
-                            status);
-                    backend->close = 1;
                     return status;
                 }
             }
@@ -1607,6 +1694,62 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
             do_100_continue = 0;
         }
 
+        if (proxy_status == HTTP_SWITCHING_PROTOCOLS) {
+            apr_status_t rv;
+            proxy_tunnel_rec *tunnel;
+            apr_interval_time_t client_timeout = -1,
+                                backend_timeout = -1;
+
+            /* If we didn't send the full body yet, do it now */
+            if (do_100_continue) {
+                r->expecting_100 = 0;
+                status = send_continue_body(req);
+                if (status != OK) {
+                    return status;
+                }
+            }
+
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10239)
+                          "HTTP: tunneling protocol %s", upgrade);
+
+            rv = ap_proxy_tunnel_create(&tunnel, r, origin, "HTTP");
+            if (rv != APR_SUCCESS) {
+                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10240)
+                              "can't create tunnel for %s", upgrade);
+                return HTTP_INTERNAL_SERVER_ERROR;
+            }
+
+            /* Set timeout to the lowest configured for client or backend */
+            apr_socket_timeout_get(backend->sock, &backend_timeout);
+            apr_socket_timeout_get(ap_get_conn_socket(c), &client_timeout);
+            if (backend_timeout >= 0 && backend_timeout < client_timeout) {
+                tunnel->timeout = backend_timeout;
+            }
+            else {
+                tunnel->timeout = client_timeout;
+            }
+
+            /* Let proxy tunnel forward everything */
+            status = ap_proxy_tunnel_run(tunnel);
+            if (ap_is_HTTP_ERROR(status)) {
+                /* Tunnel always return HTTP_GATEWAY_TIME_OUT on timeout,
+                 * but we can differentiate between client and backend here.
+                 */
+                if (status == HTTP_GATEWAY_TIME_OUT
+                        && tunnel->timeout == client_timeout) {
+                    status = HTTP_REQUEST_TIME_OUT;
+                }
+            }
+            else {
+                /* Update r->status for custom log */
+                status = HTTP_SWITCHING_PROTOCOLS;
+            }
+            r->status = status;
+
+            /* We are done with both connections */
+            return DONE;
+        }
+
         if (interim_response) {
             /* Already forwarded above, read next response */
             continue;
@@ -1636,7 +1779,7 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
              */
             r->status = HTTP_OK;
             /* Discard body, if one is expected */
-        if (!r->header_only && !AP_STATUS_IS_HEADER_ONLY(proxy_status)) {
+            if (!r->header_only && !AP_STATUS_IS_HEADER_ONLY(proxy_status)) {
                 const char *tmp;
                 /* Add minimal headers needed to allow http_in filter
                  * detecting end of body without waiting for a timeout. */
@@ -1659,6 +1802,17 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
             return proxy_status;
         }
 
+        /* Forward back Upgrade header if it matches the configured one(s), it
+         * may be an HTTP_UPGRADE_REQUIRED response or some other status where
+         * Upgrade makes sense to negotiate the protocol by other means.
+         */
+        if (upgrade && ap_proxy_worker_can_upgrade(p, worker, upgrade,
+                                                   (*req->proto == 'w')
+                                                   ? "WebSocket" : NULL)) {
+            apr_table_setn(r->headers_out, "Connection", "Upgrade");
+            apr_table_setn(r->headers_out, "Upgrade", apr_pstrdup(p, upgrade));
+        }
+
         r->sent_bodyct = 1;
         /*
          * Is it an HTTP/0.9 response or did we maybe preread the 1st line of
@@ -1861,6 +2015,7 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
              */
             ap_proxy_release_connection(backend->worker->s->scheme,
                     backend, r->server);
+            /* Ensure that the backend is not reused */
             req->backend = NULL;
 
             /* Pass EOS bucket down the filter chain. */
@@ -1915,9 +2070,8 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
                               apr_port_t proxyport)
 {
     int status;
-    char *scheme;
-    const char *proxy_function;
-    const char *u;
+    const char *scheme;
+    const char *u = url;
     proxy_http_req_t *req = NULL;
     proxy_conn_rec *backend = NULL;
     apr_bucket_brigade *input_brigade = NULL;
@@ -1934,41 +2088,31 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
     apr_pool_t *p = r->pool;
     apr_uri_t *uri;
 
-    /* find the scheme */
-    u = strchr(url, ':');
-    if (u == NULL || u[1] != '/' || u[2] != '/' || u[3] == '\0')
-        return DECLINED;
-    if ((u - url) > 14)
-        return HTTP_BAD_REQUEST;
-    scheme = apr_pstrmemdup(p, url, u - url);
-    /* scheme is lowercase */
-    ap_str_tolower(scheme);
-    /* is it for us? */
-    if (strcmp(scheme, "https") == 0) {
-        if (!ap_proxy_ssl_enable(NULL)) {
-            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01112)
-                          "HTTPS: declining URL %s (mod_ssl not configured?)",
-                          url);
-            return DECLINED;
-        }
-        is_ssl = 1;
-        proxy_function = "HTTPS";
+    scheme = get_url_scheme(&u, &is_ssl);
+    if (!scheme && proxyname && strncasecmp(url, "ftp:", 4) == 0) {
+        u = url + 4;
+        scheme = "ftp";
+        is_ssl = 0;
     }
-    else if (!(strcmp(scheme, "http") == 0 || (strcmp(scheme, "ftp") == 0 && proxyname))) {
-        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01113) "HTTP: declining URL %s",
-                      url);
-        return DECLINED; /* only interested in HTTP, or FTP via proxy */
+    if (!scheme || u[0] != '/' || u[1] != '/' || u[2] == '\0') {
+        if (!scheme && (u = strchr(url, ':')) && (u - url) > 14) {
+            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10262)
+                          "overlong proxy URL scheme in %s", url);
+            return HTTP_BAD_REQUEST;
+        }
+        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01113)
+                      "HTTP: declining URL %s", url);
+        return DECLINED; /* only interested in HTTP, WS or FTP via proxy */
     }
-    else {
-        if (*scheme == 'h')
-            proxy_function = "HTTP";
-        else
-            proxy_function = "FTP";
+    if (is_ssl && !ap_proxy_ssl_enable(NULL)) {
+        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01112)
+                      "HTTP: declining URL %s (mod_ssl not configured?)", url);
+        return DECLINED;
     }
     ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "HTTP: serving URL %s", url);
 
     /* create space for state information */
-    if ((status = ap_proxy_acquire_connection(proxy_function, &backend,
+    if ((status = ap_proxy_acquire_connection(scheme, &backend,
                                               worker, r->server)) != OK) {
         return status;
     }
@@ -1981,11 +2125,27 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
     req->sconf = conf;
     req->worker = worker;
     req->backend = backend;
+    req->proto = scheme;
     req->bucket_alloc = c->bucket_alloc;
     req->rb_method = RB_INIT;
 
     dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
 
+    if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
+        req->force10 = 1;
+    }
+    else if (*worker->s->upgrade || *req->proto == 'w') {
+        /* Forward Upgrade header if it matches the configured one(s),
+         * the default being "WebSocket" for ws[s] schemes.
+         */
+        const char *upgrade = apr_table_get(r->headers_in, "Upgrade");
+        if (upgrade && ap_proxy_worker_can_upgrade(p, worker, upgrade,
+                                                   (*req->proto == 'w')
+                                                   ? "WebSocket" : NULL)) {
+            req->upgrade = upgrade;
+        }
+    }
+
     /* We possibly reuse input data prefetched in previous call(s), e.g. for a
      * balancer fallback scenario, and in this case the 100 continue settings
      * should be consistent between balancer members. If not, we need to ignore
@@ -2001,13 +2161,16 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
     /* Should we handle end-to-end or ping 100-continue? */
     if ((r->expecting_100 && (dconf->forward_100_continue || input_brigade))
             || PROXY_DO_100_CONTINUE(worker, r)) {
-        req->do_100_continue = req->prefetch_nonblocking = 1;
+        req->do_100_continue = 1;
     }
+
     /* Should we block while prefetching the body or try nonblocking and flush
      * data to the backend ASAP?
      */
-    else if (input_brigade || apr_table_get(r->subprocess_env,
-                                            "proxy-prefetch-nonblocking")) {
+    if (input_brigade
+            || req->do_100_continue
+            || apr_table_get(r->subprocess_env,
+                             "proxy-prefetch-nonblocking")) {
         req->prefetch_nonblocking = 1;
     }
 
@@ -2083,9 +2246,9 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
         }
 
         /* Step Two: Make the Connection */
-        if (ap_proxy_check_connection(proxy_function, backend, r->server, 1,
+        if (ap_proxy_check_connection(scheme, backend, r->server, 1,
                                       PROXY_CHECK_CONN_EMPTY)
-                && ap_proxy_connect_backend(proxy_function, backend, worker,
+                && ap_proxy_connect_backend(scheme, backend, worker,
                                             r->server)) {
             ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01114)
                           "HTTP: failed to make connection to backend: %s",
@@ -2095,8 +2258,7 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
         }
 
         /* Step Three: Create conn_rec */
-        if ((status = ap_proxy_connection_create_ex(proxy_function,
-                                                    backend, r)) != OK)
+        if ((status = ap_proxy_connection_create_ex(scheme, backend, r)) != OK)
             break;
         req->origin = backend->connection;
 
@@ -2134,7 +2296,7 @@ cleanup:
     if (req->backend) {
         if (status != OK)
             req->backend->close = 1;
-        ap_proxy_http_cleanup(proxy_function, r, req->backend);
+        ap_proxy_http_cleanup(scheme, r, req->backend);
     }
     return status;
 }
diff --git a/modules/proxy/mod_proxy_wstunnel.c b/modules/proxy/mod_proxy_wstunnel.c
index 9fe4b8d8cf0d2bfc29caa0d2bda7ab006a2fc6be..46749dfb399a3a869e24ebb67b487edc9152049e 100644 (file)
--- a/modules/proxy/mod_proxy_wstunnel.c
+++ b/modules/proxy/mod_proxy_wstunnel.c
  */
 
 #include "mod_proxy.h"
+#include "http_config.h"
 
 module AP_MODULE_DECLARE_DATA proxy_wstunnel_module;
 
+static int fallback_to_mod_proxy_http;
+
 static int proxy_wstunnel_check_trans(request_rec *r, const char *url)
 {
+    if (fallback_to_mod_proxy_http) {
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "check_trans fallback");
+        return DECLINED;
+    }
+
     if (ap_cstr_casecmpn(url, "ws:", 3) != 0
             && ap_cstr_casecmpn(url, "wss:", 4) != 0) {
         return DECLINED;
@@ -50,6 +58,11 @@ static int proxy_wstunnel_canon(request_rec *r, char *url)
     char *scheme;
     apr_port_t port, def_port;
 
+    if (fallback_to_mod_proxy_http) {
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "canon fallback");
+        return DECLINED;
+    }
+
     /* ap_port_of_scheme() */
     if (strncasecmp(url, "ws:", 3) == 0) {
         url += 3;
@@ -304,12 +317,17 @@ static int proxy_wstunnel_handler(request_rec *r, proxy_worker *worker,
     int status;
     char server_portstr[32];
     proxy_conn_rec *backend = NULL;
+    const char *upgrade;
     char *scheme;
     apr_pool_t *p = r->pool;
     char *locurl = url;
     apr_uri_t *uri;
     int is_ssl = 0;
-    const char *upgrade_method = *worker->s->upgrade ? worker->s->upgrade : "WebSocket";
+
+    if (fallback_to_mod_proxy_http) {
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "handler fallback");
+        return DECLINED;
+    }
 
     if (strncasecmp(url, "wss:", 4) == 0) {
         scheme = "WSS";
@@ -319,20 +337,24 @@ static int proxy_wstunnel_handler(request_rec *r, proxy_worker *worker,
         scheme = "WS";
     }
     else {
-        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02450) "declining URL %s", url);
+        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02450)
+                      "declining URL %s", url);
         return DECLINED;
     }
-
-    if (ap_cstr_casecmp(upgrade_method, "NONE") != 0) {
-        const char *upgrade;
-        upgrade = apr_table_get(r->headers_in, "Upgrade");
-        if (!upgrade || (ap_cstr_casecmp(upgrade, upgrade_method) != 0 &&
-            ap_cstr_casecmp(upgrade_method, "ANY") !=0)) {
-            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02900)
-                          "declining URL %s  (not %s, Upgrade: header is %s)", 
-                          url, upgrade_method, upgrade ? upgrade : "missing");
-            return DECLINED;
-        }
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "serving URL %s", url);
+
+    upgrade = apr_table_get(r->headers_in, "Upgrade");
+    if (!upgrade || !ap_proxy_worker_can_upgrade(p, worker, upgrade,
+                                                 "WebSocket")) {
+        const char *worker_upgrade = *worker->s->upgrade ? worker->s->upgrade
+                                                         : "WebSocket";
+        ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02900)
+                      "require upgrade for URL %s "
+                      "(Upgrade header is %s, expecting %s)", 
+                      url, upgrade ? upgrade : "missing", worker_upgrade);
+        apr_table_setn(r->err_headers_out, "Connection", "Upgrade");
+        apr_table_setn(r->err_headers_out, "Upgrade", worker_upgrade);
+        return HTTP_UPGRADE_REQUIRED;
     }
 
     uri = apr_palloc(p, sizeof(*uri));
@@ -384,9 +406,19 @@ cleanup:
     return status;
 }
 
-static void ap_proxy_http_register_hook(apr_pool_t *p)
+static int proxy_wstunnel_post_config(apr_pool_t *pconf, apr_pool_t *plog,
+                                      apr_pool_t *ptemp, server_rec *s)
+{
+    fallback_to_mod_proxy_http =
+        (ap_find_linked_module("mod_proxy_http.c") != NULL);
+
+    return OK;
+}
+
+static void ws_proxy_hooks(apr_pool_t *p)
 {
     static const char * const aszSucc[] = { "mod_proxy_http.c", NULL};
+    ap_hook_post_config(proxy_wstunnel_post_config, NULL, NULL, APR_HOOK_MIDDLE);
     proxy_hook_scheme_handler(proxy_wstunnel_handler, NULL, aszSucc, APR_HOOK_FIRST);
     proxy_hook_check_trans(proxy_wstunnel_check_trans, NULL, aszSucc, APR_HOOK_MIDDLE);
     proxy_hook_canon_handler(proxy_wstunnel_canon, NULL, aszSucc, APR_HOOK_FIRST);
@@ -399,5 +431,5 @@ AP_DECLARE_MODULE(proxy_wstunnel) = {
     NULL,                       /* create per-server config structure */
     NULL,                       /* merge per-server config structures */
     NULL,                       /* command apr_table_t */
-    ap_proxy_http_register_hook /* register hooks */
+    ws_proxy_hooks              /* register hooks */
 };
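The handler above no longer silently declines a request whose Upgrade header fails to match: it answers 426 (Upgrade Required) and advertises the protocol it expects, so the client can retry. A minimal sketch of that reply pattern (hypothetical helper, not part of the patch):

    #include "httpd.h"

    /* Hypothetical helper: tell the client which protocol to upgrade to,
     * per RFC 7231 section 6.5.15 (426 Upgrade Required). */
    static int require_upgrade(request_rec *r, const char *expected)
    {
        apr_table_setn(r->err_headers_out, "Connection", "Upgrade");
        apr_table_setn(r->err_headers_out, "Upgrade", expected);
        return HTTP_UPGRADE_REQUIRED;
    }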
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 5c088e8d55e6383a17c0e37acaee8ea919e0b7a1..7c3372c555f199a7f25da881835e07f3c2abd946 100644 (file)
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -1660,6 +1660,23 @@ PROXY_DECLARE(char *) ap_proxy_worker_name(apr_pool_t *p,
     return apr_pstrcat(p, "unix:", worker->s->uds_path, "|", worker->s->name, NULL);
 }
 
+PROXY_DECLARE(int) ap_proxy_worker_can_upgrade(apr_pool_t *p,
+                                               const proxy_worker *worker,
+                                               const char *upgrade,
+                                               const char *dflt)
+{
+    /* Find in worker->s->upgrade list (if any) */
+    const char *worker_upgrade = worker->s->upgrade;
+    if (*worker_upgrade) {
+        return (strcmp(worker_upgrade, "*") == 0
+                || ap_cstr_casecmp(worker_upgrade, upgrade) == 0
+                || ap_find_token(p, worker_upgrade, upgrade));
+    }
+
+    /* Compare to the provided default (if any) */
+    return (dflt && ap_cstr_casecmp(dflt, upgrade) == 0);
+}
+
 /*
  * Taken from ap_strcmp_match() :
  * Match = 0, NoMatch = 1, Abort = -1, Inval = -2
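A minimal usage sketch of the matcher above (hypothetical helper, mirroring how the wstunnel handler calls it): the worker's configured upgrade list wins, "*" matches anything, and the supplied default (here "WebSocket") only applies when no list is configured.

    #include "mod_proxy.h"

    /* Hypothetical helper: accept the request's Upgrade token if the
     * worker (or the "WebSocket" default) allows it. */
    static int may_upgrade(request_rec *r, const proxy_worker *worker)
    {
        const char *upgrade = apr_table_get(r->headers_in, "Upgrade");
        return upgrade && ap_proxy_worker_can_upgrade(r->pool, worker,
                                                      upgrade, "WebSocket");
    }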
@@ -4072,6 +4089,28 @@ PROXY_DECLARE(apr_port_t) ap_proxy_port_of_scheme(const char *scheme)
     return 0;
 }
 
+static APR_INLINE int ap_filter_should_yield(ap_filter_t *f)
+{
+    return f->c->data_in_output_filters;
+}
+
+static APR_INLINE int ap_filter_output_pending(conn_rec *c)
+{
+    ap_filter_t *f = c->output_filters;
+    while (f->next) {
+        f = f->next;
+    }
+    if (f->frec->filter_func.out_func(f, NULL)) {
+        return AP_FILTER_ERROR;
+    }
+    return c->data_in_output_filters ? OK : DECLINED;
+}
+
+static APR_INLINE int ap_filter_input_pending(conn_rec *c)
+{
+    return c->data_in_input_filters ? OK : DECLINED;
+}
+
 PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r,
                                                       apr_bucket_brigade *from,
                                                       apr_bucket_brigade *to)
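These three helpers appear to be private 2.4 stand-ins for the filter-pending API that trunk exposes publicly. A sketch of the tri-state contract the tunnel loop relies on (hypothetical helper, assuming it lives in proxy_util.c where the static functions are visible):

    /* Translate the pending state of a connection's output stack into
     * a poll decision. */
    static int output_drained(conn_rec *c)
    {
        int rc = ap_filter_output_pending(c);
        if (rc == OK) {
            return 0;   /* data still buffered: keep POLLOUT armed */
        }
        if (rc == DECLINED) {
            return 1;   /* fully flushed: safe to resume POLLIN */
        }
        return -1;      /* AP_FILTER_ERROR: abort the tunnel */
    }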
@@ -4110,6 +4149,16 @@ PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r,
     return rv;
 }
 
+/* An arbitrarily large value to address the pathological case where we
+ * keep reading from one side only, without scheduling the other direction
+ * for too long. This can happen with a large MTU and small read buffers,
+ * for instance when micro-benchmarking a huge bidirectional file transfer
+ * with client, proxy and backend all on localhost. Though we could just
+ * ignore the case and let the sender stop by itself at some point when/if
+ * it needs to receive data, or the receiver stop when/if it needs to send...
+ */
+#define PROXY_TRANSFER_MAX_READS 10000
+
 PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections(
                                                        request_rec *r,
                                                        conn_rec *c_i,
@@ -4119,81 +4168,498 @@ PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections(
                                                        const char *name,
                                                        int *sent,
                                                        apr_off_t bsize,
-                                                       int after)
+                                                       int flags)
 {
     apr_status_t rv;
+    int flush_each = 0;
+    unsigned int num_reads = 0;
 #ifdef DEBUGGING
     apr_off_t len;
 #endif
 
-    do {
+    /*
+     * Compat: FLUSH_EACH is the default (and zero) for legacy reasons, so it
+     * applies whenever neither FLUSH_AFTER nor YIELD_PENDING is set. The
+     * latter exclusion matters because flushing would defeat the purpose of
+     * checking for pending data (which is how we determine whether the
+     * output chain/stack is full, and hence whether to stop).
+     */
+    if (!(flags & (AP_PROXY_TRANSFER_FLUSH_AFTER |
+                   AP_PROXY_TRANSFER_YIELD_PENDING))) {
+        flush_each = 1;
+    }
+
+    for (;;) {
         apr_brigade_cleanup(bb_i);
         rv = ap_get_brigade(c_i->input_filters, bb_i, AP_MODE_READBYTES,
                             APR_NONBLOCK_READ, bsize);
-        if (rv == APR_SUCCESS) {
-            if (c_o->aborted) {
-                return APR_EPIPE;
-            }
-            if (APR_BRIGADE_EMPTY(bb_i)) {
-                break;
+        if (rv != APR_SUCCESS) {
+            if (!APR_STATUS_IS_EAGAIN(rv) && !APR_STATUS_IS_EOF(rv)) {
+                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(03308)
+                              "ap_proxy_transfer_between_connections: "
+                              "error on %s - ap_get_brigade",
+                              name);
+                if (rv == APR_INCOMPLETE) {
+                    /* Don't return APR_INCOMPLETE: to the caller it would
+                     * mean "should yield", while here it comes from
+                     * ap_http_filter() and means "incomplete body", which
+                     * is an error.
+                     */
+                    rv = APR_EGENERAL;
+                }
             }
+            break;
+        }
+
+        if (c_o->aborted) {
+            apr_brigade_cleanup(bb_i);
+            flags &= ~AP_PROXY_TRANSFER_FLUSH_AFTER;
+            rv = APR_EPIPE;
+            break;
+        }
+        if (APR_BRIGADE_EMPTY(bb_i)) {
+            break;
+        }
 #ifdef DEBUGGING
-            len = -1;
-            apr_brigade_length(bb_i, 0, &len);
-            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03306)
-                          "ap_proxy_transfer_between_connections: "
-                          "read %" APR_OFF_T_FMT
-                          " bytes from %s", len, name);
+        len = -1;
+        apr_brigade_length(bb_i, 0, &len);
+        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03306)
+                      "ap_proxy_transfer_between_connections: "
+                      "read %" APR_OFF_T_FMT
+                      " bytes from %s", len, name);
 #endif
-            if (sent) {
-                *sent = 1;
-            }
-            ap_proxy_buckets_lifetime_transform(r, bb_i, bb_o);
-            if (!after) {
-                apr_bucket *b;
+        if (sent) {
+            *sent = 1;
+        }
+        ap_proxy_buckets_lifetime_transform(r, bb_i, bb_o);
+        if (flush_each) {
+            apr_bucket *b;
+            /*
+             * Do not use ap_fflush here since this would cause the flush
+             * bucket to be sent in a separate brigade afterwards which
+             * causes some filters to set aside the buckets from the first
+             * brigade and process them when FLUSH arrives in the second
+             * brigade. As set asides of our transformed buckets involve
+             * memory copying we try to avoid this. If we have the flush
+             * bucket in the first brigade they directly process the
+             * buckets without setting them aside.
+             */
+            b = apr_bucket_flush_create(bb_o->bucket_alloc);
+            APR_BRIGADE_INSERT_TAIL(bb_o, b);
+        }
+        rv = ap_pass_brigade(c_o->output_filters, bb_o);
+        apr_brigade_cleanup(bb_o);
+        if (rv != APR_SUCCESS) {
+            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(03307)
+                          "ap_proxy_transfer_between_connections: "
+                          "error on %s - ap_pass_brigade",
+                          name);
+            flags &= ~AP_PROXY_TRANSFER_FLUSH_AFTER;
+            break;
+        }
 
-                /*
-                 * Do not use ap_fflush here since this would cause the flush
-                 * bucket to be sent in a separate brigade afterwards which
-                 * causes some filters to set aside the buckets from the first
-                 * brigade and process them when the flush arrives in the second
-                 * brigade. As set asides of our transformed buckets involve
-                 * memory copying we try to avoid this. If we have the flush
-                 * bucket in the first brigade they directly process the
-                 * buckets without setting them aside.
-                 */
-                b = apr_bucket_flush_create(bb_o->bucket_alloc);
-                APR_BRIGADE_INSERT_TAIL(bb_o, b);
+        /* Yield if the output filter stack is full? This avoids blocking
+         * here, and gives the caller a chance to wait for POLLOUT
+         * asynchronously.
+         */
+        if (flags & AP_PROXY_TRANSFER_YIELD_PENDING) {
+            int rc = OK;
+
+            if (!ap_filter_should_yield(c_o->output_filters)) {
+                rc = ap_filter_output_pending(c_o);
             }
-            rv = ap_pass_brigade(c_o->output_filters, bb_o);
-            if (rv != APR_SUCCESS) {
-                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(03307)
+            if (rc == OK) {
+                ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
                               "ap_proxy_transfer_between_connections: "
-                              "error on %s - ap_pass_brigade",
-                              name);
+                              "yield (output pending)");
+                rv = APR_INCOMPLETE;
+                break;
+            }
+            if (rc != DECLINED) {
+                rv = AP_FILTER_ERROR;
+                break;
             }
-        } else if (!APR_STATUS_IS_EAGAIN(rv) && !APR_STATUS_IS_EOF(rv)) {
-            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(03308)
+        }
+
+        /* Yield if we have held the thread for too long? This gives the
+         * caller a chance to schedule the other direction too.
+         */
+        if ((flags & AP_PROXY_TRANSFER_YIELD_MAX_READS)
+                && ++num_reads > PROXY_TRANSFER_MAX_READS) {
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
                           "ap_proxy_transfer_between_connections: "
-                          "error on %s - ap_get_brigade",
-                          name);
+                          "yield (max reads)");
+            rv = APR_SUCCESS;
+            break;
         }
-    } while (rv == APR_SUCCESS);
+    }
 
-    if (after) {
+    if (flags & AP_PROXY_TRANSFER_FLUSH_AFTER) {
         ap_fflush(c_o->output_filters, bb_o);
+        apr_brigade_cleanup(bb_o);
     }
+    apr_brigade_cleanup(bb_i);
 
     ap_log_rerror(APLOG_MARK, APLOG_TRACE2, rv, r,
-                  "ap_proxy_transfer_between_connections complete");
+                  "ap_proxy_transfer_between_connections complete (%s %pI)",
+                  (c_i == r->connection) ? "to" : "from",
+                  (c_i == r->connection) ? c_o->client_addr
+                                         : c_i->client_addr);
 
     if (APR_STATUS_IS_EAGAIN(rv)) {
         rv = APR_SUCCESS;
     }
-
     return rv;
 }
 
+struct proxy_tunnel_conn {
+    /* the other side of the tunnel */
+    struct proxy_tunnel_conn *other;
+
+    conn_rec *c;
+    const char *name;
+
+    apr_pollfd_t *pfd;
+    apr_bucket_brigade *bb;
+
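+    /* half-close tracking: down_in is set once reading from this side
+     * returned EOF, down_out once its write side has been shut down */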
+    unsigned int down_in:1,
+                 down_out:1;
+};
+
+PROXY_DECLARE(apr_status_t) ap_proxy_tunnel_create(proxy_tunnel_rec **ptunnel,
+                                                   request_rec *r, conn_rec *c_o,
+                                                   const char *scheme)
+{
+    apr_status_t rv;
+    conn_rec *c_i = r->connection;
+    proxy_tunnel_rec *tunnel;
+
+    *ptunnel = NULL;
+
+    tunnel = apr_pcalloc(r->pool, sizeof(*tunnel));
+
+    rv = apr_pollset_create(&tunnel->pollset, 2, r->pool, APR_POLLSET_NOCOPY);
+    if (rv != APR_SUCCESS) {
+        return rv;
+    }
+
+    tunnel->r = r;
+    tunnel->scheme = apr_pstrdup(r->pool, scheme);
+    tunnel->client = apr_pcalloc(r->pool, sizeof(struct proxy_tunnel_conn));
+    tunnel->origin = apr_pcalloc(r->pool, sizeof(struct proxy_tunnel_conn));
+    tunnel->pfds = apr_array_make(r->pool, 2, sizeof(apr_pollfd_t));
+    tunnel->read_buf_size = ap_get_read_buf_size(r);
+    tunnel->client->other = tunnel->origin;
+    tunnel->origin->other = tunnel->client;
+    tunnel->timeout = -1;
+
+    tunnel->client->c = c_i;
+    tunnel->client->name = "client";
+    tunnel->client->bb = apr_brigade_create(c_i->pool, c_i->bucket_alloc);
+    tunnel->client->pfd = &APR_ARRAY_PUSH(tunnel->pfds, apr_pollfd_t);
+    tunnel->client->pfd->p = r->pool;
+    tunnel->client->pfd->desc_type = APR_POLL_SOCKET;
+    tunnel->client->pfd->desc.s = ap_get_conn_socket(c_i);
+    tunnel->client->pfd->client_data = tunnel->client;
+
+    tunnel->origin->c = c_o;
+    tunnel->origin->name = "origin";
+    tunnel->origin->bb = apr_brigade_create(c_o->pool, c_o->bucket_alloc);
+    tunnel->origin->pfd = &APR_ARRAY_PUSH(tunnel->pfds, apr_pollfd_t);
+    tunnel->origin->pfd->p = r->pool;
+    tunnel->origin->pfd->desc_type = APR_POLL_SOCKET;
+    tunnel->origin->pfd->desc.s = ap_get_conn_socket(c_o);
+    tunnel->origin->pfd->client_data = tunnel->origin;
+
+    /* The sockets should be nonblocking from now on */
+    apr_socket_opt_set(tunnel->client->pfd->desc.s, APR_SO_NONBLOCK, 1);
+    apr_socket_opt_set(tunnel->origin->pfd->desc.s, APR_SO_NONBLOCK, 1);
+
+    /* No coalescing filters */
+    ap_remove_output_filter_byhandle(c_i->output_filters,
+                                     "SSL/TLS Coalescing Filter");
+    ap_remove_output_filter_byhandle(c_o->output_filters,
+                                     "SSL/TLS Coalescing Filter");
+
+    /* A bidirectional non-HTTP stream will confuse mod_reqtimeout */
+    ap_remove_input_filter_byhandle(c_i->input_filters, "reqtimeout");
+
+    /* The input/output filter stacks should contain connection filters only */
+    r->input_filters = r->proto_input_filters = c_i->input_filters;
+    r->output_filters = r->proto_output_filters = c_i->output_filters;
+
+    /* Won't be reused after tunneling */
+    c_i->keepalive = AP_CONN_CLOSE;
+    c_o->keepalive = AP_CONN_CLOSE;
+
+    /* Start with POLLOUT and let ap_proxy_tunnel_run() schedule both
+     * directions once there is no more output data pending.
+     */
+    tunnel->client->pfd->reqevents = APR_POLLOUT | APR_POLLERR;
+    tunnel->origin->pfd->reqevents = APR_POLLOUT | APR_POLLERR;
+    if ((rv = apr_pollset_add(tunnel->pollset, tunnel->client->pfd))
+            || (rv = apr_pollset_add(tunnel->pollset, tunnel->origin->pfd))) {
+        return rv;
+    }
+
+    *ptunnel = tunnel;
+    return APR_SUCCESS;
+}
+
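+/* Arm the given events on a pollfd, merging them with whatever is
+ * already armed: APR_POLLHUP is implied by APR_POLLIN, and APR_POLLERR
+ * is always requested.
+ */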
+static void add_pollset(apr_pollset_t *pollset, apr_pollfd_t *pfd,
+                        apr_int16_t events)
+{
+    apr_status_t rv;
+
+    AP_DEBUG_ASSERT((pfd->reqevents & events) == 0);
+
+    if (pfd->reqevents) {
+        rv = apr_pollset_remove(pollset, pfd);
+        if (rv != APR_SUCCESS) {
+            AP_DEBUG_ASSERT(0);
+        }
+    }
+
+    if (events & APR_POLLIN) {
+        events |= APR_POLLHUP;
+    }
+    pfd->reqevents |= events | APR_POLLERR;
+    rv = apr_pollset_add(pollset, pfd);
+    if (rv != APR_SUCCESS) {
+        AP_DEBUG_ASSERT(0);
+    }
+}
+
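+/* Disarm the given events on a pollfd, keeping the others armed (if
+ * any): APR_POLLHUP goes away with APR_POLLIN, and the pollfd is left
+ * out of the pollset entirely once only APR_POLLERR would remain.
+ */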
+static void del_pollset(apr_pollset_t *pollset, apr_pollfd_t *pfd,
+                        apr_int16_t events)
+{
+    apr_status_t rv;
+
+    AP_DEBUG_ASSERT((pfd->reqevents & events) != 0);
+
+    rv = apr_pollset_remove(pollset, pfd);
+    if (rv != APR_SUCCESS) {
+        AP_DEBUG_ASSERT(0);
+        return;
+    }
+
+    if (events & APR_POLLIN) {
+        events |= APR_POLLHUP;
+    }
+    if (pfd->reqevents & ~(events | APR_POLLERR)) {
+        pfd->reqevents &= ~events;
+        rv = apr_pollset_add(pollset, pfd);
+        if (rv != APR_SUCCESS) {
+            AP_DEBUG_ASSERT(0);
+            return;
+        }
+    }
+    else {
+        pfd->reqevents = 0;
+    }
+}
+
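+/* Forward the data readable on the "in" side to the other side. When the
+ * output stack fills up (APR_INCOMPLETE) or the input hits EOF, POLLIN is
+ * paused on "in" and POLLOUT armed on "out" so that the poll loop can
+ * resume or finish this direction later.
+ */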
+static int proxy_tunnel_forward(proxy_tunnel_rec *tunnel,
+                                 struct proxy_tunnel_conn *in)
+{
+    struct proxy_tunnel_conn *out = in->other;
+    apr_status_t rv;
+    int sent = 0;
+
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, tunnel->r,
+                  "proxy: %s: %s input ready",
+                  tunnel->scheme, in->name);
+
+    rv = ap_proxy_transfer_between_connections(tunnel->r,
+                                               in->c, out->c,
+                                               in->bb, out->bb,
+                                               in->name, &sent,
+                                               tunnel->read_buf_size,
+                                           AP_PROXY_TRANSFER_YIELD_PENDING |
+                                           AP_PROXY_TRANSFER_YIELD_MAX_READS);
+    if (sent && out == tunnel->client) {
+        tunnel->replied = 1;
+    }
+    if (rv != APR_SUCCESS) {
+        if (APR_STATUS_IS_INCOMPLETE(rv)) {
+            /* Pause POLLIN while waiting for POLLOUT on the other
+             * side, so as not to fill the output filters even more
+             * and block there.
+             */
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, tunnel->r,
+                          "proxy: %s: %s wait writable",
+                          tunnel->scheme, out->name);
+        }
+        else if (APR_STATUS_IS_EOF(rv)) {
+            /* Stop POLLIN and wait for POLLOUT (flush) on the
+             * other side to shut it down.
+             */
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, tunnel->r,
+                          "proxy: %s: %s read shutdown",
+                          tunnel->scheme, in->name);
+            in->down_in = 1;
+        }
+        else {
+            /* Real failure, bail out */
+            return HTTP_INTERNAL_SERVER_ERROR;
+        }
+
+        del_pollset(tunnel->pollset, in->pfd, APR_POLLIN);
+        add_pollset(tunnel->pollset, out->pfd, APR_POLLOUT);
+    }
+
+    return OK;
+}
+
+PROXY_DECLARE(int) ap_proxy_tunnel_run(proxy_tunnel_rec *tunnel)
+{
+    int rc = OK;
+    request_rec *r = tunnel->r;
+    apr_pollset_t *pollset = tunnel->pollset;
+    struct proxy_tunnel_conn *client = tunnel->client,
+                             *origin = tunnel->origin;
+    apr_interval_time_t timeout = tunnel->timeout >= 0 ? tunnel->timeout : -1;
+    const char *scheme = tunnel->scheme;
+    apr_status_t rv;
+
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(10212)
+                  "proxy: %s: tunnel running (timeout %lf)",
+                  scheme, timeout >= 0 ? (double)timeout / APR_USEC_PER_SEC
+                                       : (double)-1.0);
+
+    /* Loop until both directions of the connection are closed,
+     * or a failure occurs.
+     */
+    do {
+        const apr_pollfd_t *results;
+        apr_int32_t nresults, i;
+
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r,
+                      "proxy: %s: polling (client=%hx, origin=%hx)",
+                      scheme, client->pfd->reqevents, origin->pfd->reqevents);
+        do {
+            rv = apr_pollset_poll(pollset, timeout, &nresults, &results);
+        } while (APR_STATUS_IS_EINTR(rv));
+
+        if (rv != APR_SUCCESS) {
+            if (APR_STATUS_IS_TIMEUP(rv)) {
+                ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, APLOGNO(10213)
+                              "proxy: %s: polling timed out "
+                              "(client=%hx, origin=%hx)",
+                              scheme, client->pfd->reqevents,
+                              origin->pfd->reqevents);
+                rc = HTTP_GATEWAY_TIME_OUT;
+            }
+            else {
+                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10214)
+                              "proxy: %s: polling failed", scheme);
+                rc = HTTP_INTERNAL_SERVER_ERROR;
+            }
+            return rc;
+        }
+
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r, APLOGNO(10215)
+                      "proxy: %s: woken up, %i result(s)", scheme, nresults);
+
+        for (i = 0; i < nresults; i++) {
+            const apr_pollfd_t *pfd = &results[i];
+            struct proxy_tunnel_conn *tc = pfd->client_data;
+
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r,
+                          "proxy: %s: #%i: %s: %hx/%hx", scheme, i,
+                          tc->name, pfd->rtnevents, tc->pfd->reqevents);
+
+            /* sanity check */
+            if (pfd->desc.s != client->pfd->desc.s
+                    && pfd->desc.s != origin->pfd->desc.s) {
+                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10222)
+                              "proxy: %s: unknown socket in pollset", scheme);
+                return HTTP_INTERNAL_SERVER_ERROR;
+            }
+
+            if (!(pfd->rtnevents & (APR_POLLIN  | APR_POLLOUT |
+                                    APR_POLLHUP | APR_POLLERR))) {
+                /* this catches POLLNVAL etc.. */
+                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10220)
+                              "proxy: %s: polling events error (%x)",
+                              scheme, pfd->rtnevents);
+                return HTTP_INTERNAL_SERVER_ERROR;
+            }
+
+            /* Write if we asked for POLLOUT, and got it or POLLERR
+             * alone (i.e. not with POLLIN|HUP). We want the output filters
+             * to know about the socket error if any, by failing the write.
+             */
+            if ((tc->pfd->reqevents & APR_POLLOUT)
+                    && ((pfd->rtnevents & APR_POLLOUT)
+                        || !(pfd->rtnevents & (APR_POLLIN | APR_POLLHUP)))) {
+                struct proxy_tunnel_conn *out = tc, *in = tc->other;
+
+                ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r,
+                              "proxy: %s: %s output ready",
+                              scheme, out->name);
+
+                rc = ap_filter_output_pending(out->c);
+                if (rc == OK) {
+                    /* Keep polling out (only) */
+                    continue;
+                }
+                if (rc != DECLINED) {
+                    /* Real failure, bail out */
+                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10221)
+                                  "proxy: %s: %s flushing failed (%i)",
+                                  scheme, out->name, rc);
+                    return rc;
+                }
+
+                /* No more pending data. If the other side is not readable
+                 * anymore it's time to shut down for write (this direction
+                 * is over). Otherwise back to normal business.
+                 */
+                del_pollset(pollset, out->pfd, APR_POLLOUT);
+                if (in->down_in) {
+                    ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+                                  "proxy: %s: %s write shutdown",
+                                  scheme, out->name);
+                    apr_socket_shutdown(out->pfd->desc.s, 1);
+                    out->down_out = 1;
+                }
+                else {
+                    ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r,
+                                  "proxy: %s: %s resume writable",
+                                  scheme, out->name);
+                    add_pollset(pollset, in->pfd, APR_POLLIN);
+
+                    /* Flush any pending input data now; we don't know when
+                     * the next POLLIN will trigger, and retaining data might
+                     * block the protocol.
+                     */
+                    if (ap_filter_input_pending(in->c) == OK) {
+                        rc = proxy_tunnel_forward(tunnel, in);
+                        if (rc != OK) {
+                            return rc;
+                        }
+                    }
+                }
+            }
+
+            /* Read if we asked for POLLIN|HUP, and got it or POLLERR
+             * alone (i.e. not with POLLOUT). We want the input filters
+             * to know about the socket error if any, by failing the read.
+             */
+            if ((tc->pfd->reqevents & APR_POLLIN)
+                    && ((pfd->rtnevents & (APR_POLLIN | APR_POLLHUP))
+                        || !(pfd->rtnevents & APR_POLLOUT))) {
+                rc = proxy_tunnel_forward(tunnel, tc);
+                if (rc != OK) {
+                    return rc;
+                }
+            }
+        }
+    } while (!client->down_out || !origin->down_out);
+
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(10223)
+                  "proxy: %s: tunnel finished", scheme);
+    return OK;
+}
+
 PROXY_DECLARE (const char *) ap_proxy_show_hcmethod(hcmethod_t method)
 {
     proxy_hcmethods_t *m = proxy_hcmethods;
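Taken together, the new proxy_util code gives scheme handlers a single entry point for tunneling. A minimal sketch of the intended call sequence (hypothetical function with error handling trimmed; `backend` is assumed to be an already-established proxy_conn_rec, and the returned value is used as the handler status as in mod_proxy_wstunnel above):

    #include "mod_proxy.h"

    static int tunnel_sketch(request_rec *r, proxy_conn_rec *backend,
                             apr_interval_time_t timeout)
    {
        proxy_tunnel_rec *tunnel;
        apr_status_t rv;

        /* Pair r->connection with the backend connection, switch both
         * sockets to nonblocking and arm the pollset. */
        rv = ap_proxy_tunnel_create(&tunnel, r, backend->connection, "WS");
        if (rv != APR_SUCCESS) {
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        tunnel->timeout = timeout;

        /* Pump both directions until both write sides are shut down,
         * a timeout fires (HTTP_GATEWAY_TIME_OUT) or an error occurs. */
        return ap_proxy_tunnel_run(tunnel);
    }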