git.ipfire.org Git - thirdparty/apache/httpd.git/commitdiff
event: Add support for non blocking behaviour in the
authorGraham Leggett <minfrin@apache.org>
Fri, 21 Jan 2022 00:09:24 +0000 (00:09 +0000)
committerGraham Leggett <minfrin@apache.org>
Fri, 21 Jan 2022 00:09:24 +0000 (00:09 +0000)
CONN_STATE_READ_REQUEST_LINE phase, in addition to the existing
CONN_STATE_WRITE_COMPLETION phase. Update mod_ssl to perform non blocking
TLS handshakes.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1897281 13f79535-47bb-0310-9956-ffa450edef68

CHANGES
include/ap_mmn.h
include/scoreboard.h
modules/generators/mod_status.c
modules/ssl/mod_ssl.c
modules/ssl/ssl_engine_io.c
server/mpm/event/event.c

diff --git a/CHANGES b/CHANGES
index de3bbe22d627de0a0845fbb9fba6a568a456b8f3..233319563af7db386285582d132173cc61393b59 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -1,6 +1,11 @@
                                                          -*- coding: utf-8 -*-
 Changes with Apache 2.5.1
 
+  *) event: Add support for non blocking behaviour in the
+     CONN_STATE_READ_REQUEST_LINE phase, in addition to the existing
+     CONN_STATE_WRITE_COMPLETION phase. Update mod_ssl to perform non blocking
+     TLS handshakes. [Graham Leggett]
+
   *) http: Enforce that fully qualified uri-paths not to be forward-proxied
      have an http(s) scheme, and that the ones to be forward proxied have a
      hostname, per HTTP specifications.  [Ruediger Pluem, Yann Ylavic]
index 33cd91e5a77e12e195a7b99b74be788cfba446bf..6dbbe5e27ca7a05824ea685f3100ccc16f940d1e 100644 (file)
  * 20210926.2 (2.5.1-dev)  Add ap_post_read_request()
  * 20211221.0 (2.5.1-dev)  Bump PROXY_WORKER_MAX_NAME_SIZE from 256 to 384,
  *                         add PROXY_WORKER_UDS_PATH_SIZE.
+ * 20211221.1 (2.5.1-dev)  Add read_line to scoreboard.
  * 
  */
 
 #ifndef MODULE_MAGIC_NUMBER_MAJOR
 #define MODULE_MAGIC_NUMBER_MAJOR 20211221
 #endif
-#define MODULE_MAGIC_NUMBER_MINOR 0             /* 0...n */
+#define MODULE_MAGIC_NUMBER_MINOR 1             /* 0...n */
 
 /**
  * Determine if the server's current MODULE_MAGIC_NUMBER is at least a
index 321b32778a0b4cc437f2208336a211d518a75528..b0bdc6f13b076c15ed9df628b06179464af16767 100644 (file)
@@ -148,6 +148,7 @@ struct process_score {
     apr_uint32_t lingering_close;   /* async connections in lingering close */
     apr_uint32_t keep_alive;        /* async connections in keep alive */
     apr_uint32_t suspended;         /* connections suspended by some module */
+    apr_uint32_t read_line;         /* async connections doing read line */
 };
 
 /* Scoreboard is now in 'local' memory, since it isn't updated once created,
index 8e80202c446e25a0adc6788a66a03a774fd19c77..8707ebe58b38d91138ccc72975c99a3b6654e5b4 100644 (file)
@@ -557,7 +557,7 @@ static int status_handler(request_rec *r)
         ap_rputs("</dl>", r);
 
     if (is_async) {
-        int write_completion = 0, lingering_close = 0, keep_alive = 0,
+        int read_line = 0, write_completion = 0, lingering_close = 0, keep_alive = 0,
             connections = 0, stopping = 0, procs = 0;
         /*
          * These differ from 'busy' and 'ready' in how gracefully finishing
@@ -574,11 +574,12 @@ static int status_handler(request_rec *r)
                          "<th colspan=\"3\">Async connections</th></tr>\n"
                      "<tr><th>total</th><th>accepting</th>"
                          "<th>busy</th><th>idle</th>"
-                         "<th>writing</th><th>keep-alive</th><th>closing</th></tr>\n", r);
+                         "<th>reading</th><th>writing</th><th>keep-alive</th><th>closing</th></tr>\n", r);
         for (i = 0; i < server_limit; ++i) {
             ps_record = ap_get_scoreboard_process(i);
             if (ps_record->pid) {
                 connections      += ps_record->connections;
+                read_line        += ps_record->read_line;
                 write_completion += ps_record->write_completion;
                 keep_alive       += ps_record->keep_alive;
                 lingering_close  += ps_record->lingering_close;
@@ -600,7 +601,7 @@ static int status_handler(request_rec *r)
                                       "<td>%s%s</td>"
                                       "<td>%u</td><td>%s</td>"
                                       "<td>%u</td><td>%u</td>"
-                                      "<td>%u</td><td>%u</td><td>%u</td>"
+                                      "<td>%u</td><td>%u</td><td>%u</td><td>%u</td>"
                                       "</tr>\n",
                                i, ps_record->pid,
                                dying, old,
@@ -608,6 +609,7 @@ static int status_handler(request_rec *r)
                                ps_record->not_accepting ? "no" : "yes",
                                thread_busy_buffer[i],
                                thread_idle_buffer[i],
+                               ps_record->read_line,
                                ps_record->write_completion,
                                ps_record->keep_alive,
                                ps_record->lingering_close);
@@ -619,12 +621,12 @@ static int status_handler(request_rec *r)
                           "<td>%d</td><td>%d</td>"
                           "<td>%d</td><td>&nbsp;</td>"
                           "<td>%d</td><td>%d</td>"
-                          "<td>%d</td><td>%d</td><td>%d</td>"
+                          "<td>%d</td><td>%d</td><td>%d</td><td>%d</td>"
                           "</tr>\n</table>\n",
                           procs, stopping,
                           connections,
                           busy_workers, idle_workers,
-                          write_completion, keep_alive, lingering_close);
+                          read_line, write_completion, keep_alive, lingering_close);
         }
         else {
             ap_rprintf(r, "Processes: %d\n"
@@ -632,13 +634,14 @@ static int status_handler(request_rec *r)
                           "BusyWorkers: %d\n"
                           "IdleWorkers: %d\n"
                           "ConnsTotal: %d\n"
+                          "ConnsAsyncReading: %d\n"
                           "ConnsAsyncWriting: %d\n"
                           "ConnsAsyncKeepAlive: %d\n"
                           "ConnsAsyncClosing: %d\n",
                           procs, stopping,
                           busy_workers, idle_workers,
                           connections,
-                          write_completion, keep_alive, lingering_close);
+                          read_line, write_completion, keep_alive, lingering_close);
         }
     }
 
index b54edb602dd3aab7e0c30cf43d0b1f7a3ebc0bf8..3108cd877ef710a9dae39c5bb5a649c8976fd757 100644 (file)
@@ -30,6 +30,7 @@
 #include "util_md5.h"
 #include "util_mutex.h"
 #include "ap_provider.h"
+#include "ap_mpm.h"
 #include "http_config.h"
 
 #include "mod_proxy.h" /* for proxy_hook_section_post_config() */
@@ -691,6 +692,8 @@ static int ssl_hook_process_connection(conn_rec* c)
 {
     SSLConnRec *sslconn = myConnConfig(c);
 
+    int status = DECLINED;
+
     if (sslconn && !sslconn->disabled) {
         /* On an active SSL connection, let the input filters initialize
          * themselves which triggers the handshake, which again triggers
@@ -698,13 +701,48 @@ static int ssl_hook_process_connection(conn_rec* c)
          */
         apr_bucket_brigade* temp;
 
+        int async_mpm = 0;
+
         temp = apr_brigade_create(c->pool, c->bucket_alloc);
-        ap_get_brigade(c->input_filters, temp,
-                       AP_MODE_INIT, APR_BLOCK_READ, 0);
+
+        if (ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm) != APR_SUCCESS) {
+            async_mpm = 0;
+        }
+
+        if (async_mpm) {
+
+            /* Take advantage of an async MPM. If we see an EAGAIN,
+             * loop round and don't block.
+             */
+            apr_status_t rv;
+
+            rv = ap_get_brigade(c->input_filters, temp,
+                           AP_MODE_INIT, APR_NONBLOCK_READ, 0);
+
+            if (rv == APR_SUCCESS) {
+                /* great news, lets continue */
+                status = DECLINED;
+            }
+            else if (rv == APR_EAGAIN) {
+                /* we've been asked to come around again, don't block */
+                status = OK;
+            }
+            else {
+                /* we failed, give up */
+                status = DONE;
+
+                c->aborted = 1;
+            }
+        }
+        else {
+            ap_get_brigade(c->input_filters, temp,
+                           AP_MODE_INIT, APR_BLOCK_READ, 0);
+        }
+
         apr_brigade_destroy(temp);
     }
-    
-    return DECLINED;
+
+    return status;
 }
 
 /*
index 7f2386fb115af55cae1f989f2644f55903f63f68..8658ed44e70fe798bb16a226078dd88431827775 100644 (file)
@@ -1489,10 +1489,21 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx)
         }
         else if (ssl_err == SSL_ERROR_WANT_READ) {
             /*
-             * This is in addition to what was present earlier. It is
-             * borrowed from openssl_state_machine.c [mod_tls].
-             * TBD.
+             * Call us back when ready to read
              */
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, outctx->c,
+                          "Want read during nonblocking accept");
+            outctx->c->cs->sense = CONN_SENSE_WANT_READ;
+            outctx->rc = APR_EAGAIN;
+            return APR_EAGAIN;
+        }
+        else if (ssl_err == SSL_ERROR_WANT_WRITE) {
+            /*
+             * Call us back when ready to write
+             */
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, outctx->c,
+                          "Want write during nonblocking accept");
+            outctx->c->cs->sense = CONN_SENSE_WANT_WRITE;
             outctx->rc = APR_EAGAIN;
             return APR_EAGAIN;
         }
index 34762f9df4840df06adf502195302f6b585772d8..a6fd9e93d7a1e8023ceb1d510fe8e265dc15f280 100644 (file)
@@ -268,12 +268,14 @@ struct timeout_queue {
 /*
  * Several timeout queues that use different timeouts, so that we always can
  * simply append to the end.
+ *   read_line_q        uses vhost's TimeOut FIXME - we can use a short timeout here
  *   write_completion_q uses vhost's TimeOut
  *   keepalive_q        uses vhost's KeepAliveTimeOut
  *   linger_q           uses MAX_SECS_TO_LINGER
  *   short_linger_q     uses SECONDS_TO_LINGER
  */
-static struct timeout_queue *write_completion_q,
+static struct timeout_queue *read_line_q,
+                            *write_completion_q,
                             *keepalive_q,
                             *linger_q,
                             *short_linger_q;
@@ -446,7 +448,8 @@ static event_retained_data *retained;
 static int max_spawn_rate_per_bucket = MAX_SPAWN_RATE / 1;
 
 struct event_srv_cfg_s {
-    struct timeout_queue *wc_q,
+    struct timeout_queue *rl_q,
+                         *wc_q,
                          *ka_q;
 };
 
@@ -1142,6 +1145,7 @@ read_request:
      */
     if (rc != OK || (cs->pub.state >= CONN_STATE_NUM)
                  || (cs->pub.state < CONN_STATE_LINGER
+                     && cs->pub.state != CONN_STATE_READ_REQUEST_LINE
                      && cs->pub.state != CONN_STATE_WRITE_COMPLETION
                      && cs->pub.state != CONN_STATE_CHECK_REQUEST_LINE_READABLE
                      && cs->pub.state != CONN_STATE_SUSPENDED)) {
@@ -1153,6 +1157,41 @@ read_request:
         cs->pub.state = CONN_STATE_LINGER;
     }
 
+    if (cs->pub.state == CONN_STATE_READ_REQUEST_LINE) {
+        ap_update_child_status(cs->sbh, SERVER_BUSY_READ, NULL);
+
+        /* It greatly simplifies the logic to use a single timeout value per q
+         * because the new element can just be added to the end of the list and
+         * it will stay sorted in expiration time sequence.  If brand new
+         * sockets are sent to the event thread for a readability check, this
+         * will be a slight behavior change - they use the non-keepalive
+         * timeout today.  With a normal client, the socket will be readable in
+         * a few milliseconds anyway.
+         */
+        cs->queue_timestamp = apr_time_now();
+        notify_suspend(cs);
+
+        /* Add work to pollset. */
+        update_reqevents_from_sense(cs, -1);
+        apr_thread_mutex_lock(timeout_mutex);
+        TO_QUEUE_APPEND(cs->sc->rl_q, cs);
+        rv = apr_pollset_add(event_pollset, &cs->pfd);
+        if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
+            AP_DEBUG_ASSERT(0);
+            TO_QUEUE_REMOVE(cs->sc->rl_q, cs);
+            apr_thread_mutex_unlock(timeout_mutex);
+            ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO()
+                         "process_socket: apr_pollset_add failure for "
+                         "read request line");
+            close_connection(cs);
+            signal_threads(ST_GRACEFUL);
+        }
+        else {
+            apr_thread_mutex_unlock(timeout_mutex);
+        }
+        return;
+    }
+
     if (cs->pub.state == CONN_STATE_WRITE_COMPLETION) {
         int pending = DECLINED;
 
@@ -1911,10 +1950,11 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                 last_log = now;
                 apr_thread_mutex_lock(timeout_mutex);
                 ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf,
-                             "connections: %u (clogged: %u write-completion: %d "
+                             "connections: %u (clogged: %u read-line: %d write-completion: %d "
                              "keep-alive: %d lingering: %d suspended: %u)",
                              apr_atomic_read32(&connection_count),
                              apr_atomic_read32(&clogged_count),
+                             apr_atomic_read32(read_line_q->total),
                              apr_atomic_read32(write_completion_q->total),
                              apr_atomic_read32(keepalive_q->total),
                              apr_atomic_read32(&lingering_count),
@@ -2047,6 +2087,11 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                 int blocking = 0;
 
                 switch (cs->pub.state) {
+                case CONN_STATE_READ_REQUEST_LINE:
+                     remove_from_q = cs->sc->rl_q;
+                     blocking = 1;
+                     break;
+
                 case CONN_STATE_WRITE_COMPLETION:
                     remove_from_q = cs->sc->wc_q;
                     blocking = 1;
@@ -2263,16 +2308,19 @@ do_maintenance:
             else {
                 process_keepalive_queue(now);
             }
-            /* Step 2: write completion timeouts */
+            /* Step 2: read line timeouts */
+            process_timeout_queue(read_line_q, now,
+                                  defer_lingering_close);
+            /* Step 3: write completion timeouts */
             process_timeout_queue(write_completion_q, now,
                                   defer_lingering_close);
-            /* Step 3: (normal) lingering close completion timeouts */
+            /* Step 4: (normal) lingering close completion timeouts */
             if (dying && linger_q->timeout > short_linger_q->timeout) {
                 /* Dying, force short timeout for normal lingering close */
                 linger_q->timeout = short_linger_q->timeout;
             }
             process_timeout_queue(linger_q, now, shutdown_connection);
-            /* Step 4: (short) lingering close completion timeouts */
+            /* Step 5: (short) lingering close completion timeouts */
             process_timeout_queue(short_linger_q, now, shutdown_connection);
 
             apr_thread_mutex_unlock(timeout_mutex);
@@ -2282,6 +2330,7 @@ do_maintenance:
                                                   : -1);
 
             ps->keep_alive = apr_atomic_read32(keepalive_q->total);
+            ps->read_line = apr_atomic_read32(read_line_q->total);
             ps->write_completion = apr_atomic_read32(write_completion_q->total);
             ps->connections = apr_atomic_read32(&connection_count);
             ps->suspended = apr_atomic_read32(&suspended_count);
@@ -3886,14 +3935,15 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
     struct {
         struct timeout_queue *tail, *q;
         apr_hash_t *hash;
-    } wc, ka;
+    } rl, wc, ka;
 
     /* Not needed in pre_config stage */
     if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) {
         return OK;
     }
 
-    wc.tail = ka.tail = NULL;
+    rl.tail = wc.tail = ka.tail = NULL;
+    rl.hash = apr_hash_make(ptemp);
     wc.hash = apr_hash_make(ptemp);
     ka.hash = apr_hash_make(ptemp);
 
@@ -3907,7 +3957,13 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
 
         ap_set_module_config(s->module_config, &mpm_event_module, sc);
         if (!wc.tail) {
+
             /* The main server uses the global queues */
+
+            rl.q = TO_QUEUE_MAKE(pconf, s->timeout, NULL);
+            apr_hash_set(rl.hash, &s->timeout, sizeof s->timeout, rl.q);
+            rl.tail = read_line_q = rl.q;
+
             wc.q = TO_QUEUE_MAKE(pconf, s->timeout, NULL);
             apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
             wc.tail = write_completion_q = wc.q;
@@ -3918,8 +3974,17 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
             ka.tail = keepalive_q = ka.q;
         }
         else {
+
             /* The vhosts use any existing queue with the same timeout,
              * or their own queue(s) if there isn't */
+
+            rl.q = apr_hash_get(rl.hash, &s->timeout, sizeof s->timeout);
+            if (!rl.q) {
+                rl.q = TO_QUEUE_MAKE(pconf, s->timeout, rl.tail);
+                apr_hash_set(rl.hash, &s->timeout, sizeof s->timeout, rl.q);
+                rl.tail = rl.tail->next = rl.q;
+            }
+
             wc.q = apr_hash_get(wc.hash, &s->timeout, sizeof s->timeout);
             if (!wc.q) {
                 wc.q = TO_QUEUE_MAKE(pconf, s->timeout, wc.tail);
@@ -3936,6 +4001,7 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                 ka.tail = ka.tail->next = ka.q;
             }
         }
+        sc->rl_q = rl.q;
         sc->wc_q = wc.q;
         sc->ka_q = ka.q;
     }