From: Yann Ylavic Date: Thu, 22 Jul 2021 15:14:42 +0000 (+0000) Subject: mpm_event: better comments and variables' names. X-Git-Tag: 2.5.0-alpha2-ci-test-only~887 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=d6220affccac4554b6f890b86786884be9a59ce0;p=thirdparty%2Fapache%2Fhttpd.git mpm_event: better comments and variables' names. No functional change. Follow up to r1891716. Github: #208 git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1891719 13f79535-47bb-0310-9956-ffa450edef68 --- diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c index 1f3d751a9ad..79c5f8cfa15 100644 --- a/server/mpm/event/event.c +++ b/server/mpm/event/event.c @@ -293,21 +293,21 @@ static volatile apr_time_t queues_next_expiry; */ static void TO_QUEUE_APPEND(struct timeout_queue *q, event_conn_state_t *el) { - apr_time_t q_expiry; + apr_time_t elem_expiry; apr_time_t next_expiry; APR_RING_INSERT_TAIL(&q->head, el, event_conn_state_t, timeout_list); ++*q->total; ++q->count; - /* Cheaply update the overall queues' next expiry according to the - * first entry of this queue (oldest), if necessary. + /* Cheaply update the global queues_next_expiry with the one of the + * first entry of this queue (oldest) if it expires before. */ el = APR_RING_FIRST(&q->head); - q_expiry = el->queue_timestamp + q->timeout; + elem_expiry = el->queue_timestamp + q->timeout; next_expiry = queues_next_expiry; - if (!next_expiry || next_expiry > q_expiry + TIMEOUT_FUDGE_FACTOR) { - queues_next_expiry = q_expiry; + if (!next_expiry || next_expiry > elem_expiry + TIMEOUT_FUDGE_FACTOR) { + queues_next_expiry = elem_expiry; /* Unblock the poll()ing listener for it to update its timeout. */ if (listener_is_wakeable) { apr_pollset_wakeup(event_pollset); @@ -1538,8 +1538,8 @@ static timer_event_t * event_get_timer_event(apr_time_t t, /* Okay, add sorted by when.. */ apr_skiplist_insert(timer_skiplist, te); - /* Cheaply update the overall timers' next expiry according to - * this event, if necessary. + /* Cheaply update the global timers_next_expiry with this event's + * if it expires before. */ next_expiry = timers_next_expiry; if (!next_expiry || next_expiry > te->when + EVENT_FUDGE_FACTOR) { @@ -1658,10 +1658,13 @@ static apr_status_t event_register_poll_callback(apr_pool_t *p, } /* - * Close socket and clean up if remote closed its end while we were in - * lingering close. Only to be called in the worker thread, and since it's - * in immediate call stack, we can afford a comfortable buffer size to - * consume data quickly. + * Flush data and close our side of the connection, then drain incoming data. + * If the latter would block put the connection in one of the linger timeout + * queues to be called back when ready, and repeat until it's closed by peer. + * Only to be called in the worker thread, and since it's in immediate call + * stack, we can afford a comfortable buffer size to consume data quickly. + * Pre-condition: cs is not in any timeout queue and not in the pollset, + * timeout_mutex is not locked */ #define LINGERING_BUF_SIZE (32 * 1024) static void process_lingering_close(event_conn_state_t *cs) @@ -1734,12 +1737,11 @@ static void process_lingering_close(event_conn_state_t *cs) apr_thread_mutex_unlock(timeout_mutex); } -/* call 'func' for all elements of 'q' with timeout less than 'timeout_time'. +/* call 'func' for all elements of 'q' above 'expiry'. 
* Pre-condition: timeout_mutex must already be locked * Post-condition: timeout_mutex will be locked again */ -static void process_timeout_queue(struct timeout_queue *q, - apr_time_t timeout_time, +static void process_timeout_queue(struct timeout_queue *q, apr_time_t expiry, int (*func)(event_conn_state_t *)) { apr_uint32_t total = 0, count; @@ -1759,27 +1761,26 @@ static void process_timeout_queue(struct timeout_queue *q, while (cs != APR_RING_SENTINEL(&qp->head, event_conn_state_t, timeout_list)) { /* Trash the entry if: - * - no timeout_time was given (asked for all), or + * - no expiry was given (zero means all), or * - it expired (according to the queue timeout), or * - the system clock skewed in the past: no entry should be - * registered above the given timeout_time (~now) + the queue + * registered above the given expiry (~now) + the queue * timeout, we won't keep any here (eg. for centuries). * * Otherwise stop, no following entry will match thanks to the * single timeout per queue (entries are added to the end!). * This allows maintenance in O(1). */ - if (timeout_time - && cs->queue_timestamp + qp->timeout > timeout_time - && cs->queue_timestamp < timeout_time + qp->timeout) { - /* Since this is the next expiring of this queue, update the - * overall queues' next expiry if it's later than this one. + if (expiry && cs->queue_timestamp + qp->timeout > expiry + && cs->queue_timestamp < expiry + qp->timeout) { + /* Since this is the next expiring entry of this queue, update + * the global queues_next_expiry if it's later than this one. */ - apr_time_t q_expiry = cs->queue_timestamp + qp->timeout; + apr_time_t elem_expiry = cs->queue_timestamp + qp->timeout; apr_time_t next_expiry = queues_next_expiry; if (!next_expiry - || next_expiry > q_expiry + TIMEOUT_FUDGE_FACTOR) { - queues_next_expiry = q_expiry; + || next_expiry > elem_expiry + TIMEOUT_FUDGE_FACTOR) { + queues_next_expiry = elem_expiry; } break; } @@ -1819,17 +1820,17 @@ static void process_timeout_queue(struct timeout_queue *q, apr_thread_mutex_lock(timeout_mutex); } -static void process_keepalive_queue(apr_time_t timeout_time) +static void process_keepalive_queue(apr_time_t expiry) { /* If all workers are busy, we kill older keep-alive connections so * that they may connect to another process. */ - if (!timeout_time && *keepalive_q->total) { + if (!expiry && *keepalive_q->total) { ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf, "All workers are busy or dying, will shutdown %u " "keep-alive connections", *keepalive_q->total); } - process_timeout_queue(keepalive_q, timeout_time, shutdown_connection); + process_timeout_queue(keepalive_q, expiry, shutdown_connection); } static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy) @@ -1859,9 +1860,9 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy) timer_event_t *te; const apr_pollfd_t *out_pfd; apr_int32_t num = 0; - apr_interval_time_t timeout_interval; + apr_interval_time_t timeout; socket_callback_baton_t *user_chain; - apr_time_t now, timeout_time = -1; + apr_time_t now, expiry = -1; int workers_were_busy = 0; if (conns_this_child <= 0) @@ -1880,7 +1881,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy) * faster. */ if (first_close) { - goto do_maintenance; /* with timeout_time == -1 */ + goto do_maintenance; /* with expiry == -1 */ } } @@ -1923,18 +1924,18 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy) * must be performed. 
*/ now = apr_time_now(); - timeout_interval = -1; + timeout = -1; /* Push expired timers to a worker, the first remaining one determines * the maximum time to poll() below, if any. */ - timeout_time = timers_next_expiry; - if (timeout_time && timeout_time < now + EVENT_FUDGE_FACTOR) { + expiry = timers_next_expiry; + if (expiry && expiry < now + EVENT_FUDGE_FACTOR) { apr_thread_mutex_lock(g_timer_skiplist_mtx); while ((te = apr_skiplist_peek(timer_skiplist))) { if (te->when > now + EVENT_FUDGE_FACTOR) { timers_next_expiry = te->when; - timeout_interval = te->when - now; + timeout = te->when - now; break; } apr_skiplist_pop(timer_skiplist, NULL); @@ -1958,27 +1959,27 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy) } /* Same for queues, use their next expiry, if any. */ - timeout_time = queues_next_expiry; - if (timeout_time - && (timeout_interval < 0 - || timeout_time <= now - || timeout_interval > timeout_time - now)) { - timeout_interval = timeout_time > now ? timeout_time - now : 0; + expiry = queues_next_expiry; + if (expiry + && (timeout < 0 + || expiry <= now + || timeout > expiry - now)) { + timeout = expiry > now ? expiry - now : 0; } /* When non-wakeable, don't wait more than 100 ms, in any case. */ #define NON_WAKEABLE_POLL_TIMEOUT apr_time_from_msec(100) if (!listener_is_wakeable - && (timeout_interval < 0 - || timeout_interval > NON_WAKEABLE_POLL_TIMEOUT)) { - timeout_interval = NON_WAKEABLE_POLL_TIMEOUT; + && (timeout < 0 + || timeout > NON_WAKEABLE_POLL_TIMEOUT)) { + timeout = NON_WAKEABLE_POLL_TIMEOUT; } - else if (timeout_interval > 0) { + else if (timeout > 0) { /* apr_pollset_poll() might round down the timeout to milliseconds, * let's forcibly round up here to never return before the timeout. */ - timeout_interval = apr_time_from_msec( - apr_time_as_msec(timeout_interval + apr_time_from_msec(1) - 1) + timeout = apr_time_from_msec( + apr_time_as_msec(timeout + apr_time_from_msec(1) - 1) ); } @@ -1986,10 +1987,10 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy) "polling with timeout=%" APR_TIME_T_FMT " queues_timeout=%" APR_TIME_T_FMT " timers_timeout=%" APR_TIME_T_FMT, - timeout_interval, queues_next_expiry - now, + timeout, queues_next_expiry - now, timers_next_expiry - now); - rc = apr_pollset_poll(event_pollset, timeout_interval, &num, &out_pfd); + rc = apr_pollset_poll(event_pollset, timeout, &num, &out_pfd); if (rc != APR_SUCCESS) { if (!APR_STATUS_IS_EINTR(rc) && !APR_STATUS_IS_TIMEUP(rc)) { ap_log_error(APLOG_MARK, APLOG_CRIT, rc, ap_server_conf, @@ -2012,6 +2013,10 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy) queues_next_expiry - now, timers_next_expiry - now); } + /* XXX possible optimization: stash the current time for use as + * r->request_time for new requests or queues maintenance + */ + for (user_chain = NULL; num; --num, ++out_pfd) { listener_poll_type *pt = (listener_poll_type *) out_pfd->client_data; if (pt->type == PT_CSD) { @@ -2214,25 +2219,22 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy) push_timer2worker(te); } - /* XXX possible optimization: stash the current time for use as - * r->request_time for new requests - */ - /* We process the timeout queues here only when their overall next - * expiry (read once above) is over. This happens accurately since + /* We process the timeout queues here only when the global + * queues_next_expiry is passed. 
This happens accurately since * adding to the queues (in workers) can only decrease this expiry, * while latest ones are only taken into account here (in listener) * during queues' processing, with the lock held. This works both * with and without wake-ability. */ - timeout_time = queues_next_expiry; + expiry = queues_next_expiry; do_maintenance: - if (timeout_time && timeout_time < (now = apr_time_now())) { + if (expiry && expiry < (now = apr_time_now())) { ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, ap_server_conf, "queues maintenance with timeout=%" APR_TIME_T_FMT, - timeout_time > 0 ? timeout_time - now : -1); + expiry > 0 ? expiry - now : -1); apr_thread_mutex_lock(timeout_mutex); - /* Processing all the queues below will recompute this. */ + /* Steps below will recompute this. */ queues_next_expiry = 0; /* Step 1: keepalive timeouts */
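---

For readers without the rest of event.c at hand, the mechanism these renames describe — one timeout per queue, a global queues_next_expiry that appenders only ever lower (within TIMEOUT_FUDGE_FACTOR slack), and a listener that recomputes it during queue maintenance with timeout_mutex held — can be sketched outside APR. The code below is a minimal illustrative sketch, not the mpm_event implementation: the names toq_init/toq_append/toq_expire/round_up_to_msec, the per-queue pthread mutex, and the bare int64_t microsecond timestamps are all assumptions made for the example.

/* Minimal sketch of the mechanism described above: a FIFO timeout queue
 * with a single timeout per queue, plus a global "next expiry" hint that
 * producers only ever lower and that queue maintenance refreshes.
 * Simplified and hypothetical: no APR, one mutex per queue, plain int64_t
 * microsecond timestamps. */
#include <pthread.h>
#include <stdint.h>

#define FUDGE_USEC 10000            /* cf. TIMEOUT_FUDGE_FACTOR: ~10ms slack */

typedef struct conn {
    struct conn *next, *prev;       /* ring, oldest first */
    int64_t queue_timestamp;        /* when the entry was queued (usec) */
} conn;

typedef struct {
    conn head;                      /* ring sentinel */
    int64_t timeout;                /* one timeout shared by all entries */
    pthread_mutex_t lock;
} timeout_queue;

static volatile int64_t queues_next_expiry;    /* 0 means "nothing queued" */

static void toq_init(timeout_queue *q, int64_t timeout_usec)
{
    q->head.next = q->head.prev = &q->head;
    q->timeout = timeout_usec;
    pthread_mutex_init(&q->lock, NULL);
}

/* Append is O(1): every entry shares the queue's timeout, so the ring is
 * naturally ordered by expiry.  Only the oldest entry can lower the global
 * hint, and only when it is more than the fudge factor earlier, to avoid
 * waking the poller for insignificant changes. */
static void toq_append(timeout_queue *q, conn *c, int64_t now)
{
    pthread_mutex_lock(&q->lock);
    c->queue_timestamp = now;
    c->prev = q->head.prev;
    c->next = &q->head;
    q->head.prev->next = c;
    q->head.prev = c;

    {
        int64_t oldest_expiry = q->head.next->queue_timestamp + q->timeout;
        int64_t next = queues_next_expiry;
        if (!next || next > oldest_expiry + FUDGE_USEC) {
            queues_next_expiry = oldest_expiry;
            /* the real listener would be woken up here (pollset wakeup)
             * so it can shorten its poll() timeout accordingly */
        }
    }
    pthread_mutex_unlock(&q->lock);
}

/* Maintenance walks from the oldest entry and stops at the first one that
 * has not expired yet; that entry also refreshes the global hint.  In
 * mpm_event the listener first zeroes queues_next_expiry (with the lock
 * held) and then runs this over every queue; a single queue is shown. */
static void toq_expire(timeout_queue *q, int64_t now, void (*func)(conn *))
{
    pthread_mutex_lock(&q->lock);
    while (q->head.next != &q->head) {
        conn *c = q->head.next;
        int64_t elem_expiry = c->queue_timestamp + q->timeout;
        if (elem_expiry > now) {
            int64_t next = queues_next_expiry;
            if (!next || next > elem_expiry + FUDGE_USEC)
                queues_next_expiry = elem_expiry;
            break;
        }
        c->prev->next = c->next;            /* unlink the expired entry */
        c->next->prev = c->prev;
        func(c);                            /* e.g. shut down a connection */
    }
    pthread_mutex_unlock(&q->lock);
}

/* Round a microsecond timeout up to a whole millisecond before handing it
 * to a poller that may itself round down, so the poll never returns early
 * (the same intent as the apr_time_from_msec()/apr_time_as_msec() dance in
 * the listener hunk above). */
static int64_t round_up_to_msec(int64_t timeout_usec)
{
    return (timeout_usec + 999) / 1000 * 1000;
}

In this sketch, the listener-side poll timeout would essentially be the smaller of the timers' and queues' next expiry minus "now", capped at 100ms when the pollset cannot be woken up, and rounded up with the helper above — the same shape as the timeout computation in the hunks above, without claiming to reproduce them.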