apr_time_t next_expiry;
APR_RING_INSERT_TAIL(&q->head, el, event_conn_state_t, timeout_list);
- apr_atomic_inc32(q->total);
+ ++*q->total;
++q->count;
/* Cheaply update the overall queues' next expiry according to the
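The append path drops apr_atomic_inc32() for a plain pre-increment. That is only safe if every writer of *q->total already serializes on timeout_mutex, which the later hunks suggest: the keep-alive queue is processed strictly between apr_thread_mutex_lock(timeout_mutex) and the matching unlock. The indirection through a pointer exists because several chained queues appear to share one grand total while each keeps its own count. A sketch of the presumed layout, with field names inferred from the usage in this patch rather than copied from the full source:

    #include <apr.h>
    #include <apr_time.h>
    #include <apr_ring.h>

    struct timeout_queue {
        APR_RING_HEAD(timeout_head_t, event_conn_state_t) head;
        apr_interval_time_t timeout;
        apr_uint32_t count;          /* entries in this queue alone */
        apr_uint32_t *total;         /* shared by all chained queues */
        struct timeout_queue *next;  /* chaining, walked via qp below */
    };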
{
APR_RING_REMOVE(el, timeout_list);
APR_RING_ELEM_INIT(el, timeout_list);
- apr_atomic_dec32(q->total);
+ --*q->total;
--q->count;
}
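The removal path is the symmetric change: plain decrements replace apr_atomic_dec32(), again assuming the caller holds timeout_mutex. A minimal sketch of that discipline, building on the struct above (the helper name and the lock-per-call shape are illustrative; the surrounding hunks indicate the real code takes the lock once around a whole batch):

    #include <apr_thread_mutex.h>

    static void queue_remove_locked(apr_thread_mutex_t *mutex,
                                    struct timeout_queue *q,
                                    event_conn_state_t *el)
    {
        apr_thread_mutex_lock(mutex);
        APR_RING_REMOVE(el, timeout_list);
        APR_RING_ELEM_INIT(el, timeout_list);
        --*q->total;   /* safe: no concurrent writer outside the lock */
        --q->count;
        apr_thread_mutex_unlock(mutex);
    }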
struct timeout_queue *qp;
apr_status_t rv;
- if (!apr_atomic_read32(q->total)) {
+ if (!*q->total) {
return;
}
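The emptiness guard also becomes a plain load. If this function is entered with timeout_mutex held, as the lock/unlock pairs in the later hunks suggest, the read is exact; and even for an unlocked caller, a short-circuit check like this only risks one redundant or skipped pass, never a corrupted counter.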
APR_RING_UNSPLICE(first, last, timeout_list);
APR_RING_SPLICE_TAIL(&trash, first, last, event_conn_state_t,
timeout_list);
- AP_DEBUG_ASSERT(apr_atomic_read32(q->total) >= count);
- apr_atomic_sub32(q->total, count);
+ AP_DEBUG_ASSERT(*q->total >= count && qp->count >= count);
+ *q->total -= count;
qp->count -= count;
total += count;
}
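Expired entries are unspliced from each chained queue as one contiguous run and spliced onto a local trash ring, so the shared total is adjusted once per queue rather than once per connection. The rewritten assertion is also strengthened: it now checks the per-queue qp->count alongside the shared *q->total, which the old atomic-based assert did not cover.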
if (!timeout_time) {
ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
"All workers are busy or dying, will close %u "
- "keep-alive connections",
- apr_atomic_read32(keepalive_q->total));
+ "keep-alive connections", *keepalive_q->total);
}
process_timeout_queue(keepalive_q, timeout_time,
start_lingering_close_nonblocking);
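A timeout_time of zero is the flush-everything case: per the trace message above, every queued keep-alive connection is handed to start_lingering_close_nonblocking() when the workers are exhausted or the process is dying. The %u specifier matches the now-plain apr_uint32_t read of *keepalive_q->total.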
"keep-alive: %d lingering: %d suspended: %u)",
apr_atomic_read32(&connection_count),
apr_atomic_read32(&clogged_count),
- apr_atomic_read32(write_completion_q->total),
- apr_atomic_read32(keepalive_q->total),
+ *(volatile apr_uint32_t*)write_completion_q->total,
+ *(volatile apr_uint32_t*)keepalive_q->total,
apr_atomic_read32(&lingering_count),
apr_atomic_read32(&suspended_count));
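Reads that happen outside timeout_mutex switch from apr_atomic_read32() to a dereference through a volatile-qualified pointer. That forces a fresh load on each evaluation but provides no synchronization, which is acceptable here because a slightly stale value only perturbs a trace line. A sketch of the idiom as a helper (the function name is hypothetical):

    #include <apr.h>

    /* Relaxed read of the shared queue total: fresh load, no locking.
     * Suitable only for diagnostics where staleness is harmless. */
    static APR_INLINE apr_uint32_t queue_total_relaxed(const apr_uint32_t *total)
    {
        return *(volatile const apr_uint32_t *)total;
    }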
if (dying) {
apr_thread_mutex_unlock(timeout_mutex);
- ps->keep_alive = apr_atomic_read32(keepalive_q->total);
- ps->write_completion = apr_atomic_read32(write_completion_q->total);
+ ps->keep_alive = *(volatile apr_uint32_t*)keepalive_q->total;
+ ps->write_completion = *(volatile apr_uint32_t*)write_completion_q->total;
ps->connections = apr_atomic_read32(&connection_count);
ps->suspended = apr_atomic_read32(&suspended_count);
ps->lingering_close = apr_atomic_read32(&lingering_count);
}
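The scoreboard fields get the same treatment: process status reporting tolerates a marginally stale count, so a volatile read, which merely prevents the compiler from reusing a cached value, is sufficient where an atomic was previously used.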
else if ((workers_were_busy || dying)
- && apr_atomic_read32(keepalive_q->total)) {
+ && *(volatile apr_uint32_t*)keepalive_q->total) {
apr_thread_mutex_lock(timeout_mutex);
process_keepalive_queue(0); /* kill'em all \m/ */
apr_thread_mutex_unlock(timeout_mutex);
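Here the unsynchronized volatile read acts as a cheap pre-check: only if the keep-alive total appears nonzero does the listener take timeout_mutex and flush the queue via process_keepalive_queue(0). A stale read is benign either way; at worst the lock is taken once for an already-empty queue, or the flush is deferred to the next wakeup.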