0, /* stream timeout */
};
-static int files_per_session;
-
void h2_config_init(apr_pool_t *pool)
{
- /* Determine a good default for this platform and mpm?
- * TODO: not sure how APR wants to hand out this piece of
- * information.
- */
- int max_files = 256;
- int conn_threads = 1;
- int tx_files = max_files / 4;
-
(void)pool;
- ap_mpm_query(AP_MPMQ_MAX_THREADS, &conn_threads);
- switch (h2_conn_mpm_type()) {
- case H2_MPM_PREFORK:
- case H2_MPM_WORKER:
- case H2_MPM_EVENT:
- /* allow that many transfer open files per mplx */
- files_per_session = (tx_files / conn_threads);
- break;
- default:
- /* don't know anything about it, stay safe */
- break;
- }
}
static void *h2_config_create(apr_pool_t *pool,
apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var)
{
- int n;
switch(var) {
case H2_CONF_MAX_STREAMS:
return H2_CONFIG_GET(conf, &defconf, h2_max_streams);
case H2_CONF_DIRECT:
return H2_CONFIG_GET(conf, &defconf, h2_direct);
case H2_CONF_SESSION_FILES:
- n = H2_CONFIG_GET(conf, &defconf, session_extra_files);
- if (n < 0) {
- n = files_per_session;
- }
- return n;
+ return H2_CONFIG_GET(conf, &defconf, session_extra_files);
case H2_CONF_TLS_WARMUP_SIZE:
return H2_CONFIG_GET(conf, &defconf, tls_warmup_size);
case H2_CONF_TLS_COOLDOWN_SECS:
{
const h2_config *config = h2_config_sget(s);
apr_status_t status = APR_SUCCESS;
- int minw, maxw;
+ int minw, maxw, max_tx_handles, n;
int max_threads_per_child = 0;
int idle_secs = 0;
maxw = minw;
}
+ /* How many file handles is it safe to use for transfer
+ * to the master connection to be streamed out?
+ * Is there a portable APR rlimit on NOFILES? We have not
+ * found one. And if there is, how many of those would we set aside?
+ * This all leads to a process-wide handle allocation strategy
+ * which ultimately would limit the number of accepted connections
+ * with the assumption of implicitly reserving n handles for every
+ * connection and requiring modules with excessive needs to allocate
+ * from a central pool.
+ */
+ n = h2_config_geti(config, H2_CONF_SESSION_FILES);
+ if (n < 0) {
+ max_tx_handles = 256;
+ }
+ else {
+ max_tx_handles = maxw * n;
+ }
+
ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
- "h2_workers: min=%d max=%d, mthrpchild=%d",
- minw, maxw, max_threads_per_child);
+ "h2_workers: min=%d max=%d, mthrpchild=%d, tx_files=%d",
+ minw, maxw, max_threads_per_child, max_tx_handles);
+ workers = h2_workers_create(s, pool, minw, maxw, max_tx_handles);
- workers = h2_workers_create(s, pool, minw, maxw);
idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS);
h2_workers_set_max_idle_secs(workers, idle_secs);
apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb,
apr_size_t maxlen, apr_table_t *trailers,
- int *pfile_handles_allowed)
+ apr_size_t *pfile_buckets_allowed)
{
apr_status_t status;
int start_allowed;
* many open files already buffered. Otherwise we will run out of
* file handles.
*/
- start_allowed = *pfile_handles_allowed;
- status = h2_util_move(io->bbout, bb, maxlen, pfile_handles_allowed,
+ start_allowed = *pfile_buckets_allowed;
+ status = h2_util_move(io->bbout, bb, maxlen, pfile_buckets_allowed,
"h2_io_out_write");
/* track # file buckets moved into our pool */
- if (start_allowed != *pfile_handles_allowed) {
- io->files_handles_owned += (start_allowed - *pfile_handles_allowed);
+ if (start_allowed != *pfile_buckets_allowed) {
+ io->files_handles_owned += (start_allowed - *pfile_buckets_allowed);
}
return status;
}
apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb,
apr_size_t maxlen, apr_table_t *trailers,
- int *pfile_buckets_allowed);
+ apr_size_t *pfile_buckets_allowed);
/**
* Closes the input. After existing data has been read, APR_EOF will
static void have_out_data_for(h2_mplx *m, int stream_id);
+static void check_tx_reservation(h2_mplx *m)
+{
+ if (m->tx_handles_reserved == 0) {
+ m->tx_handles_reserved += h2_workers_tx_reserve(m->workers,
+ H2MIN(m->tx_chunk_size, h2_io_set_size(m->stream_ios)));
+ }
+}
+
+static void check_tx_free(h2_mplx *m)
+{
+ if (m->tx_handles_reserved > m->tx_chunk_size) {
+ apr_size_t count = m->tx_handles_reserved - m->tx_chunk_size;
+ m->tx_handles_reserved = m->tx_chunk_size;
+ h2_workers_tx_free(m->workers, count);
+ }
+ else if (m->tx_handles_reserved
+ && (!m->stream_ios || h2_io_set_is_empty(m->stream_ios))) {
+ h2_workers_tx_free(m->workers, m->tx_handles_reserved);
+ m->tx_handles_reserved = 0;
+ }
+}
+
static void h2_mplx_destroy(h2_mplx *m)
{
AP_DEBUG_ASSERT(m);
m->stream_ios = NULL;
}
+ check_tx_free(m);
+
if (m->pool) {
apr_pool_destroy(m->pool);
}
m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
m->workers = workers;
- m->file_handles_allowed = h2_config_geti(conf, H2_CONF_SESSION_FILES);
+ m->tx_handles_reserved = 0;
+ m->tx_chunk_size = 4;
+
m->stream_timeout_secs = h2_config_geti(conf, H2_CONF_STREAM_TIMEOUT_SECS);
}
return m;
h2_workers_register(m->workers, m);
}
-static void workers_unregister(h2_mplx *m)
-{
- h2_workers_unregister(m->workers, m);
-}
-
static int io_process_events(h2_mplx *m, h2_io *io)
{
if (io->input_consumed && m->input_consumed) {
/* The pool is cleared/destroyed which also closes all
* allocated file handles. Give this count back to our
* file handle pool. */
- m->file_handles_allowed += io->files_handles_owned;
+ m->tx_handles_reserved += io->files_handles_owned;
+
h2_io_set_remove(m->stream_ios, io);
h2_io_set_remove(m->ready_ios, io);
h2_io_destroy(io);
}
m->spare_pool = pool;
}
+
+ check_tx_free(m);
}
static int io_stream_done(h2_mplx *m, h2_io *io, int rst_error)
{
apr_status_t status;
- workers_unregister(m);
+ h2_workers_unregister(m->workers, m);
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
int i, wait_secs = 5;
&& !is_aborted(m, &status)) {
status = h2_io_out_write(io, bb, m->stream_max_mem, trailers,
- &m->file_handles_allowed);
+ &m->tx_handles_reserved);
/* Wait for data to drain until there is room again or
* stream timeout expires */
h2_io_signal_init(io, H2_IO_WRITE, m->stream_timeout_secs, iowait);
h2_io_set_response(io, response);
h2_io_set_add(m->ready_ios, io);
+ if (response && response->http_status < 300) {
+ /* we might see some file buckets in the output, see
+ * if we have enough handles reserved. */
+ check_tx_reservation(m);
+ }
if (bb) {
status = out_write(m, io, f, bb, response->trailers, iowait);
}
apr_pool_t *spare_pool; /* spare pool, ready for next io */
struct h2_workers *workers;
- int file_handles_allowed;
+ apr_size_t tx_handles_reserved;
+ apr_size_t tx_chunk_size;
h2_mplx_consumed_cb *input_consumed;
void *input_consumed_ctx;
#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+#define H2MAX(x,y) ((x) > (y) ? (x) : (y))
+#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
+
#endif
#include "h2_version.h"
#include "h2_workers.h"
-#define H2MAX(x,y) ((x) > (y) ? (x) : (y))
-#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
static int frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen);
stream->response = response;
if (bb && !APR_BRIGADE_EMPTY(bb)) {
- int move_all = INT_MAX;
+ apr_size_t move_all = INT_MAX;
/* we can move file handles from h2_mplx into this h2_stream as many
* as we want, since the lifetimes are the same and we are not freeing
* the ones in h2_mplx->io before this stream is done. */
static apr_status_t last_not_included(apr_bucket_brigade *bb,
apr_off_t maxlen,
int same_alloc,
- int *pfile_buckets_allowed,
+ apr_size_t *pfile_buckets_allowed,
apr_bucket **pend)
{
apr_bucket *b;
#define LOG_LEVEL APLOG_INFO
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_off_t maxlen, int *pfile_handles_allowed,
+ apr_off_t maxlen, apr_size_t *pfile_buckets_allowed,
const char *msg)
{
apr_status_t status = APR_SUCCESS;
|| to->p == from->p);
if (!FILE_MOVE) {
- pfile_handles_allowed = NULL;
+ pfile_buckets_allowed = NULL;
}
if (!APR_BRIGADE_EMPTY(from)) {
apr_bucket *b, *end;
status = last_not_included(from, maxlen, same_alloc,
- pfile_handles_allowed, &end);
+ pfile_buckets_allowed, &end);
if (status != APR_SUCCESS) {
return status;
}
/* ignore */
}
}
- else if (pfile_handles_allowed
- && *pfile_handles_allowed > 0
+ else if (pfile_buckets_allowed
+ && *pfile_buckets_allowed > 0
&& APR_BUCKET_IS_FILE(b)) {
/* We do not want to read files when passing buckets, if
* we can avoid it. However, what we've come up so far
}
apr_brigade_insert_file(to, fd, b->start, b->length,
to->p);
- --(*pfile_handles_allowed);
+ --(*pfile_buckets_allowed);
}
else {
const char *data;
* @param msg message for use in logging
*/
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_off_t maxlen, int *pfile_buckets_allowed,
+ apr_off_t maxlen, apr_size_t *pfile_buckets_allowed,
const char *msg);
/**
* @macro
* Version number of the http2 module as c string
*/
-#define MOD_HTTP2_VERSION "1.0.17"
+#define MOD_HTTP2_VERSION "1.1.0-DEV"
/**
* @macro
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_HTTP2_VERSION_NUM 0x010011
+#define MOD_HTTP2_VERSION_NUM 0x010100
#endif /* mod_h2_h2_version_h */
#include "h2_worker.h"
#include "h2_workers.h"
+
static int in_list(h2_workers *workers, h2_mplx *m)
{
h2_mplx *e;
}
h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool,
- int min_size, int max_size)
+ int min_size, int max_size,
+ apr_size_t max_tx_handles)
{
apr_status_t status;
h2_workers *workers;
workers->max_size = max_size;
apr_atomic_set32(&workers->max_idle_secs, 10);
+ workers->max_tx_handles = max_tx_handles;
+ workers->spare_tx_handles = workers->max_tx_handles;
+
apr_threadattr_create(&workers->thread_attr, workers->pool);
if (ap_thread_stacksize != 0) {
apr_threadattr_stacksize_set(workers->thread_attr,
status = apr_thread_cond_create(&workers->mplx_added, workers->pool);
}
+ if (status == APR_SUCCESS) {
+ status = apr_thread_mutex_create(&workers->tx_lock,
+ APR_THREAD_MUTEX_DEFAULT,
+ workers->pool);
+ }
+
if (status == APR_SUCCESS) {
status = h2_workers_start(workers);
}
}
apr_atomic_set32(&workers->max_idle_secs, idle_secs);
}
+
+apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count)
+{
+ apr_status_t status = apr_thread_mutex_lock(workers->tx_lock);
+ if (status == APR_SUCCESS) {
+ count = H2MIN(workers->spare_tx_handles, count);
+ workers->spare_tx_handles -= count;
+ ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s,
+ "h2_workers: reserved %d tx handles, %d/%d left",
+ (int)count, (int)workers->spare_tx_handles,
+ (int)workers->max_tx_handles);
+ apr_thread_mutex_unlock(workers->tx_lock);
+ return count;
+ }
+ return 0;
+}
+
+void h2_workers_tx_free(h2_workers *workers, apr_size_t count)
+{
+ apr_status_t status = apr_thread_mutex_lock(workers->tx_lock);
+ if (status == APR_SUCCESS) {
+ workers->spare_tx_handles += count;
+ ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s,
+ "h2_workers: freed %d tx handles, %d/%d left",
+ (int)count, (int)workers->spare_tx_handles,
+ (int)workers->max_tx_handles);
+ apr_thread_mutex_unlock(workers->tx_lock);
+ }
+}
+
int min_size;
int max_size;
+ apr_size_t max_tx_handles;
+ apr_size_t spare_tx_handles;
+
unsigned int aborted : 1;
apr_threadattr_t *thread_attr;
struct apr_thread_mutex_t *lock;
struct apr_thread_cond_t *mplx_added;
+
+ struct apr_thread_mutex_t *tx_lock;
};
* threads.
*/
h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool,
- int min_size, int max_size);
+ int min_size, int max_size,
+ apr_size_t max_tx_handles);
/* Destroy the worker pool and all its threads.
*/
* out of tasks, it will be automatically be unregistered. Should
* new tasks arrive, it needs to be registered again.
*/
-apr_status_t h2_workers_register(h2_workers *workers,
- struct h2_mplx *m);
+apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m);
/**
* Remove a h2_mplx from the worker registry.
*/
-apr_status_t h2_workers_unregister(h2_workers *workers,
- struct h2_mplx *m);
+apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m);
/**
* Set the amount of seconds a h2_worker should wait for new tasks
*/
void h2_workers_set_max_idle_secs(h2_workers *workers, int idle_secs);
+/**
+ * Reservation of file handles available for transfer between workers
+ * and master connections.
+ *
+ * When handling output from request processing, file handles are often
+ * encountered when static files are served. The most efficient way is then
+ * to forward the handle itself to the master connection where it can be
+ * read or sendfile'd to the client. But file handles are a scarce resource,
+ * so there needs to be a limit on how many handles are transferred this way.
+ *
+ * h2_workers keeps track of the number of reserved handles and observes a
+ * configurable maximum value.
+ *
+ * @param workers the workers instance
+ * @param count how many handles the caller wishes to reserve
+ * @return the number of reserved handles, may be 0.
+ */
+apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count);
+
+/**
+ * Return a number of reserved file handles back to the pool. The overall
+ * number returned may not exceed the number previously reserved.
+ * @param workers the workers instance
+ * @param count how many handles are returned to the pool
+ */
+void h2_workers_tx_free(h2_workers *workers, apr_size_t count);
+
#endif /* defined(__mod_h2__h2_workers__) */