apr_status_t status = APR_SUCCESS;
apr_bucket *b;
apr_off_t bblen;
- apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)?
- (apr_size_t)readbytes : APR_SIZE_MAX);
+ apr_size_t rmax = (readbytes < APR_INT32_MAX)?
+ (apr_size_t)readbytes : APR_INT32_MAX;
conn_ctx = h2_conn_ctx_get(f->c);
AP_DEBUG_ASSERT(conn_ctx);
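A note on the clamping change above: readbytes is a signed apr_off_t, while APR_SIZE_MAX is unsigned, so the old comparison converted readbytes to unsigned; a (theoretically) negative readbytes would then compare as a huge value and yield an equally huge rmax. Capping at the signed APR_INT32_MAX keeps the comparison in signed arithmetic and bounds the per-call read size. A minimal standalone sketch of the pattern (hypothetical helper, not patch code):

static apr_size_t clamp_readbytes(apr_off_t readbytes)
{
    /* hedged sketch, not part of the patch:
     * old form: (readbytes <= APR_SIZE_MAX) converts the signed value
     *           to apr_size_t, so readbytes == -1 compares equal to
     *           APR_SIZE_MAX and slips through as SIZE_MAX.
     * new form: both operands stay signed; the result is capped at
     *           APR_INT32_MAX before the cast to apr_size_t. */
    return (readbytes < APR_INT32_MAX)
        ? (apr_size_t)readbytes : APR_INT32_MAX;
}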
static void c2_transit_recycle(h2_mplx *m, h2_c2_transit *transit)
{
- if (m->c2_transits->nelts >= m->max_spare_transits) {
+ if (m->c2_transits->nelts >= APR_INT32_MAX ||
+ (apr_uint32_t)m->c2_transits->nelts >= m->max_spare_transits) {
c2_transit_destroy(transit);
}
else {
m->q = h2_iq_create(m->pool, m->max_streams);
m->workers = workers;
- m->processing_max = H2MIN((int)h2_workers_get_max_workers(workers), m->max_streams);
+ m->processing_max = H2MIN(h2_workers_get_max_workers(workers), m->max_streams);
m->processing_limit = 6; /* the original h1 max parallel connections */
m->last_mood_change = apr_time_now();
m->mood_update_interval = apr_time_from_msec(100);
sizeof(h2_c2_transit*));
m->producer = h2_workers_register(workers, m->pool,
- apr_psprintf(m->pool, "h2-%d", (int)m->id),
+ apr_psprintf(m->pool, "h2-%u",
+ (unsigned int)m->id),
c2_prod_next, c2_prod_done,
workers_shutdown, m);
return m;
if (conn_ctx) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, /* NO APLOGNO */
H2_STRM_MSG(stream, "->03198: %s %s %s"
- "[started=%d/done=%d]"),
+ "[started=%u/done=%u]"),
conn_ctx->request->method, conn_ctx->request->authority,
conn_ctx->request->path,
- (int)apr_atomic_read32(&conn_ctx->started),
- (int)apr_atomic_read32(&conn_ctx->done));
+ apr_atomic_read32(&conn_ctx->started),
+ apr_atomic_read32(&conn_ctx->done));
}
else {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, /* NO APLOGNO */
void h2_mplx_c1_destroy(h2_mplx *m)
{
apr_status_t status;
- int i, wait_secs = 60, old_aborted;
+ unsigned int i, wait_secs = 60;
+ int old_aborted;
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
H2_MPLX_MSG(m, "start release"));
/* How to shut down a h2 connection:
* 1. cancel all streams still active */
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
- H2_MPLX_MSG(m, "release, %d/%d/%d streams (total/hold/purge), %d streams"),
- (int)h2_ihash_count(m->streams),
- (int)h2_ihash_count(m->shold), m->spurge->nelts, m->processing_count);
+ H2_MPLX_MSG(m, "release, %u/%u/%d streams (total/hold/purge), %d streams"),
+ h2_ihash_count(m->streams),
+ h2_ihash_count(m->shold),
+ m->spurge->nelts, m->processing_count);
while (!h2_ihash_iter(m->streams, m_stream_cancel_iter, m)) {
/* until empty */
}
/* This can happen if we have very long running requests
* that do not time out on IO. */
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, APLOGNO(03198)
- H2_MPLX_MSG(m, "waited %d sec for %d streams"),
- i*wait_secs, (int)h2_ihash_count(m->shold));
+ H2_MPLX_MSG(m, "waited %u sec for %u streams"),
+ i*wait_secs, h2_ihash_count(m->shold));
h2_ihash_iter(m->shold, m_report_stream_iter, m);
}
}
ap_assert(m->processing_count == 0);
if (!h2_ihash_empty(m->shold)) {
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c1, APLOGNO(03516)
- H2_MPLX_MSG(m, "unexpected %d streams in hold"),
- (int)h2_ihash_count(m->shold));
+ H2_MPLX_MSG(m, "unexpected %u streams in hold"),
+ h2_ihash_count(m->shold));
h2_ihash_iter(m->shold, m_unexpected_stream_iter, m);
}
}
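The log calls in this and the surrounding hunks drop their (int) casts and switch %d to %u because apr_atomic_read32() returns apr_uint32_t and h2_ihash_count() now returns unsigned int. A small sketch of the matching rule (hedged; the local variable names are illustrative, the struct members are the ones used above):

/* hedged sketch, not part of the patch */
apr_uint32_t started = apr_atomic_read32(&conn_ctx->started);  /* apr_uint32_t */
unsigned int held    = h2_ihash_count(m->shold);               /* unsigned int */
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
              "started=%u, held=%u", started, held);            /* no casts needed */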
apr_status_t h2_mplx_c1_stream_cleanup(h2_mplx *m, h2_stream *stream,
- int *pstream_count)
+ unsigned int *pstream_count)
{
H2_MPLX_ENTER(m);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
H2_STRM_MSG(stream, "cleanup"));
m_stream_cleanup(m, stream);
- *pstream_count = (int)h2_ihash_count(m->streams);
+ *pstream_count = h2_ihash_count(m->streams);
H2_MPLX_LEAVE(m);
return APR_SUCCESS;
}
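With the count reported as unsigned int, callers of h2_mplx_c1_stream_cleanup() adjust the type of the variable they pass; a one-line usage sketch (hypothetical caller):

/* hypothetical caller of the changed signature */
unsigned int stream_count;   /* was: int */
apr_status_t rv = h2_mplx_c1_stream_cleanup(m, stream, &stream_count);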
h2_stream_get_fn *get_stream,
h2_stream_pri_cmp_fn *stream_pri_cmp,
h2_session *session,
- int *pstream_count)
+ unsigned int *pstream_count)
{
apr_status_t rv;
int sid;
H2_MPLX_MSG(m, "activate at workers"));
}
}
- *pstream_count = (int)h2_ihash_count(m->streams);
+ *pstream_count = h2_ihash_count(m->streams);
#if APR_POOL_DEBUG
do {
{
h2_stream *stream = NULL;
apr_status_t rv = APR_SUCCESS;
- int sid;
+ apr_uint32_t sid;
conn_rec *c2 = NULL;
h2_c2_transit *transit = NULL;
struct h2_iqueue *q; /* all stream ids that need to be started */
apr_size_t stream_max_mem; /* max memory to buffer for a stream */
- int max_streams; /* max # of concurrent streams */
- int max_stream_id_started; /* highest stream id that started processing */
+ apr_uint32_t max_streams; /* max # of concurrent streams */
+ apr_uint32_t max_stream_id_started; /* highest stream id that started processing */
- int processing_count; /* # of c2 working for this mplx */
- int processing_limit; /* current limit on processing c2s, dynamic */
- int processing_max; /* max, hard limit of processing c2s */
+ apr_uint32_t processing_count; /* # of c2 working for this mplx */
+ apr_uint32_t processing_limit; /* current limit on processing c2s, dynamic */
+ apr_uint32_t processing_max; /* max, hard limit of processing c2s */
apr_time_t last_mood_change; /* last time, processing limit changed */
apr_interval_time_t mood_update_interval; /* how frequent we update at most */
- int irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
+ apr_uint32_t irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
apr_thread_mutex_t *lock;
struct apr_thread_cond_t *join_wait;
request_rec *scratch_r; /* pseudo request_rec for scoreboard reporting */
- apr_size_t max_spare_transits; /* max number of transit pools idling */
+ apr_uint32_t max_spare_transits; /* max number of transit pools idling */
apr_array_header_t *c2_transits; /* base pools for running c2 connections */
};
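Several of these counters were plain int before; now that they are apr_uint32_t, comparisons against signed values such as apr_array_header_t's nelts use the explicit range check seen in c2_transit_recycle() above, so no implicit signed/unsigned conversion takes place. A hedged sketch of that pattern (helper name is hypothetical):

/* hypothetical helper mirroring the c2_transit_recycle() check:
 * nelts is a signed int and is range-checked before being compared
 * as unsigned against the apr_uint32_t limit. */
static int spare_transits_full(const apr_array_header_t *transits,
                               apr_uint32_t max_spare)
{
    return transits->nelts >= APR_INT32_MAX
        || (apr_uint32_t)transits->nelts >= max_spare;
}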
* @param pstream_count return the number of streams active
*/
apr_status_t h2_mplx_c1_stream_cleanup(h2_mplx *m, struct h2_stream *stream,
- int *pstream_count);
+ unsigned int *pstream_count);
int h2_mplx_c1_stream_is_running(h2_mplx *m, struct h2_stream *stream);
h2_stream_get_fn *get_stream,
h2_stream_pri_cmp_fn *cmp,
struct h2_session *session,
- int *pstream_count);
+ unsigned int *pstream_count);
/**
* Stream priorities have changed, reschedule pending requests.
}
if (status == APR_SUCCESS) {
- ssize_t readlen = 0;
+ size_t readlen = 0;
while (status == APR_SUCCESS
&& (readlen < length)
&& !APR_BRIGADE_EMPTY(stream->input)) {
status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ);
if (status == APR_SUCCESS && blen > 0) {
- ssize_t copylen = H2MIN(length - readlen, blen);
+ size_t copylen = H2MIN(length - readlen, blen);
memcpy(buf, bdata, copylen);
buf += copylen;
readlen += copylen;
apr_status_t status = APR_SUCCESS;
apr_size_t readlen = 0;
ssize_t n;
-
+
while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
apr_bucket* b = APR_BRIGADE_FIRST(bb);
}
}
else {
- readlen += n;
- if (n < blen) {
- apr_bucket_split(b, n);
+ size_t rlen = (size_t)n;
+ readlen += rlen;
+ if (rlen < blen) {
+ apr_bucket_split(b, rlen);
}
}
}
const char *name, size_t nlen)
{
const literal *lit;
- int i;
+ size_t i;
for (i = 0; i < llen; ++i) {
lit = &lits[i];
{
if (ctx->link_start < ctx->link_end) {
char buffer[HUGE_STRING_LEN];
- int need_len, link_len, buffer_len, prepend_p_server;
+ size_t need_len, link_len, buffer_len, prepend_p_server;
const char *mapped;
buffer[0] = '\0';
EVP_MD_CTX *md;
apr_uint64_t val;
unsigned char hash[EVP_MAX_MD_SIZE];
- unsigned len;
- int i;
+ unsigned len, i;
md = EVP_MD_CTX_create();
ap_assert(md != NULL);
{
h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
h2_push_diary_entry e;
- int lastidx;
+ apr_size_t lastidx;
/* Move an existing entry to the last place */
if (diary->entries->nelts <= 0)
int maxP, const char *authority,
const char **pdata, apr_size_t *plen)
{
- int nelts, N, i;
+ int nelts, N;
unsigned char log2n, log2pmax;
gset_encoder encoder;
apr_uint64_t *hashes;
- apr_size_t hash_count;
+ apr_size_t hash_count, i;
nelts = diary->entries->nelts;
N = ceil_power_of_2(nelts);
apr_brigade_cleanup(session->bbtmp);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
- else if (len != length) {
+ else if (len != (apr_off_t)length) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c1,
H2_STRM_MSG(stream, "send_data_cb, wanted %ld bytes, "
"got %ld from stream"), (long)length, (long)len);
size_t max_payloadlen, void *user_data)
{
h2_session *session = user_data;
- ssize_t frame_len = frame->hd.length + H2_FRAME_HDR_LEN; /* the total length without padding */
- ssize_t padded_len = frame_len;
+ size_t frame_len = frame->hd.length + H2_FRAME_HDR_LEN; /* the total length without padding */
+ size_t padded_len = frame_len;
/* Determine # of padding bytes to append to frame. Unless session->padding_always
* the number may be capped by the ui.write_size that currently applies.
struct h2_workers *workers; /* for executing streams */
struct h2_c1_io_in_ctx_t *cin; /* connection input filter context */
h2_c1_io io; /* io on httpd conn filters */
- int padding_max; /* max number of padding bytes */
+ unsigned int padding_max; /* max number of padding bytes */
int padding_always; /* padding has precedence over I/O optimizations */
struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */
struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */
struct h2_stream_monitor *monitor;/* monitor callbacks for streams */
- int open_streams; /* number of streams processing */
-
- int streams_done; /* number of http/2 streams handled */
- int responses_submitted; /* number of http/2 responses submitted */
- int streams_reset; /* number of http/2 streams reset by client */
- int pushes_promised; /* number of http/2 push promises submitted */
- int pushes_submitted; /* number of http/2 pushed responses submitted */
- int pushes_reset; /* number of http/2 pushed reset by client */
+ unsigned int open_streams; /* number of streams processing */
+
+ unsigned int streams_done; /* number of http/2 streams handled */
+ unsigned int responses_submitted; /* number of http/2 responses submitted */
+ unsigned int streams_reset; /* number of http/2 streams reset by client */
+ unsigned int pushes_promised; /* number of http/2 push promises submitted */
+ unsigned int pushes_submitted; /* number of http/2 pushed responses submitted */
+ unsigned int pushes_reset; /* number of http/2 pushed reset by client */
apr_size_t frames_received; /* number of http/2 frames received */
apr_size_t frames_sent; /* number of http/2 frames sent */
{
ap_assert(frame_type >= 0);
ap_assert(state >= 0);
- if (frame_type >= maxlen) {
+ if (frame_type < 0 || (apr_size_t)frame_type >= maxlen) {
return state; /* NOP, ignore unknown frame types */
}
return on_map(state, frame_map[frame_type]);
static apr_status_t transit(h2_stream *stream, int new_state)
{
- if (new_state == stream->state) {
+ if ((h2_stream_state_t)new_state == stream->state) {
return APR_SUCCESS;
}
else if (new_state < 0) {
AP_DEBUG_ASSERT(new_state > S_XXX);
return;
}
- else if (new_state == stream->state) {
+ else if ((h2_stream_state_t)new_state == stream->state) {
/* nop */
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
H2_STRM_MSG(stream, "non-state event %d"), ev);
}
if (name[0] == ':') {
- if ((vlen) > session->s->limit_req_line) {
+ if (vlen > APR_INT32_MAX || (int)vlen > session->s->limit_req_line) {
/* pseudo header: approximation of request line size check */
if (!h2_stream_is_ready(stream)) {
ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c1,
buf_len = h2_brigade_mem_size(stream->out_buffer);
}
- if (buf_len >= stream->session->max_stream_mem) {
+ if (buf_len > APR_INT32_MAX
+ || (apr_size_t)buf_len >= stream->session->max_stream_mem) {
/* we have buffered enough. No need to read more.
* However, we have now output pending for which we may not
* receive another poll event. We need to make sure that this
* is requested. But we can reduce the size in case the master
* connection operates in smaller chunks. (TLS warmup) */
if (stream->session->io.write_size > 0) {
- apr_off_t chunk_len = stream->session->io.write_size - H2_FRAME_HDR_LEN;
+ apr_size_t chunk_len = stream->session->io.write_size - H2_FRAME_HDR_LEN;
if (length > chunk_len) {
length = chunk_len;
}
/* How much data do we have in our buffers that we can write? */
check_and_receive:
buf_len = output_data_buffered(stream, &eos, &header_blocked);
- while (buf_len < length && !eos && !header_blocked) {
+ while (buf_len < (apr_off_t)length && !eos && !header_blocked) {
/* read more? */
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
H2_SSSN_STRM_MSG(session, stream_id,
return ih;
}
-size_t h2_ihash_count(h2_ihash_t *ih)
+unsigned int h2_ihash_count(h2_ihash_t *ih)
{
return apr_hash_count(ih->hash);
}
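This aligns the wrapper with APR itself: apr_hash_count() returns unsigned int, so the former size_t return type widened the value only for callers to cast it back down to int for logging. For reference, the underlying declaration (quoted from memory; verify against your APR headers):

/* apr_hash.h (from memory, please verify against your APR version) */
APR_DECLARE(unsigned int) apr_hash_count(apr_hash_t *ht);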
size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max)
{
- int i;
+ size_t i;
for (i = 0; i < max; ++i) {
pint[i] = h2_iq_shift(q);
if (pint[i] == 0) {
* h2_util for bucket brigades
******************************************************************************/
-static apr_status_t last_not_included(apr_bucket_brigade *bb,
- apr_off_t maxlen,
- apr_bucket **pend)
+static void fit_bucket_into(apr_bucket *b, apr_off_t *plen)
{
- apr_bucket *b;
- apr_status_t status = APR_SUCCESS;
-
- if (maxlen >= 0) {
- /* Find the bucket, up to which we reach maxlen/mem bytes */
- for (b = APR_BRIGADE_FIRST(bb);
- (b != APR_BRIGADE_SENTINEL(bb));
- b = APR_BUCKET_NEXT(b)) {
-
- if (APR_BUCKET_IS_METADATA(b)) {
- /* included */
- }
- else {
- if (b->length == ((apr_size_t)-1)) {
- const char *ign;
- apr_size_t ilen;
- status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
- if (status != APR_SUCCESS) {
- return status;
- }
- }
-
- if (maxlen == 0 && b->length > 0) {
- *pend = b;
- return status;
- }
-
- if (APR_BUCKET_IS_FILE(b)
-#if APR_HAS_MMAP
- || APR_BUCKET_IS_MMAP(b)
-#endif
- ) {
- /* we like to move it, always */
- }
- else if (maxlen < (apr_off_t)b->length) {
- apr_bucket_split(b, (apr_size_t)maxlen);
- maxlen = 0;
- }
- else {
- maxlen -= b->length;
- }
- }
- }
+ /* signed apr_off_t is at least as large as unsigned apr_size_t.
+ * Problems may arise when they are both the same size: the
+ * bucket length *may* then be larger than a value we can hold
+ * in apr_off_t. Before casting b->length to apr_off_t we must
+ * check these limits.
+ * After we have resized the bucket, it is safe to cast and subtract.
+ */
+ if ((sizeof(apr_off_t) == sizeof(apr_int64_t)
+ && b->length > APR_INT64_MAX)
+ || (sizeof(apr_off_t) == sizeof(apr_int32_t)
+ && b->length > APR_INT32_MAX)
+ || *plen < (apr_off_t)b->length) {
+ /* bucket is longer than *plen */
+ apr_bucket_split(b, *plen);
}
- *pend = APR_BRIGADE_SENTINEL(bb);
- return status;
+ *plen -= (apr_off_t)b->length;
}
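fit_bucket_into() centralizes the split-and-subtract step that h2_brigade_concat_length(), its copy variant, and the bucket-move helper further below previously open-coded. A hedged usage sketch (hypothetical consumer loop; it assumes the data buckets already have a known length, which the real callers ensure by reading indeterminate-length buckets first; budget, src and dest are illustrative):

/* hypothetical consumer, not part of the patch:
 * src/dest are apr_bucket_brigade*, budget an apr_off_t byte limit */
apr_off_t remain = budget;
while (!APR_BRIGADE_EMPTY(src) && remain > 0) {
    apr_bucket *b = APR_BRIGADE_FIRST(src);
    if (!APR_BUCKET_IS_METADATA(b)) {
        fit_bucket_into(b, &remain);  /* split b if longer than remain,
                                       * then subtract its length */
    }
    APR_BUCKET_REMOVE(b);
    APR_BRIGADE_INSERT_TAIL(dest, b);
}
/* budget - remain now equals the number of data bytes moved */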
apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest,
APR_BRIGADE_INSERT_TAIL(dest, b);
}
else {
- if (remain == b->length) {
- /* fall through */
- }
- else if (remain <= 0) {
+ if (remain <= 0) {
return status;
}
- else {
- if (b->length == ((apr_size_t)-1)) {
- const char *ign;
- apr_size_t ilen;
- status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
- if (status != APR_SUCCESS) {
- return status;
- }
- }
-
- if (remain < b->length) {
- apr_bucket_split(b, remain);
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
}
}
+ fit_bucket_into(b, &remain);
APR_BUCKET_REMOVE(b);
APR_BRIGADE_INSERT_TAIL(dest, b);
- remain -= b->length;
}
}
return status;
/* fall through */
}
else {
- if (remain == b->length) {
- /* fall through */
- }
- else if (remain <= 0) {
+ if (remain <= 0) {
return status;
}
- else {
- if (b->length == ((apr_size_t)-1)) {
- const char *ign;
- apr_size_t ilen;
- status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
- if (status != APR_SUCCESS) {
- return status;
- }
- }
-
- if (remain < b->length) {
- apr_bucket_split(b, remain);
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
}
}
+ fit_bucket_into(b, &remain);
}
status = apr_bucket_copy(b, &b);
if (status != APR_SUCCESS) {
return status;
}
APR_BRIGADE_INSERT_TAIL(dest, b);
- remain -= b->length;
}
return status;
}
-int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len)
-{
- apr_bucket *b, *end;
-
- apr_status_t status = last_not_included(bb, len, &end);
- if (status != APR_SUCCESS) {
- return status;
- }
-
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb) && b != end;
- b = APR_BUCKET_NEXT(b))
- {
- if (APR_BUCKET_IS_EOS(b)) {
- return 1;
- }
- }
- return 0;
-}
-
-apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
- apr_off_t *plen, int *peos)
-{
- apr_status_t status;
- apr_off_t blen = 0;
-
- /* test read to determine available length */
- status = apr_brigade_length(bb, 1, &blen);
- if (status != APR_SUCCESS) {
- return status;
- }
- else if (blen == 0) {
- /* brigade without data, does it have an EOS bucket somewhere? */
- *plen = 0;
- *peos = h2_util_has_eos(bb, -1);
- }
- else {
- /* data in the brigade, limit the length returned. Check for EOS
- * bucket only if we indicate data. This is required since plen == 0
- * means "the whole brigade" for h2_util_has_eos()
- */
- if (blen < *plen || *plen < 0) {
- *plen = blen;
- }
- *peos = h2_util_has_eos(bb, *plen);
- }
- return APR_SUCCESS;
-}
-
apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
apr_bucket *b, const char *sep)
{
h2_bucket_gate *should_append)
{
apr_bucket *e;
- apr_off_t len = 0, remain = *plen;
+ apr_off_t start, remain;
apr_status_t rv;
*peos = 0;
+ start = remain = *plen;
while (!APR_BRIGADE_EMPTY(from)) {
e = APR_BRIGADE_FIRST(from);
}
}
else {
- if (remain > 0 && e->length == ((apr_size_t)-1)) {
+ if (remain <= 0) {
+ goto leave;
+ }
+ if (e->length == ((apr_size_t)-1)) {
const char *ign;
apr_size_t ilen;
rv = apr_bucket_read(e, &ign, &ilen, APR_BLOCK_READ);
return rv;
}
}
-
- if (remain < e->length) {
- if (remain <= 0) {
- goto leave;
- }
- apr_bucket_split(e, (apr_size_t)remain);
- }
+ fit_bucket_into(e, &remain);
}
-
APR_BUCKET_REMOVE(e);
APR_BRIGADE_INSERT_TAIL(to, e);
- len += e->length;
- remain -= e->length;
}
leave:
- *plen = len;
+ *plen = start - remain;
return APR_SUCCESS;
}
*/
h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int);
-size_t h2_ihash_count(h2_ihash_t *ih);
+unsigned int h2_ihash_count(h2_ihash_t *ih);
int h2_ihash_empty(h2_ihash_t *ih);
void *h2_ihash_get(h2_ihash_t *ih, int id);
apr_bucket_brigade *src,
apr_off_t length);
-/**
- * Return != 0 iff there is a FLUSH or EOS bucket in the brigade.
- * @param bb the brigade to check on
- * @return != 0 iff brigade holds FLUSH or EOS bucket (or both)
- */
-int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len);
-
-/**
- * Check how many bytes of the desired amount are available and if the
- * end of stream is reached by that amount.
- * @param bb the brigade to check
- * @param plen the desired length and, on return, the available length
- * @param on return, if eos has been reached
- */
-apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
- apr_off_t *plen, int *peos);
-
-typedef apr_status_t h2_util_pass_cb(void *ctx,
+typedef apr_status_t h2_util_pass_cb(void *ctx,
const char *data, apr_off_t len);
/**
typedef struct h2_slot h2_slot;
struct h2_slot {
APR_RING_ENTRY(h2_slot) link;
- int id;
+ apr_uint32_t id;
apr_pool_t *pool;
h2_slot_state_t state;
volatile int should_shutdown;
h2_workers *workers;
apr_pool_t *pool;
apr_allocator_t *allocator;
- int i, locked = 0;
+ int locked = 0;
+ apr_uint32_t i;
ap_assert(s);
ap_assert(pchild);
return NULL;
}
-apr_size_t h2_workers_get_max_workers(h2_workers *workers)
+apr_uint32_t h2_workers_get_max_workers(h2_workers *workers)
{
return workers->max_slots;
}
/**
* Get the maximum number of workers.
*/
-apr_size_t h2_workers_get_max_workers(h2_workers *workers);
+apr_uint32_t h2_workers_get_max_workers(h2_workers *workers);
/**
* ap_conn_producer_t is the source of connections (conn_rec*) to run.
static char *http2_var_lookup(apr_pool_t *p, server_rec *s,
conn_rec *c, request_rec *r, char *name)
{
- int i;
+ unsigned int i;
/* If the # of vars grow, we need to put definitions in a hash */
for (i = 0; i < H2_ALEN(H2_VARS); ++i) {
h2_var_def *vdef = &H2_VARS[i];
{
if (r->connection->master) {
h2_conn_ctx_t *ctx = h2_conn_ctx_get(r->connection);
- int i;
+ unsigned int i;
apr_interval_time_t stream_timeout;
for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {