git.ipfire.org Git - thirdparty/ntp.git/commitdiff
Update to libevent master (to be 2.1) as of 2.0.14-stable, HEAD git commit 148458e0a1fd25e167aa2ef229d1c9a70b27c3e9
author    Dave Hart <hart@ntp.org>
          Wed, 31 Aug 2011 16:12:20 +0000 (16:12 +0000)
committer Dave Hart <hart@ntp.org>
          Wed, 31 Aug 2011 16:12:20 +0000 (16:12 +0000)

bk: 4e5e5d64ZZuMF5dkklDGtUNF08JSAg

36 files changed:
sntp/libevent/ChangeLog
sntp/libevent/Makefile.am
sntp/libevent/buffer.c
sntp/libevent/buffer_iocp.c
sntp/libevent/bufferevent-internal.h
sntp/libevent/bufferevent.c
sntp/libevent/bufferevent_async.c
sntp/libevent/bufferevent_filter.c
sntp/libevent/bufferevent_openssl.c
sntp/libevent/bufferevent_ratelim.c
sntp/libevent/bufferevent_sock.c
sntp/libevent/configure.in
sntp/libevent/defer-internal.h
sntp/libevent/evbuffer-internal.h
sntp/libevent/evdns.c
sntp/libevent/event-internal.h
sntp/libevent/event.c
sntp/libevent/evrpc.c
sntp/libevent/evutil.c
sntp/libevent/evutil_rand.c
sntp/libevent/http.c
sntp/libevent/include/event2/dns.h
sntp/libevent/include/event2/event.h
sntp/libevent/include/event2/event_compat.h
sntp/libevent/kqueue.c
sntp/libevent/m4/libevent_openssl.m4 [new file with mode: 0644]
sntp/libevent/m4/ntp_pkg_config.m4 [new file with mode: 0644]
sntp/libevent/minheap-internal.h
sntp/libevent/sample/Makefile.am
sntp/libevent/sample/dns-example.c
sntp/libevent/sample/http-server.c
sntp/libevent/signal.c
sntp/libevent/test/Makefile.am
sntp/libevent/test/bench_http.c
sntp/libevent/test/rpcgen_wrapper.sh [new file with mode: 0644]
sntp/libevent/test/test-ratelim.c

index dea8ccda260c90d13e1a181f9b5ff7d41aa7937c..520e7397d4822a6ca87c53c2590d173aa3f5339a 100644 (file)
@@ -1,3 +1,28 @@
+Changes in version 2.0.14-stable (?? ??? 2011)
+BUGFIXES (bufferevents and evbuffers):
+ o Propagate errors on the underlying bufferevent to the user. (4a34394 Joachim Bauch)
+ o Ignore OpenSSL deprecation warnings on OS X (5d1b255 Sebastian Hahn)
+ o Fix handling of group rate limits under 64 bytes of burst (6d5440e)
+ o Solaris sendfile: correctly detect amount of data sent (643922e Michael Herf)
+ o Make rate limiting work with common_timeout logic (5b18f13)
+
+BUGFIXES (IOCP):
+ o IOCP: don't launch reads or writes on an unconnected socket (495c227)
+ o Make IOCP rate-limiting group support stricter and less surprising. (a98da7b)
+ o Have test-ratelim.c support IOCP (0ff2c5a)
+ o Make overlapped reads result in evbuffer callbacks getting invoked (6acfbdd)
+ o Correctly terminate IO on an async bufferevent on bufferevent_free (e6af35d)
+
+BUGFIXES (other):
+ o Fix evsig_dealloc memory leak with debugging turned on. (9b724b2 Leonid Evdokimov)
+ o Fix request_finished memory leak with debugging turned on. (aff6ba1 Leonid Evdokimov)
+
+BUILD AND TESTING FIXES:
+ o Allow OS-neutral builds for platforms where some versions have arc4random_buf (b442302 Mitchell Livingston)
+ o Try to fix 'make distcheck' errors when building out-of-tree (04656ea Dave Hart)
+ o Clean up some problems identified by Coverity. (7c11e51 Harlan Stenn)
+
+
 Changes in version 2.0.13-stable (18 Jul 2011)
 BUGFIXES
  o Avoid race-condition when initializing global locks (b683cae)
index 7847112730ba3dcb51051757d569d601ee1f85a5..ff07b26871fe94db97b23b59fcf87f876a0febde 100644 (file)
@@ -209,6 +209,7 @@ if OPENSSL
 libevent_openssl_la_SOURCES = bufferevent_openssl.c
 libevent_openssl_la_LIBADD = $(MAYBE_CORE) $(OPENSSL_LIBS)
 libevent_openssl_la_LDFLAGS = $(GENERIC_LDFLAGS)
+libevent_openssl_la_CPPFLAGS = $(AM_CPPFLAGS) $(OPENSSL_INCS)
 endif
 
 noinst_HEADERS = util-internal.h mm-internal.h ipv6-internal.h \
index df6402e858d10a29f8011a60a3b27e973f97f29a..7336c214f4be188217c7245c9a723a9cd5b2c515 100644 (file)
@@ -432,7 +432,7 @@ evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
        }
 }
 
-static inline void
+void
 evbuffer_invoke_callbacks(struct evbuffer *buffer)
 {
        if (TAILQ_EMPTY(&buffer->callbacks)) {
@@ -2218,12 +2218,18 @@ evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd,
        }
        return (res);
 #elif defined(SENDFILE_IS_SOLARIS)
-       res = sendfile(dest_fd, source_fd, &offset, chain->off);
-       if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
-               /* if this is EAGAIN or EINTR return 0; otherwise, -1 */
-               return (0);
+       {
+               const off_t offset_orig = offset;
+               res = sendfile(dest_fd, source_fd, &offset, chain->off);
+               if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
+                       if (offset - offset_orig)
+                               return offset - offset_orig;
+                       /* if this is EAGAIN or EINTR and no bytes were
+                        * written, return 0 */
+                       return (0);
+               }
+               return (res);
        }
-       return (res);
 #endif
 }
 #endif
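
The Solaris branch above is the "correctly detect amount of data sent" fix (643922e): on Solaris, sendfile() can transfer some bytes and advance the offset before failing with EAGAIN or EINTR, and unconditionally returning 0 in that case made callers believe nothing had been written. A minimal sketch of the pattern outside libevent, assuming Solaris sendfile() semantics (send_some is a hypothetical helper, not libevent code):

#include <sys/types.h>
#include <sys/sendfile.h>
#include <errno.h>

/* Hypothetical helper: report partial progress from an interrupted
 * sendfile().  Solaris updates *off even on failure, so the offset
 * delta is the authoritative count of bytes actually sent. */
static ssize_t
send_some(int out_fd, int in_fd, off_t *off, size_t len)
{
	const off_t start = *off;
	ssize_t res = sendfile(out_fd, in_fd, off, len);
	if (res == -1 && (errno == EAGAIN || errno == EINTR))
		return (*off - start);	/* 0 only if nothing was sent */
	return (res);
}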
index f59b42a3e0ba4f363da4f2a9eb313d3aa11e05b5..a917c01f27fd57a9b4fbaa33e9a734ec7e3309ab 100644 (file)
@@ -127,6 +127,9 @@ evbuffer_commit_read(struct evbuffer *evbuf, ev_ssize_t nBytes)
        buf->read_in_progress = 0;
 
        evbuf->total_len += nBytes;
+       evbuf->n_add_for_cb += nBytes;
+
+       evbuffer_invoke_callbacks(evbuf);
 
        _evbuffer_decref_and_unlock(evbuf);
 }
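
These two added lines make overlapped (IOCP) reads drive evbuffer callbacks the same way ordinary reads do (6acfbdd): n_add_for_cb accumulates the committed byte count, and evbuffer_invoke_callbacks(), newly exported from buffer.c above, drains the callback list. A hedged sketch of a callback that now also fires on IOCP reads; only evbuffer_add_cb() and the callback signature are libevent API, the rest is illustrative:

#include <stdio.h>
#include <event2/buffer.h>

/* Runs whenever the buffer's contents change; after this patch,
 * committing an overlapped read counts as such a change. */
static void
on_buffer_changed(struct evbuffer *buf,
    const struct evbuffer_cb_info *info, void *arg)
{
	if (info->n_added)
		printf("%lu bytes added\n", (unsigned long)info->n_added);
}

/* Registered once on the buffer:
 *	evbuffer_add_cb(buf, on_buffer_changed, NULL);
 */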
index bbf17eba2dd47dcf327440ecdd75abe8a7762d79..8f47979b6eb73935fe21cadde6095fd259c01fbf 100644 (file)
@@ -99,6 +99,8 @@ struct bufferevent_rate_limit_group {
        /** The smallest number of bytes that any member of the group should
         * be limited to read or write at a time. */
        ev_ssize_t min_share;
+       ev_ssize_t configured_min_share;
+
        /** Timeout event that goes off once a tick, when the bucket is ready
         * to refill. */
        struct event master_refill_event;
@@ -196,7 +198,8 @@ struct bufferevent_private {
 enum bufferevent_ctrl_op {
        BEV_CTRL_SET_FD,
        BEV_CTRL_GET_FD,
-       BEV_CTRL_GET_UNDERLYING
+       BEV_CTRL_GET_UNDERLYING,
+       BEV_CTRL_CANCEL_ALL
 };
 
 /** Possible data types for a control callback */
index 1c064e5362923adca77f30cfe6b20f1de0bd8d15..3a99186050a524ac73f3698d27a9b5f1261c3b5f 100644 (file)
@@ -60,6 +60,9 @@
 #include "evbuffer-internal.h"
 #include "util-internal.h"
 
+static void _bufferevent_cancel_all(struct bufferevent *bev);
+
+
 void
 bufferevent_suspend_read(struct bufferevent *bufev, bufferevent_suspend_flags what)
 {
@@ -675,6 +678,7 @@ bufferevent_free(struct bufferevent *bufev)
 {
        BEV_LOCK(bufev);
        bufferevent_setcb(bufev, NULL, NULL, NULL, NULL);
+       _bufferevent_cancel_all(bufev);
        _bufferevent_decref_and_unlock(bufev);
 }
 
@@ -751,6 +755,17 @@ bufferevent_getfd(struct bufferevent *bev)
        return (res<0) ? -1 : d.fd;
 }
 
+static void
+_bufferevent_cancel_all(struct bufferevent *bev)
+{
+       union bufferevent_ctrl_data d;
+       memset(&d, 0, sizeof(d));
+       BEV_LOCK(bev);
+       if (bev->be_ops->ctrl)
+               bev->be_ops->ctrl(bev, BEV_CTRL_CANCEL_ALL, &d);
+       BEV_UNLOCK(bev);
+}
+
 short
 bufferevent_get_enabled(struct bufferevent *bufev)
 {
index 81cc0b72a06f1cbd450b85ff3574212e2961226a..287371d7b4dc3e347d7b08578fe75be98cfdc455 100644 (file)
@@ -81,8 +81,8 @@ struct bufferevent_async {
        struct event_overlapped connect_overlapped;
        struct event_overlapped read_overlapped;
        struct event_overlapped write_overlapped;
-       unsigned read_in_progress : 1;
-       unsigned write_in_progress : 1;
+       size_t read_in_progress;
+       size_t write_in_progress;
        unsigned ok : 1;
        unsigned read_added : 1;
        unsigned write_added : 1;
@@ -189,7 +189,7 @@ bev_async_consider_writing(struct bufferevent_async *beva)
 
        /* Don't write if there's a write in progress, or we do not
         * want to write, or when there's nothing left to write. */
-       if (beva->write_in_progress)
+       if (beva->write_in_progress || beva->bev.connecting)
                return;
        if (!beva->ok || !(bev->enabled&EV_WRITE) ||
            !evbuffer_get_length(bev->output)) {
@@ -199,7 +199,6 @@ bev_async_consider_writing(struct bufferevent_async *beva)
 
        at_most = evbuffer_get_length(bev->output);
 
-       /* XXXX This over-commits. */
        /* This is safe so long as bufferevent_get_write_max never returns
         * more than INT_MAX.  That's true for now. XXXX */
        limit = (int)_bufferevent_get_write_max(&beva->bev);
@@ -219,7 +218,8 @@ bev_async_consider_writing(struct bufferevent_async *beva)
                beva->ok = 0;
                _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
        } else {
-               beva->write_in_progress = 1;
+               beva->write_in_progress = at_most;
+               _bufferevent_decrement_write_buckets(&beva->bev, at_most);
                bev_async_add_write(beva);
        }
 }
@@ -235,7 +235,7 @@ bev_async_consider_reading(struct bufferevent_async *beva)
 
        /* Don't read if there is a read in progress, or we do not
         * want to read. */
-       if (beva->read_in_progress)
+       if (beva->read_in_progress || beva->bev.connecting)
                return;
        if (!beva->ok || !(bev->enabled&EV_READ)) {
                bev_async_del_read(beva);
@@ -269,10 +269,11 @@ bev_async_consider_reading(struct bufferevent_async *beva)
        bufferevent_incref(bev);
        if (evbuffer_launch_read(bev->input, at_most, &beva->read_overlapped)) {
                beva->ok = 0;
-               bufferevent_decref(bev);
                _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
+               bufferevent_decref(bev);
        } else {
-               beva->read_in_progress = 1;
+               beva->read_in_progress = at_most;
+               _bufferevent_decrement_read_buckets(&beva->bev, at_most);
                bev_async_add_read(beva);
        }
 
@@ -325,7 +326,11 @@ be_async_enable(struct bufferevent *buf, short what)
        if (!bev_async->ok)
                return -1;
 
-       /* NOTE: This interferes with non-blocking connect */
+       if (bev_async->bev.connecting) {
+               /* Don't launch anything during connection attempts. */
+               return 0;
+       }
+
        if (what & EV_READ)
                BEV_RESET_GENERIC_READ_TIMEOUT(buf);
        if (what & EV_WRITE)
@@ -375,8 +380,10 @@ be_async_destruct(struct bufferevent *bev)
        bev_async_del_write(bev_async);
 
        fd = _evbuffer_overlapped_get_fd(bev->input);
-       if (bev_p->options & BEV_OPT_CLOSE_ON_FREE)
+       if (bev_p->options & BEV_OPT_CLOSE_ON_FREE) {
+               /* XXXX possible double-close */
                evutil_closesocket(fd);
+       }
        /* delete this in case non-blocking connect was used */
        if (event_initialized(&bev->ev_write)) {
                event_del(&bev->ev_write);
@@ -439,12 +446,15 @@ read_complete(struct event_overlapped *eo, ev_uintptr_t key,
        struct bufferevent_async *bev_a = upcast_read(eo);
        struct bufferevent *bev = &bev_a->bev.bev;
        short what = BEV_EVENT_READING;
-
+       ev_ssize_t amount_unread;
        BEV_LOCK(bev);
        EVUTIL_ASSERT(bev_a->read_in_progress);
 
+       amount_unread = bev_a->read_in_progress - nbytes;
        evbuffer_commit_read(bev->input, nbytes);
        bev_a->read_in_progress = 0;
+       if (amount_unread)
+               _bufferevent_decrement_read_buckets(&bev_a->bev, -amount_unread);
 
        if (!ok)
                bev_async_set_wsa_error(bev, eo);
@@ -452,8 +462,6 @@ read_complete(struct event_overlapped *eo, ev_uintptr_t key,
        if (bev_a->ok) {
                if (ok && nbytes) {
                        BEV_RESET_GENERIC_READ_TIMEOUT(bev);
-                       _bufferevent_decrement_read_buckets(&bev_a->bev,
-                                       nbytes);
                        if (evbuffer_get_length(bev->input) >= bev->wm_read.low)
                                _bufferevent_run_readcb(bev);
                        bev_async_consider_reading(bev_a);
@@ -478,20 +486,26 @@ write_complete(struct event_overlapped *eo, ev_uintptr_t key,
        struct bufferevent_async *bev_a = upcast_write(eo);
        struct bufferevent *bev = &bev_a->bev.bev;
        short what = BEV_EVENT_WRITING;
+       ev_ssize_t amount_unwritten;
 
        BEV_LOCK(bev);
        EVUTIL_ASSERT(bev_a->write_in_progress);
+
+       amount_unwritten = bev_a->write_in_progress - nbytes;
        evbuffer_commit_write(bev->output, nbytes);
        bev_a->write_in_progress = 0;
 
+       if (amount_unwritten)
+               _bufferevent_decrement_write_buckets(&bev_a->bev,
+                                                    -amount_unwritten);
+
+
        if (!ok)
                bev_async_set_wsa_error(bev, eo);
 
        if (bev_a->ok) {
                if (ok && nbytes) {
                        BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
-                       _bufferevent_decrement_write_buckets(&bev_a->bev,
-                                       nbytes);
                        if (evbuffer_get_length(bev->output) <=
                            bev->wm_write.low)
                                _bufferevent_run_writecb(bev);
@@ -658,8 +672,20 @@ be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
                _evbuffer_overlapped_set_fd(bev->output, data->fd);
                return 0;
        }
+       case BEV_CTRL_CANCEL_ALL: {
+               struct bufferevent_async *bev_a = upcast(bev);
+               evutil_socket_t fd = _evbuffer_overlapped_get_fd(bev->input);
+               if (fd != (evutil_socket_t)INVALID_SOCKET &&
+                   (bev_a->bev.options & BEV_OPT_CLOSE_ON_FREE)) {
+                       closesocket(fd);
+               }
+               bev_a->ok = 0;
+               return 0;
+       }
        case BEV_CTRL_GET_UNDERLYING:
        default:
                return -1;
        }
 }
+
+
index eeb9a7d3b0cf549d01224434478c15e9c812fb29..bc30b6dfc07b6fd6681d53cfeca28a79dbab97ed 100644 (file)
@@ -506,6 +506,7 @@ be_filter_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
                return 0;
        case BEV_CTRL_GET_FD:
        case BEV_CTRL_SET_FD:
+       case BEV_CTRL_CANCEL_ALL:
        default:
                return -1;
        }
index 2a448cdc3d040d6192b75f6bd5314f360dc140b4..ff8138fd8ccb54cd9ded09201350dd5aa5371ab7 100644 (file)
@@ -815,6 +815,9 @@ be_openssl_eventcb(struct bufferevent *bev_base, short what, void *ctx)
        } else if (what & BEV_EVENT_TIMEOUT) {
                /* We sure didn't set this.  Propagate it to the user. */
                event = what;
+       } else if (what & BEV_EVENT_ERROR) {
+               /* An error occurred on the connection.  Propagate it to the user. */
+               event = what;
        } else if (what & BEV_EVENT_CONNECTED) {
                /* Ignore it.  We're saying SSL_connect() already, which will
                   eat it. */
@@ -1165,6 +1168,7 @@ be_openssl_ctrl(struct bufferevent *bev,
                        return -1;
                data->ptr = bev_ssl->underlying;
                return 0;
+       case BEV_CTRL_CANCEL_ALL:
        default:
                return -1;
        }
@@ -1246,6 +1250,7 @@ bufferevent_openssl_new_impl(struct event_base *base,
        }
 
        if (underlying) {
+               bufferevent_setwatermark(underlying, EV_READ, 0, 0);
                bufferevent_enable(underlying, EV_READ|EV_WRITE);
                if (state == BUFFEREVENT_SSL_OPEN)
                        bufferevent_suspend_read(underlying,
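
The added bufferevent_setwatermark() call clears any read low-water mark left on the underlying bufferevent, since a leftover watermark could hold back bytes the SSL layer needs to make handshake progress. A hedged sketch of the filter setup this code path serves; the SSL object and underlying bufferevent are assumed to exist, and only the libevent calls are real API:

#include <openssl/ssl.h>
#include <event2/bufferevent.h>
#include <event2/bufferevent_ssl.h>

static struct bufferevent *
wrap_with_tls(struct event_base *base, struct bufferevent *underlying,
    SSL *ssl)
{
	/* As of this patch, the constructor itself resets the underlying
	 * read watermark to (0, 0) and enables EV_READ|EV_WRITE. */
	return bufferevent_openssl_filter_new(base, underlying, ssl,
	    BUFFEREVENT_SSL_CONNECTING, BEV_OPT_CLOSE_ON_FREE);
}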
index 7e51c2e5c0044aff4bf9f45a7beab65133dd6dcd..506e82ab912179b6e2eaed2f1ba89c6bc831ac0d 100644 (file)
@@ -44,6 +44,7 @@
 #include "bufferevent-internal.h"
 #include "mm-internal.h"
 #include "util-internal.h"
+#include "event-internal.h"
 
 int
 ev_token_bucket_init(struct ev_token_bucket *bucket,
@@ -166,7 +167,8 @@ ev_token_bucket_cfg_new(size_t read_rate, size_t read_burst,
        r->read_maximum = read_burst;
        r->write_maximum = write_burst;
        memcpy(&r->tick_timeout, tick_len, sizeof(struct timeval));
-       r->msec_per_tick = (tick_len->tv_sec * 1000) + tick_len->tv_usec/1000;
+       r->msec_per_tick = (tick_len->tv_sec * 1000) +
+           (tick_len->tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK)/1000;
        return r;
 }
 
@@ -188,6 +190,8 @@ ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg)
 
 static int _bev_group_suspend_reading(struct bufferevent_rate_limit_group *g);
 static int _bev_group_suspend_writing(struct bufferevent_rate_limit_group *g);
+static void _bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g);
+static void _bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g);
 
 /** Helper: figure out the maximum amount we should write if is_write, or
     the maximum amount we should read if is_read.  Return that maximum, or
@@ -284,6 +288,10 @@ _bufferevent_decrement_read_buckets(struct bufferevent_private *bev, ev_ssize_t
                        if (event_add(&bev->rate_limiting->refill_bucket_event,
                                &bev->rate_limiting->cfg->tick_timeout) < 0)
                                r = -1;
+               } else if (bev->read_suspended & BEV_SUSPEND_BW) {
+                       if (!(bev->write_suspended & BEV_SUSPEND_BW))
+                               event_del(&bev->rate_limiting->refill_bucket_event);
+                       bufferevent_unsuspend_read(&bev->bev, BEV_SUSPEND_BW);
                }
        }
 
@@ -293,6 +301,8 @@ _bufferevent_decrement_read_buckets(struct bufferevent_private *bev, ev_ssize_t
                bev->rate_limiting->group->total_read += bytes;
                if (bev->rate_limiting->group->rate_limit.read_limit <= 0) {
                        _bev_group_suspend_reading(bev->rate_limiting->group);
+               } else if (bev->rate_limiting->group->read_suspended) {
+                       _bev_group_unsuspend_reading(bev->rate_limiting->group);
                }
                UNLOCK_GROUP(bev->rate_limiting->group);
        }
@@ -316,6 +326,10 @@ _bufferevent_decrement_write_buckets(struct bufferevent_private *bev, ev_ssize_t
                        if (event_add(&bev->rate_limiting->refill_bucket_event,
                                &bev->rate_limiting->cfg->tick_timeout) < 0)
                                r = -1;
+               } else if (bev->write_suspended & BEV_SUSPEND_BW) {
+                       if (!(bev->read_suspended & BEV_SUSPEND_BW))
+                               event_del(&bev->rate_limiting->refill_bucket_event);
+                       bufferevent_unsuspend_write(&bev->bev, BEV_SUSPEND_BW);
                }
        }
 
@@ -325,6 +339,8 @@ _bufferevent_decrement_write_buckets(struct bufferevent_private *bev, ev_ssize_t
                bev->rate_limiting->group->total_written += bytes;
                if (bev->rate_limiting->group->rate_limit.write_limit <= 0) {
                        _bev_group_suspend_writing(bev->rate_limiting->group);
+               } else if (bev->rate_limiting->group->write_suspended) {
+                       _bev_group_unsuspend_writing(bev->rate_limiting->group);
                }
                UNLOCK_GROUP(bev->rate_limiting->group);
        }
@@ -519,6 +535,7 @@ _bev_group_refill_callback(evutil_socket_t fd, short what, void *arg)
        event_base_gettimeofday_cached(event_get_base(&g->master_refill_event), &now);
 
        LOCK_GROUP(g);
+
        tick = ev_token_bucket_get_tick(&now, &g->rate_limit_cfg);
        ev_token_bucket_update(&g->rate_limit, &g->rate_limit_cfg, tick);
 
@@ -637,13 +654,15 @@ bufferevent_rate_limit_group_new(struct event_base *base,
 
        ev_token_bucket_init(&g->rate_limit, cfg, tick, 0);
 
-       g->min_share = 64;
        event_assign(&g->master_refill_event, base, -1, EV_PERSIST,
            _bev_group_refill_callback, g);
        /*XXXX handle event_add failure */
        event_add(&g->master_refill_event, &cfg->tick_timeout);
 
        EVTHREAD_ALLOC_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+
+       bufferevent_rate_limit_group_set_min_share(g, 64);
+
        return g;
 }
 
@@ -671,6 +690,9 @@ bufferevent_rate_limit_group_set_cfg(
                event_add(&g->master_refill_event, &cfg->tick_timeout);
        }
 
+       /* The new limits might force us to adjust min_share differently. */
+       bufferevent_rate_limit_group_set_min_share(g, g->configured_min_share);
+
        UNLOCK_GROUP(g);
        return 0;
 }
@@ -683,6 +705,15 @@ bufferevent_rate_limit_group_set_min_share(
        if (share > EV_SSIZE_MAX)
                return -1;
 
+       g->configured_min_share = share;
+
+       /* Can't set share to less than the one-tick maximum.  IOW, at steady
+        * state, at least one connection can go per tick. */
+       if (share > g->rate_limit_cfg.read_rate)
+               share = g->rate_limit_cfg.read_rate;
+       if (share > g->rate_limit_cfg.write_rate)
+               share = g->rate_limit_cfg.write_rate;
+
        g->min_share = share;
        return 0;
 }
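
This clamp is the "group rate limits under 64 bytes of burst" fix (6d5440e): the caller's request is remembered in configured_min_share, but the effective min_share never exceeds the per-tick refill rate. Concretely, a group refilling 50 bytes per tick used to starve all members, since the default 64-byte share could never accumulate; now the share is silently clamped to 50 and re-derived whenever the group's cfg changes. A hedged setup sketch (values illustrative, base assumed to exist):

#include <event2/event.h>
#include <event2/bufferevent.h>

/* A group limited to 50 bytes per 100 ms tick in each direction. */
struct timeval tick = { 0, 100 * 1000 };
struct ev_token_bucket_cfg *cfg =
    ev_token_bucket_cfg_new(50, 50, 50, 50, &tick);
struct bufferevent_rate_limit_group *grp =
    bufferevent_rate_limit_group_new(base, cfg);
/* The default 64-byte min_share is clamped to 50 here, so at least
 * one member can make progress every tick. */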
index 53a1fc7025efe1b3b3049181ebfa7c76c8e3d315..a37e302af7747ff6ca1e01a13f392420b46a63e5 100644 (file)
@@ -114,8 +114,9 @@ bufferevent_socket_outbuf_cb(struct evbuffer *buf,
            !bufev_p->write_suspended) {
                /* Somebody added data to the buffer, and we would like to
                 * write, and we were not writing.  So, start writing. */
-               be_socket_add(&bufev->ev_write, &bufev->timeout_write);
-               /* XXXX handle failure from be_socket_add */
+               if (be_socket_add(&bufev->ev_write, &bufev->timeout_write) == -1) {
+                   /* XXXX Should we log this? */
+               }
        }
 }
 
@@ -685,6 +686,7 @@ be_socket_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
                data->fd = event_get_fd(&bev->ev_read);
                return 0;
        case BEV_CTRL_GET_UNDERLYING:
+       case BEV_CTRL_CANCEL_ALL:
        default:
                return -1;
        }
index 774d4afb70d5bb3db4472b16ad951372903a44ad..770c46fc71786b003fde99c0c02f2056575354f4 100644 (file)
@@ -54,6 +54,15 @@ if test "$GCC" = "yes" ; then
        CFLAGS="$CFLAGS -fno-strict-aliasing"
 fi
 
+# OS X Lion started deprecating the system openssl. Let's just disable
+# all deprecation warnings on OS X.
+case "$host_os" in
+
+ darwin*)
+    CFLAGS="$CFLAGS -Wno-deprecated-declarations"
+    ;;
+esac
+
 AC_ARG_ENABLE(gcc-warnings,
      AS_HELP_STRING(--enable-gcc-warnings, enable verbose warnings with GCC))
 AC_ARG_ENABLE(thread-support,
@@ -77,6 +86,9 @@ AC_ARG_ENABLE([libevent-regress],
 AC_ARG_ENABLE([function-sections],
      AS_HELP_STRING([--enable-function-sections, make static library allow smaller binaries with --gc-sections]),
        [], [enable_function_sections=no])
+AC_ARG_ENABLE(verbose-debug,
+               AS_HELP_STRING([--enable-verbose-debug, verbose debug logging]),
+       [], [enable_verbose_debug=no])
 
 
 AC_PROG_LIBTOOL
@@ -151,22 +163,7 @@ fi
 AC_SUBST(EV_LIB_WS32)
 AC_SUBST(EV_LIB_GDI)
 
-AC_CHECK_HEADERS([openssl/bio.h])
-
-if test "$enable_openssl" = "yes"; then
-save_LIBS="$LIBS"
-LIBS=""
-OPENSSL_LIBS=""
-have_openssl=no
-AC_SEARCH_LIBS([SSL_new], [ssl],
-       [have_openssl=yes
-       OPENSSL_LIBS="$LIBS -lcrypto $EV_LIB_GDI $EV_LIB_WS32"
-       AC_DEFINE(HAVE_OPENSSL, 1, [Define if the system has openssl])],
-       [have_openssl=no],
-       [-lcrypto $EV_LIB_GDI $EV_LIB_WS32])
-LIBS="$save_LIBS"
-AC_SUBST(OPENSSL_LIBS)
-fi
+LIBEVENT_OPENSSL
 
 dnl Checks for header files.
 AC_HEADER_STDC
@@ -648,6 +645,11 @@ if test x$enable_debug_mode = xno; then
         [Define if libevent should build without support for a debug mode])
 fi
 
+# check if we should enable verbose debugging 
+if test x$enable_verbose_debug = xyes; then
+       CFLAGS="$CFLAGS -DUSE_DEBUG"
+fi
+
 # check if we have and should use openssl
 AM_CONDITIONAL(OPENSSL, [test "$enable_openssl" != "no" && test "$have_openssl" = "yes"])
 
index 1892c556ea6f584d50302f34411317dd161e9027..68fe88514960c14731411646ad114ec9e0fd5a5b 100644 (file)
@@ -57,6 +57,10 @@ struct deferred_cb_queue {
        /** Lock used to protect the queue. */
        void *lock;
 
+       /** Which event_base does this queue associate itself with?
+        * (Used for timing) */
+       struct event_base *base;
+
        /** How many entries are in the queue? */
        int active_count;
 
index 6bb3e2c164e5bd8f1cde4056fa39c2f728651fe0..0a146910a77ecc163ee6d07b5a23f5c4f02a9d1a 100644 (file)
@@ -303,6 +303,8 @@ int _evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
 /** Set the parent bufferevent object for buf to bev */
 void evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev);
 
+void evbuffer_invoke_callbacks(struct evbuffer *buf);
+
 #ifdef __cplusplus
 }
 #endif
index 9beac911b2bb605473355cf6d7186e5dbde94c46..9094f8442b53cdcbf920d7a60defca3b4bf2377d 100644 (file)
@@ -647,6 +647,8 @@ request_finished(struct request *const req, struct request **head, int free_hand
        } else {
                base->global_requests_waiting--;
        }
+       /* it was initialized during request_new / evtimer_assign */
+       event_debug_unassign(&req->timeout_event);
 
        if (!req->request_appended) {
                 /* need to free the request data on its own */
index 0fc3216bf9bd0fe5170e46330acaa971cf83b928..63895337326afd453948b2a6141f6d3c205c2bb7 100644 (file)
@@ -148,6 +148,9 @@ struct common_timeout_list {
        struct event_base *base;
 };
 
+/** Mask used to get the real tv_usec value from a common timeout. */
+#define COMMON_TIMEOUT_MICROSECONDS_MASK       0x000fffff
+
 struct event_change;
 
 /* List of 'changes' since the last call to eventop.dispatch.  Only maintained
@@ -273,6 +276,10 @@ struct event_base {
        /** Flags that this base was configured with */
        enum event_base_config_flag flags;
 
+       struct timeval max_dispatch_time;
+       int max_dispatch_callbacks;
+       int limit_callbacks_after_prio;
+
        /* Notify main thread to wake up break, etc. */
        /** True if the base already has a pending notify, and we don't need
         * to add any more. */
@@ -299,6 +306,9 @@ struct event_config {
        TAILQ_HEAD(event_configq, event_config_entry) entries;
 
        int n_cpus_hint;
+       struct timeval max_dispatch_interval;
+       int max_dispatch_callbacks;
+       int limit_callbacks_after_prio;
        enum event_method_feature require_features;
        enum event_base_config_flag flags;
 };
index 4cc97e4b68038c6fa1d44035c854fa80c3b08d91..66fbfd588c62cf6a03932de776331379af42dc31 100644 (file)
@@ -54,6 +54,7 @@
 #include <signal.h>
 #include <string.h>
 #include <time.h>
+#include <limits.h>
 
 #include "event2/event.h"
 #include "event2/event_struct.h"
@@ -131,8 +132,14 @@ static inline int event_add_internal(struct event *ev,
     const struct timeval *tv, int tv_is_absolute);
 static inline int event_del_internal(struct event *ev);
 
-static void    event_queue_insert(struct event_base *, struct event *, int);
-static void    event_queue_remove(struct event_base *, struct event *, int);
+static void    event_queue_insert_active(struct event_base *, struct event *);
+static void    event_queue_insert_timeout(struct event_base *, struct event *);
+static void    event_queue_insert_inserted(struct event_base *, struct event *);
+static void    event_queue_remove_active(struct event_base *, struct event *);
+static void    event_queue_remove_timeout(struct event_base *, struct event *);
+static void    event_queue_remove_inserted(struct event_base *, struct event *);
+static void    event_queue_reinsert_timeout(struct event_base *,struct event *);
+
 static int     event_haveevents(struct event_base *);
 
 static int     event_process_active(struct event_base *);
@@ -146,6 +153,9 @@ static inline void  event_persist_closure(struct event_base *, struct event *ev);
 
 static int     evthread_notify_base(struct event_base *base);
 
+static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
+    struct event *ev);
+
 #ifndef _EVENT_DISABLE_DEBUG_MODE
 /* These functions implement a hashtable of which 'struct event *' structures
  * have been setup or added.  We don't want to trust the content of the struct
@@ -563,6 +573,7 @@ event_base_new_with_config(const struct event_config *cfg)
        base->th_notify_fd[1] = -1;
 
        event_deferred_cb_queue_init(&base->defer_queue);
+       base->defer_queue.base = base;
        base->defer_queue.notify_fn = notify_base_cbq_callback;
        base->defer_queue.notify_arg = base;
        if (cfg)
@@ -577,6 +588,24 @@ event_base_new_with_config(const struct event_config *cfg)
        should_check_environment =
            !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
 
+       if (cfg) {
+               memcpy(&base->max_dispatch_time,
+                   &cfg->max_dispatch_interval, sizeof(struct timeval));
+               base->limit_callbacks_after_prio =
+                   cfg->limit_callbacks_after_prio;
+       } else {
+               base->max_dispatch_time.tv_sec = -1;
+               base->limit_callbacks_after_prio = 1;
+       }
+       if (cfg && cfg->max_dispatch_callbacks >= 0) {
+               base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
+       } else {
+               base->max_dispatch_callbacks = INT_MAX;
+       }
+       if (base->max_dispatch_callbacks == INT_MAX &&
+           base->max_dispatch_time.tv_sec == -1)
+               base->limit_callbacks_after_prio = INT_MAX;
+
        for (i = 0; eventops[i] && !base->evbase; i++) {
                if (cfg != NULL) {
                        /* determine if this backend should be avoided */
@@ -806,22 +835,18 @@ event_reinit(struct event_base *base)
        if (base->sig.ev_signal_added) {
                /* we cannot call event_del here because the base has
                 * not been reinitialized yet. */
-               event_queue_remove(base, &base->sig.ev_signal,
-                   EVLIST_INSERTED);
+               event_queue_remove_inserted(base, &base->sig.ev_signal);
                if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
-                       event_queue_remove(base, &base->sig.ev_signal,
-                           EVLIST_ACTIVE);
+                       event_queue_remove_active(base, &base->sig.ev_signal);
                base->sig.ev_signal_added = 0;
        }
        if (base->th_notify_fd[0] != -1) {
                /* we cannot call event_del here because the base has
                 * not been reinitialized yet. */
                was_notifiable = 1;
-               event_queue_remove(base, &base->th_notify,
-                   EVLIST_INSERTED);
+               event_queue_remove_inserted(base, &base->th_notify);
                if (base->th_notify.ev_flags & EVLIST_ACTIVE)
-                       event_queue_remove(base, &base->th_notify,
-                           EVLIST_ACTIVE);
+                       event_queue_remove_active(base, &base->th_notify);
                base->sig.ev_signal_added = 0;
                EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
                if (base->th_notify_fd[1] != -1)
@@ -905,6 +930,9 @@ event_config_new(void)
                return (NULL);
 
        TAILQ_INIT(&cfg->entries);
+       cfg->max_dispatch_interval.tv_sec = -1;
+       cfg->max_dispatch_callbacks = INT_MAX;
+       cfg->limit_callbacks_after_prio = 1;
 
        return (cfg);
 }
@@ -974,6 +1002,23 @@ event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
        return (0);
 }
 
+int
+event_config_set_max_dispatch_interval(struct event_config *cfg,
+    const struct timeval *max_interval, int max_callbacks, int min_priority)
+{
+       if (max_interval)
+               memcpy(&cfg->max_dispatch_interval, max_interval,
+                   sizeof(struct timeval));
+       else
+               cfg->max_dispatch_interval.tv_sec = -1;
+       cfg->max_dispatch_callbacks =
+           max_callbacks >= 0 ? max_callbacks : INT_MAX;
+       if (min_priority <= 0)
+               min_priority = 1;
+       cfg->limit_callbacks_after_prio = min_priority;
+       return (0);
+}
+
 int
 event_priority_init(int npriorities)
 {
@@ -1059,7 +1104,7 @@ event_signal_closure(struct event_base *base, struct event *ev)
 * of index into the event_base's array of common timeouts.
  */
 
-#define MICROSECONDS_MASK       0x000fffff
+#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
 #define COMMON_TIMEOUT_IDX_SHIFT 20
 #define COMMON_TIMEOUT_MASK     0xf0000000
@@ -1274,7 +1319,8 @@ event_persist_closure(struct event_base *base, struct event *ev)
 */
 static int
 event_process_active_single_queue(struct event_base *base,
-    struct event_list *activeq)
+    struct event_list *activeq,
+    int max_to_process, const struct timeval *endtime)
 {
        struct event *ev;
        int count = 0;
@@ -1283,7 +1329,7 @@ event_process_active_single_queue(struct event_base *base,
 
        for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
                if (ev->ev_events & EV_PERSIST)
-                       event_queue_remove(base, ev, EVLIST_ACTIVE);
+                       event_queue_remove_active(base, ev);
                else
                        event_del_internal(ev);
                if (!(ev->ev_flags & EVLIST_INTERNAL))
@@ -1327,6 +1373,15 @@ event_process_active_single_queue(struct event_base *base,
 
                if (base->event_break)
                        return -1;
+               if (count >= max_to_process)
+                       return count;
+               if (count && endtime) {
+                       struct timeval now;
+                       update_time_cache(base);
+                       gettime(base, &now);
+                       if (evutil_timercmp(&now, endtime, >=))
+                               return count;
+               }
        }
        return count;
 }
@@ -1338,12 +1393,16 @@ event_process_active_single_queue(struct event_base *base,
    we process.
  */
 static int
-event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
+event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr,
+    int max_to_process, const struct timeval *endtime)
 {
        int count = 0;
        struct deferred_cb *cb;
-
 #define MAX_DEFERRED 16
+       if (max_to_process > MAX_DEFERRED)
+               max_to_process = MAX_DEFERRED;
+#undef MAX_DEFERRED
+
        while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
                cb->queued = 0;
                TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
@@ -1355,10 +1414,16 @@ event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
                LOCK_DEFERRED_QUEUE(queue);
                if (*breakptr)
                        return -1;
-               if (++count == MAX_DEFERRED)
+               if (++count >= max_to_process)
                        break;
+               if (endtime) {
+                       struct timeval now;
+                       update_time_cache(queue->base);
+                       gettime(queue->base, &now);
+                       if (evutil_timercmp(&now, endtime, >=))
+                               return count;
+               }
        }
-#undef MAX_DEFERRED
        return count;
 }
 
@@ -1374,11 +1439,28 @@ event_process_active(struct event_base *base)
        /* Caller must hold th_base_lock */
        struct event_list *activeq = NULL;
        int i, c = 0;
+       const struct timeval *endtime;
+       struct timeval tv;
+       const int maxcb = base->max_dispatch_callbacks;
+       const int limit_after_prio = base->limit_callbacks_after_prio;
+       if (base->max_dispatch_time.tv_sec >= 0) {
+               update_time_cache(base);
+               gettime(base, &tv);
+               evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
+               endtime = &tv;
+       } else {
+               endtime = NULL;
+       }
 
        for (i = 0; i < base->nactivequeues; ++i) {
                if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
                        activeq = &base->activequeues[i];
-                       c = event_process_active_single_queue(base, activeq);
+                       if (i < limit_after_prio)
+                               c = event_process_active_single_queue(base, activeq,
+                                   INT_MAX, NULL);
+                       else
+                               c = event_process_active_single_queue(base, activeq,
+                                   maxcb, endtime);
                        if (c < 0)
                                return -1;
                        else if (c > 0)
@@ -1389,7 +1471,8 @@ event_process_active(struct event_base *base)
                }
        }
 
-       event_process_deferred_callbacks(&base->defer_queue,&base->event_break);
+       event_process_deferred_callbacks(&base->defer_queue,&base->event_break,
+           maxcb-c, endtime);
        return c;
 }
 
@@ -2023,7 +2106,7 @@ event_add_internal(struct event *ev, const struct timeval *tv,
                else if (ev->ev_events & EV_SIGNAL)
                        res = evmap_signal_add(base, (int)ev->ev_fd, ev);
                if (res != -1)
-                       event_queue_insert(base, ev, EVLIST_INSERTED);
+                       event_queue_insert_inserted(base, ev);
                if (res == 1) {
                        /* evmap says we need to notify the main thread. */
                        notify = 1;
@@ -2048,17 +2131,6 @@ event_add_internal(struct event *ev, const struct timeval *tv,
                if (ev->ev_closure == EV_CLOSURE_PERSIST && !tv_is_absolute)
                        ev->ev_io_timeout = *tv;
 
-               /*
-                * we already reserved memory above for the case where we
-                * are not replacing an existing timeout.
-                */
-               if (ev->ev_flags & EVLIST_TIMEOUT) {
-                       /* XXX I believe this is needless. */
-                       if (min_heap_elt_is_top(ev))
-                               notify = 1;
-                       event_queue_remove(base, ev, EVLIST_TIMEOUT);
-               }
-
                /* Check if it is active due to a timeout.  Rescheduling
                 * this timeout before the callback can be executed
                 * removes it from the active list. */
@@ -2074,7 +2146,7 @@ event_add_internal(struct event *ev, const struct timeval *tv,
                                }
                        }
 
-                       event_queue_remove(base, ev, EVLIST_ACTIVE);
+                       event_queue_remove_active(base, ev);
                }
 
                gettime(base, &now);
@@ -2093,10 +2165,11 @@ event_add_internal(struct event *ev, const struct timeval *tv,
                }
 
                event_debug((
-                        "event_add: timeout in %d seconds, call %p",
-                        (int)tv->tv_sec, ev->ev_callback));
+                        "event_add: event %p, timeout in %d seconds %d useconds, call %p",
+                        ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
+
+               event_queue_reinsert_timeout(base, ev);
 
-               event_queue_insert(base, ev, EVLIST_TIMEOUT);
                if (common_timeout) {
                        struct common_timeout_list *ctl =
                            get_common_timeout_list(base, &ev->ev_timeout);
@@ -2188,14 +2261,14 @@ event_del_internal(struct event *ev)
                 * dispatch loop early anyway, so we wouldn't gain anything by
                 * doing it.
                 */
-               event_queue_remove(base, ev, EVLIST_TIMEOUT);
+               event_queue_remove_timeout(base, ev);
        }
 
        if (ev->ev_flags & EVLIST_ACTIVE)
-               event_queue_remove(base, ev, EVLIST_ACTIVE);
+               event_queue_remove_active(base, ev);
 
        if (ev->ev_flags & EVLIST_INSERTED) {
-               event_queue_remove(base, ev, EVLIST_INSERTED);
+               event_queue_remove_inserted(base, ev);
                if (ev->ev_events & (EV_READ|EV_WRITE))
                        res = evmap_io_del(base, ev->ev_fd, ev);
                else
@@ -2266,7 +2339,7 @@ event_active_nolock(struct event *ev, int res, short ncalls)
                ev->ev_pncalls = NULL;
        }
 
-       event_queue_insert(base, ev, EVLIST_ACTIVE);
+       event_queue_insert_active(base, ev);
 
        if (EVBASE_NEED_NOTIFY(base))
                evthread_notify_base(base);
@@ -2353,7 +2426,7 @@ timeout_next(struct event_base *base, struct timeval **tv_p)
 
        EVUTIL_ASSERT(tv->tv_sec >= 0);
        EVUTIL_ASSERT(tv->tv_usec >= 0);
-       event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));
+       event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
 
 out:
        return (res);
@@ -2438,49 +2511,91 @@ timeout_process(struct event_base *base)
                /* delete this event from the I/O queues */
                event_del_internal(ev);
 
-               event_debug(("timeout_process: call %p",
-                        ev->ev_callback));
+               event_debug(("timeout_process: event: %p, call %p",
+                        ev, ev->ev_callback));
                event_active_nolock(ev, EV_TIMEOUT, 1);
        }
 }
 
-/* Remove 'ev' from 'queue' (EVLIST_...) in base. */
+#if (EVLIST_INTERNAL >> 4) != 1
+#error "Mismatch for value of EVLIST_INTERNAL"
+#endif
+/* These are a fancy way to spell
+     if (~ev->ev_flags & EVLIST_INTERNAL)
+         base->event_count--/++;
+*/
+#define DECR_EVENT_COUNT(base,ev) \
+       ((base)->event_count -= (~((ev)->ev_flags >> 4) & 1))
+#define INCR_EVENT_COUNT(base, ev) \
+       ((base)->event_count += (~((ev)->ev_flags >> 4) & 1))
+
 static void
-event_queue_remove(struct event_base *base, struct event *ev, int queue)
+event_queue_remove_inserted(struct event_base *base, struct event *ev)
 {
        EVENT_BASE_ASSERT_LOCKED(base);
-
-       if (!(ev->ev_flags & queue)) {
+       if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
+               event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
+                          ev, ev->ev_fd, EVLIST_INSERTED);
+               return;
+       }
+       DECR_EVENT_COUNT(base, ev);
+       ev->ev_flags &= ~EVLIST_INSERTED;
+       TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
+}
+static void
+event_queue_remove_active(struct event_base *base, struct event *ev)
+{
+       EVENT_BASE_ASSERT_LOCKED(base);
+       if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_ACTIVE))) {
+               event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
+                          ev, ev->ev_fd, EVLIST_ACTIVE);
+               return;
+       }
+       DECR_EVENT_COUNT(base, ev);
+       ev->ev_flags &= ~EVLIST_ACTIVE;
+       base->event_count_active--;
+       TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
+           ev, ev_active_next);
+}
+static void
+event_queue_remove_timeout(struct event_base *base, struct event *ev)
+{
+       EVENT_BASE_ASSERT_LOCKED(base);
+       if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
                event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
-                          ev, ev->ev_fd, queue);
+                          ev, ev->ev_fd, EVLIST_TIMEOUT);
                return;
        }
+       DECR_EVENT_COUNT(base, ev);
+       ev->ev_flags &= ~EVLIST_TIMEOUT;
 
-       if (~ev->ev_flags & EVLIST_INTERNAL)
-               base->event_count--;
-
-       ev->ev_flags &= ~queue;
-       switch (queue) {
-       case EVLIST_INSERTED:
-               TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
-               break;
-       case EVLIST_ACTIVE:
-               base->event_count_active--;
-               TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
-                   ev, ev_active_next);
-               break;
-       case EVLIST_TIMEOUT:
-               if (is_common_timeout(&ev->ev_timeout, base)) {
-                       struct common_timeout_list *ctl =
-                           get_common_timeout_list(base, &ev->ev_timeout);
-                       TAILQ_REMOVE(&ctl->events, ev,
-                           ev_timeout_pos.ev_next_with_common_timeout);
-               } else {
-                       min_heap_erase(&base->timeheap, ev);
-               }
-               break;
-       default:
-               event_errx(1, "%s: unknown queue %x", __func__, queue);
+       if (is_common_timeout(&ev->ev_timeout, base)) {
+               struct common_timeout_list *ctl =
+                   get_common_timeout_list(base, &ev->ev_timeout);
+               TAILQ_REMOVE(&ctl->events, ev,
+                   ev_timeout_pos.ev_next_with_common_timeout);
+       } else {
+               min_heap_erase(&base->timeheap, ev);
+       }
+}
+
+/* Remove and reinsert 'ev' into the timeout queue. */
+static void
+event_queue_reinsert_timeout(struct event_base *base, struct event *ev)
+{
+       if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
+               event_queue_insert_timeout(base, ev);
+               return;
+       }
+
+       if (is_common_timeout(&ev->ev_timeout, base)) {
+               struct common_timeout_list *ctl =
+                   get_common_timeout_list(base, &ev->ev_timeout);
+               TAILQ_REMOVE(&ctl->events, ev,
+                   ev_timeout_pos.ev_next_with_common_timeout);
+               insert_common_timeout_inorder(ctl, ev);
+       } else {
+               min_heap_adjust(&base->timeheap, ev);
        }
 }
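
event_queue_reinsert_timeout() replaces the old remove-then-insert round trip when a pending timeout is rescheduled: common timeouts are re-linked in sorted order, and heap-managed timeouts use min_heap_adjust(), which this commit adds to minheap-internal.h, to restore heap order in one O(log n) pass. A sketch of what such an adjust does for a conventional array-backed binary min-heap (less, sift_up, and sift_down are hypothetical helpers, not libevent's exact code):

/* Restore the heap property for element i after its key changed in
 * place: sift up if it became smaller than its parent, else sift down. */
static void
heap_adjust(struct event **heap, unsigned n, unsigned i)
{
	unsigned parent = (i - 1) / 2;
	if (i > 0 && less(heap[i], heap[parent]))
		sift_up(heap, i);
	else
		sift_down(heap, n, i);
}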
 
@@ -2516,44 +2631,63 @@ insert_common_timeout_inorder(struct common_timeout_list *ctl,
 }
 
 static void
-event_queue_insert(struct event_base *base, struct event *ev, int queue)
+event_queue_insert_inserted(struct event_base *base, struct event *ev)
 {
        EVENT_BASE_ASSERT_LOCKED(base);
 
-       if (ev->ev_flags & queue) {
-               /* Double insertion is possible for active events */
-               if (queue & EVLIST_ACTIVE)
-                       return;
+       if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
+               event_errx(1, "%s: %p(fd %d) already inserted", __func__,
+                          ev, ev->ev_fd);
+               return;
+       }
 
-               event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
-                          ev, ev->ev_fd, queue);
+       INCR_EVENT_COUNT(base, ev);
+
+       ev->ev_flags |= EVLIST_INSERTED;
+
+       TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
+}
+
+static void
+event_queue_insert_active(struct event_base *base, struct event *ev)
+{
+       EVENT_BASE_ASSERT_LOCKED(base);
+
+       if (ev->ev_flags & EVLIST_ACTIVE) {
+               /* Double insertion is possible for active events */
                return;
        }
 
-       if (~ev->ev_flags & EVLIST_INTERNAL)
-               base->event_count++;
-
-       ev->ev_flags |= queue;
-       switch (queue) {
-       case EVLIST_INSERTED:
-               TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
-               break;
-       case EVLIST_ACTIVE:
-               base->event_count_active++;
-               TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
-                   ev,ev_active_next);
-               break;
-       case EVLIST_TIMEOUT: {
-               if (is_common_timeout(&ev->ev_timeout, base)) {
-                       struct common_timeout_list *ctl =
-                           get_common_timeout_list(base, &ev->ev_timeout);
-                       insert_common_timeout_inorder(ctl, ev);
-               } else
-                       min_heap_push(&base->timeheap, ev);
-               break;
+       INCR_EVENT_COUNT(base, ev);
+
+       ev->ev_flags |= EVLIST_ACTIVE;
+
+       base->event_count_active++;
+       TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
+           ev,ev_active_next);
+}
+
+static void
+event_queue_insert_timeout(struct event_base *base, struct event *ev)
+{
+       EVENT_BASE_ASSERT_LOCKED(base);
+
+       if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
+               event_errx(1, "%s: %p(fd %d) already on timeout", __func__,
+                          ev, ev->ev_fd);
+               return;
        }
-       default:
-               event_errx(1, "%s: unknown queue %x", __func__, queue);
+
+       INCR_EVENT_COUNT(base, ev);
+
+       ev->ev_flags |= EVLIST_TIMEOUT;
+
+       if (is_common_timeout(&ev->ev_timeout, base)) {
+               struct common_timeout_list *ctl =
+                   get_common_timeout_list(base, &ev->ev_timeout);
+               insert_common_timeout_inorder(ctl, ev);
+       } else {
+               min_heap_push(&base->timeheap, ev);
        }
 }
 
index 4ad1be0a4101cc482361439a178e9b9ca6c66b38..3fe306f4f89ea70e4090739eb7f3946a69029b60 100644 (file)
@@ -339,8 +339,12 @@ static void
 evrpc_request_cb_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res)
 {
        struct evrpc_req_generic *rpc_state = arg;
-       struct evrpc *rpc = rpc_state->rpc;
-       struct evhttp_request *req = rpc_state->http_req;
+       struct evrpc *rpc;
+       struct evhttp_request *req;
+
+       EVUTIL_ASSERT(rpc_state);
+       rpc = rpc_state->rpc;
+       req = rpc_state->http_req;
 
        if (hook_res == EVRPC_TERMINATE)
                goto error;
@@ -400,8 +404,13 @@ evrpc_request_done_closure(void *, enum EVRPC_HOOK_RESULT);
 void
 evrpc_request_done(struct evrpc_req_generic *rpc_state)
 {
-       struct evhttp_request *req = rpc_state->http_req;
-       struct evrpc *rpc = rpc_state->rpc;
+       struct evhttp_request *req;
+       struct evrpc *rpc;
+
+       EVUTIL_ASSERT(rpc_state);
+
+       req = rpc_state->http_req;
+       rpc = rpc_state->rpc;
 
        if (rpc->reply_complete(rpc_state->reply) == -1) {
                /* the reply was not completely filled in.  error out */
@@ -467,7 +476,9 @@ static void
 evrpc_request_done_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res)
 {
        struct evrpc_req_generic *rpc_state = arg;
-       struct evhttp_request *req = rpc_state->http_req;
+       struct evhttp_request *req;
+       EVUTIL_ASSERT(rpc_state);
+       req = rpc_state->http_req;
 
        if (hook_res == EVRPC_TERMINATE)
                goto error;
index a8dfb4c32864981df6d7c2cf4315b86ec47de696..4f1c85949d7b0dc66233bfc45c5907b0629584f8 100644 (file)
@@ -459,9 +459,9 @@ evutil_socket_connect(evutil_socket_t *fd_ptr, struct sockaddr *sa, int socklen)
        int made_fd = 0;
 
        if (*fd_ptr < 0) {
-               made_fd = 1;
                if ((*fd_ptr = socket(sa->sa_family, SOCK_STREAM, 0)) < 0)
                        goto err;
+               made_fd = 1;
                if (evutil_make_socket_nonblocking(*fd_ptr) < 0) {
                        goto err;
                }
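
Setting made_fd = 1 only after socket() succeeds fixes the error path: previously a socket() failure reached the cleanup label claiming a descriptor had been created, so it would close and reset *fd_ptr even though it still held -1. A sketch of the cleanup being guarded (the err label body is assumed from context, not quoted from the file):

err:
	if (made_fd) {	/* true only once socket() has succeeded */
		evutil_closesocket(*fd_ptr);
		*fd_ptr = -1;
	}
	return (-1);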
index 9b42f52a97c61b551d04c3056f7f9acc12872057..610158681daf05140a11d58793d3ea2b2dd757b3 100644 (file)
@@ -56,10 +56,12 @@ evutil_secure_rng_global_setup_locks_(const int enable_locks)
        return 0;
 }
 
-#ifndef _EVENT_HAVE_ARC4RANDOM_BUF
 static void
-arc4random_buf(void *buf, size_t n)
+ev_arc4random_buf(void *buf, size_t n)
 {
+#ifdef _EVENT_HAVE_ARC4RANDOM_BUF
+       return arc4random_buf(buf, n);
+#else
        unsigned char *b = buf;
        /* Make sure that we start out with b at a 4-byte alignment; plenty
         * of CPUs care about this for 32-bit access. */
@@ -79,8 +81,8 @@ arc4random_buf(void *buf, size_t n)
                ev_uint32_t u = arc4random();
                memcpy(b, &u, n);
        }
-}
 #endif
+}
 
 #else /* !_EVENT_HAVE_ARC4RANDOM { */
 
@@ -123,12 +125,18 @@ evutil_secure_rng_init(void)
        return val;
 }
 
+static void
+ev_arc4random_buf(void *buf, size_t n)
+{
+       arc4random_buf(buf, n);
+}
+
 #endif /* } !_EVENT_HAVE_ARC4RANDOM */
 
 void
 evutil_secure_rng_get_bytes(void *buf, size_t n)
 {
-       arc4random_buf(buf, n);
+       ev_arc4random_buf(buf, n);
 }
 
 void
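
Previously libevent defined its own arc4random_buf() whenever the configure-time probe said the platform lacked one, which broke OS-neutral binaries on platforms where only some OS releases ship the function (b442302). The rework always routes through an internal ev_arc4random_buf() wrapper that forwards to the system arc4random_buf() when available and otherwise synthesizes output from repeated arc4random() calls. Callers keep one stable entry point:

#include <event2/util.h>

/* Fill a key with secure random bytes; this now goes through
 * ev_arc4random_buf() regardless of what the OS provides. */
unsigned char key[32];
evutil_secure_rng_get_bytes(key, sizeof(key));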
index 200b31fcce170bc913c3a774a445ff74cd2de7a3..58fef1695a258badec647ea607ec94ddcf6d12cc 100644 (file)
@@ -1588,6 +1588,7 @@ evhttp_parse_request_line(struct evhttp_request *req, char *line)
                    default:
                        break;
                }
+               break;
            case 5:
                /* Method length is 5 bytes, which can only encompass PATCH and TRACE */
                switch (*method) {
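
The added break closes the 4-byte-method case, which previously fell straight through into the 5-byte case and re-dispatched on *method, so an already-matched 4-byte method could be re-tested against the PATCH/TRACE handling. Schematically, the bug class is:

switch (method_len) {
case 4:
	/* matched POST, HEAD, ... */
	break;	/* without this break, control falls into case 5 */
case 5:
	/* matched PATCH, TRACE */
	break;
}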
index 113a15400de4df79df6576587879b417fbe76929..be726faa4e2f833799f18236ecda9c99caa8b330 100644 (file)
@@ -377,7 +377,7 @@ struct evdns_request *evdns_base_resolve_reverse_ipv6(struct evdns_base *base, c
 
   @param base the evdns_base that was used to make the request
   @param req the evdns_request that was returned by calling a resolve function
-  @see evdns_base_resolve_ip4(), evdns_base_resolve_ipv6, evdns_base_resolve_reverse
+  @see evdns_base_resolve_ipv4(), evdns_base_resolve_ipv6, evdns_base_resolve_reverse
 */
 void evdns_cancel_request(struct evdns_base *base, struct evdns_request *req);
 
index c487f7808888a542d4019d9a5079ed19b1a4799a..fae5bb87a8acdb114f583117955acd51c0650165 100644 (file)
@@ -536,6 +536,38 @@ int event_config_set_flag(struct event_config *cfg, int flag);
  */
 int event_config_set_num_cpus_hint(struct event_config *cfg, int cpus);
 
+/**
+ * Record an interval and/or a number of callbacks after which the event base
+ * should check for new events.  By default, the event base will run as many
+ * events are as activated at the higest activated priority before checking
+ * for new events.  If you configure it by setting max_interval, it will check
+ * the time after each callback, and not allow more than max_interval to
+ * elapse before checking for new events.  If you configure it by setting
+ * max_callbacks to a value >= 0, it will run no more than max_callbacks
+ * callbacks before checking for new events.
+ *
+ * This option can decrease the latency of high-priority events, and
+ * avoid priority inversions where multiple low-priority events keep us from
+ * polling for high-priority events, but at the expense of slightly decreasing
+ * the throughput.  Use it with caution!
+ *
+ * @param cfg The event_base configuration object.
+ * @param max_interval An interval after which Libevent should stop running
+ *     callbacks and check for more events, or NULL if there should be
+ *     no such interval.
+ * @param max_callbacks A number of callbacks after which Libevent should
+ *     stop running callbacks and check for more events, or -1 if there
+ *     should be no such limit.
+ * @param min_priority A priority below which max_interval and max_callbacks
+ *     should not be enforced.  If this is set to 0, they are enforced
+ *     for events of every priority; if it's set to 1, they're enforced
+ *     for events of priority 1 and above, and so on.
+ * @return 0 on success, -1 on failure.
+ **/
+int event_config_set_max_dispatch_interval(struct event_config *cfg,
+    const struct timeval *max_interval, int max_callbacks,
+    int min_priority);
+
 /**
   Initialize the event API.
 
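A hedged usage sketch tying the new knob to the event.c plumbing above (values illustrative): stop and poll for new events after 50 ms or 100 callbacks, enforced only for events of priority 1 and above.

#include <event2/event.h>

struct event_config *cfg = event_config_new();
struct timeval max_interval = { 0, 50 * 1000 };	/* 50 ms */

/* Re-check for new events after 50 ms or 100 callbacks have run,
 * but never interrupt priority-0 callbacks. */
event_config_set_max_dispatch_interval(cfg, &max_interval, 100, 1);

struct event_base *base = event_base_new_with_config(cfg);
event_config_free(cfg);
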
index d501ef7d96c0bb19ad25e842a4a70ea8176c9b80..cb77515400e4b094cda76e411b2beb34a1524972 100644 (file)
@@ -181,7 +181,7 @@ void event_set(struct event *, evutil_socket_t, short, void (*)(evutil_socket_t,
 /**
    @name timeout_* macros
 
-   @deprecated These macros are deprecated because their naming is inconsisten
+   @deprecated These macros are deprecated because their naming is inconsistent
      with the rest of Libevent.  Use the evtimer_* macros instead.
    @{
  */
@@ -195,7 +195,7 @@ void event_set(struct event *, evutil_socket_t, short, void (*)(evutil_socket_t,
 /**
    @name signal_* macros
 
-   @deprecated These macros are deprecated because their naming is inconsisten
+   @deprecated These macros are deprecated because their naming is inconsistent
      with the rest of Libevent.  Use the evsignal_* macros instead.
    @{
  */
index b0772dc13f008ceed208a09740d21663531dd70f..dc61ae3969f9d97a0f27f0fd6ea902343c66089a 100644 (file)
@@ -164,12 +164,6 @@ err:
        return (NULL);
 }
 
-static void
-kq_sighandler(int sig)
-{
-       /* Do nothing here */
-}
-
 #define ADD_UDATA 0x30303
 
 static void
@@ -432,9 +426,13 @@ kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *
        if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
                return (-1);
 
-       /* XXXX The manpage suggest we could use SIG_IGN instead of a
-        * do-nothing handler */
-       if (_evsig_set_handler(base, nsignal, kq_sighandler) == -1)
+        /* We can set the handler for most signals to SIG_IGN and
+         * still have them reported to us in the queue.  However,
+         * if the handler for SIGCHLD is SIG_IGN, the system reaps
+         * zombie processes for us, and we don't get any notification.
+         * This appears to be the only signal with this quirk. */
+       if (_evsig_set_handler(base, nsignal,
+                               nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1)
                return (-1);
 
        return (0);
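The SIGCHLD quirk noted in the new comment can be reproduced with plain POSIX calls, independent of libevent; a hypothetical test program:

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* ask the kernel to auto-reap children */
	if (fork() == 0)
		_exit(0);		/* child exits immediately */
	sleep(1);			/* give the child time to die */
	/* Because SIGCHLD is SIG_IGN, no zombie is left to collect: */
	if (waitpid(-1, NULL, 0) == -1)
		perror("waitpid");	/* expected failure: ECHILD */
	return 0;
}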
diff --git a/sntp/libevent/m4/libevent_openssl.m4 b/sntp/libevent/m4/libevent_openssl.m4
new file mode 100644 (file)
index 0000000..7b27325
--- /dev/null
@@ -0,0 +1,47 @@
+dnl ######################################################################
+dnl OpenSSL support
+AC_DEFUN([LIBEVENT_OPENSSL], [
+AC_REQUIRE([NTP_PKG_CONFIG])dnl
+
+case "$enable_openssl" in
+ yes)
+    have_openssl=no
+    case "$PKG_CONFIG" in
+     '')
+       ;;
+     *)
+       OPENSSL_LIBS=`$PKG_CONFIG --libs openssl 2>/dev/null`
+       case "$OPENSSL_LIBS" in
+        '') ;;
+        *) OPENSSL_LIBS="$OPENSSL_LIBS $EV_LIB_GDI $EV_LIB_WS32"
+           have_openssl=yes
+           ;;
+       esac
+       OPENSSL_INCS=`$PKG_CONFIG --cflags openssl 2>/dev/null`
+       ;;
+    esac
+    case "$have_openssl" in
+     yes) ;;
+     *)
+       save_LIBS="$LIBS"
+       LIBS=""
+       OPENSSL_LIBS=""
+       AC_SEARCH_LIBS([SSL_new], [ssl],
+           [have_openssl=yes
+           OPENSSL_LIBS="$LIBS -lcrypto $EV_LIB_GDI $EV_LIB_WS32"],
+           [have_openssl=no],
+           [-lcrypto $EV_LIB_GDI $EV_LIB_WS32])
+       LIBS="$save_LIBS"
+       ;;
+    esac
+    AC_SUBST(OPENSSL_INCS)
+    AC_SUBST(OPENSSL_LIBS)
+    case "$have_openssl" in
+     yes)  AC_DEFINE(HAVE_OPENSSL, 1, [Define if the system has openssl]) ;;
+    esac
+    ;;
+esac
+
+# check if we have and should use openssl
+AM_CONDITIONAL(OPENSSL, [test "$enable_openssl" != "no" && test "$have_openssl" = "yes"])
+])
diff --git a/sntp/libevent/m4/ntp_pkg_config.m4 b/sntp/libevent/m4/ntp_pkg_config.m4
new file mode 100644 (file)
index 0000000..1bce8a6
--- /dev/null
@@ -0,0 +1,27 @@
+dnl NTP_PKG_CONFIG                                     -*- Autoconf -*-
+dnl
+dnl Look for pkg-config, which must be at least
+dnl $ntp_pkgconfig_min_version.
+dnl
+AC_DEFUN([NTP_PKG_CONFIG], [
+
+dnl lower the minimum version if you find an earlier one works
+ntp_pkgconfig_min_version='0.15.0'
+AC_PATH_TOOL([PKG_CONFIG], [pkg-config])
+AS_UNSET([ac_cv_path_PKG_CONFIG])
+AS_UNSET([ac_cv_path_ac_pt_PKG_CONFIG])
+
+case "$PKG_CONFIG" in
+ /*)
+    AC_MSG_CHECKING([if pkg-config is at least version $ntp_pkgconfig_min_version])
+    if $PKG_CONFIG --atleast-pkgconfig-version $ntp_pkgconfig_min_version; then
+       AC_MSG_RESULT([yes])
+    else
+       AC_MSG_RESULT([no])
+       PKG_CONFIG=""
+    fi
+    ;;
+esac
+
+]) dnl NTP_PKG_CONFIG
+
index 8055e903c176316f87724b11ef3baa3d4436f639..53b88886225bbdbb6023ceabb9c1b5c99d105da4 100644 (file)
@@ -46,21 +46,20 @@ static inline void       min_heap_ctor(min_heap_t* s);
 static inline void          min_heap_dtor(min_heap_t* s);
 static inline void          min_heap_elem_init(struct event* e);
 static inline int           min_heap_elt_is_top(const struct event *e);
-static inline int           min_heap_elem_greater(struct event *a, struct event *b);
 static inline int           min_heap_empty(min_heap_t* s);
 static inline unsigned      min_heap_size(min_heap_t* s);
 static inline struct event*  min_heap_top(min_heap_t* s);
 static inline int           min_heap_reserve(min_heap_t* s, unsigned n);
 static inline int           min_heap_push(min_heap_t* s, struct event* e);
 static inline struct event*  min_heap_pop(min_heap_t* s);
+static inline int           min_heap_adjust(min_heap_t *s, struct event* e);
 static inline int           min_heap_erase(min_heap_t* s, struct event* e);
 static inline void          min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void          min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e);
 static inline void          min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
 
-int min_heap_elem_greater(struct event *a, struct event *b)
-{
-       return evutil_timercmp(&a->ev_timeout, &b->ev_timeout, >);
-}
+#define min_heap_elem_greater(a, b) \
+       (evutil_timercmp(&(a)->ev_timeout, &(b)->ev_timeout, >))
 
 void min_heap_ctor(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
 void min_heap_dtor(min_heap_t* s) { if (s->p) mm_free(s->p); }
@@ -106,7 +105,7 @@ int min_heap_erase(min_heap_t* s, struct event* e)
                   to be less than the parent, it can't need to shift both up and
                   down. */
                if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
-                       min_heap_shift_up_(s, e->ev_timeout_pos.min_heap_idx, last);
+                       min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, last);
                else
                        min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, last);
                e->ev_timeout_pos.min_heap_idx = -1;
@@ -115,6 +114,23 @@ int min_heap_erase(min_heap_t* s, struct event* e)
        return -1;
 }
 
+int min_heap_adjust(min_heap_t *s, struct event *e)
+{
+       if (-1 == e->ev_timeout_pos.min_heap_idx) {
+               return min_heap_push(s, e);
+       } else {
+               unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
+               /* The position of e has changed; we shift it up or down
+                * as needed.  We can't need to do both. */
+               if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], e))
+                       min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, e);
+               else
+                       min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, e);
+               return 0;
+       }
+       return -1;
+}
+
 int min_heap_reserve(min_heap_t* s, unsigned n)
 {
        if (s->a < n)
@@ -131,6 +147,18 @@ int min_heap_reserve(min_heap_t* s, unsigned n)
        return 0;
 }
 
+void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
+    unsigned parent = (hole_index - 1) / 2;
+    do
+    {
+       (s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
+       hole_index = parent;
+       parent = (hole_index - 1) / 2;
+    } while (hole_index && min_heap_elem_greater(s->p[parent], e));
+    (s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
+}
+
 void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
 {
     unsigned parent = (hole_index - 1) / 2;
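To see the one-direction property min_heap_adjust() relies on, here is a toy sketch of the same adjust idea on a plain int array (illustrative only, not libevent's structures): a changed key at index i is compared with its parent at (i-1)/2 and then shifted up or down, never both:

#include <stdio.h>

static void swap(int *a, int *b) { int t = *a; *a = *b; *b = t; }

/* Restore the min-heap property after the key at index i changed. */
static void
adjust(int *h, int n, int i)
{
	while (i > 0 && h[(i - 1) / 2] > h[i]) {	/* shift up */
		swap(&h[i], &h[(i - 1) / 2]);
		i = (i - 1) / 2;
	}
	for (;;) {					/* shift down */
		int child = 2 * i + 1;
		if (child >= n)
			break;
		if (child + 1 < n && h[child + 1] < h[child])
			child++;			/* smaller child */
		if (h[i] <= h[child])
			break;
		swap(&h[i], &h[child]);
		i = child;
	}
}

int
main(void)
{
	int h[] = { 1, 5, 2, 9, 6 };	/* a valid min-heap */
	h[3] = 0;			/* decrease a key... */
	adjust(h, 5, 3);		/* ...and restore the heap */
	printf("top = %d\n", h[0]);	/* prints 0 */
	return 0;
}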
index df0d0df0441a0679b3d41f22530a0dddc6d00a56..5d02e77b34b03ad85e6b5f92d281894def0a6ba1 100644 (file)
@@ -13,6 +13,7 @@ hello_world_sources = hello-world.c
 http_server_sources = http-server.c
 
 if OPENSSL
+AM_CPPFLAGS += $(OPENSSL_INCS)
 noinst_PROGRAMS += le-proxy
 le_proxy_sources = le-proxy.c
 le_proxy_LDADD = $(LDADD) ../libevent_openssl.la
index 33a49cc77283712e65c2ce5d9a66ab2bf7b60c4b..d8134e8311b6ffbe81be156904cdc0e39bd8125a 100644 (file)
@@ -173,6 +173,10 @@ main(int c, char **v) {
                evutil_socket_t sock;
                struct sockaddr_in my_addr;
                sock = socket(PF_INET, SOCK_DGRAM, 0);
+               if (sock == -1) {
+                       perror("socket");
+                       exit(1);
+               }
                evutil_make_socket_nonblocking(sock);
                my_addr.sin_family = AF_INET;
                my_addr.sin_port = htons(10053);
index 2b2242bb5d7a9c0f5cb2ca1a8c66037780457e6d..5196fff116b1eb007e537cffcce95e6b84829027 100644 (file)
@@ -133,7 +133,8 @@ dump_request_cb(struct evhttp_request *req, void *arg)
                int n;
                char cbuf[128];
 		n = evbuffer_remove(buf, cbuf, sizeof(cbuf)-1);
-               fwrite(cbuf, 1, n, stdout);
+               if (n > 0)
+                   fwrite(cbuf, 1, n, stdout);
        }
        puts(">>>");
 
@@ -179,6 +180,8 @@ send_document_cb(struct evhttp_request *req, void *arg)
 
        /* We need to decode it, to see what path the user really wanted. */
        decoded_path = evhttp_uridecode(path, 0, NULL);
+       if (decoded_path == NULL)
+               goto err;
        /* Don't allow any ".."s in the path, to avoid exposing stuff outside
         * of the docroot.  This test is both overzealous and underzealous:
 	 * it forbids acceptable paths like "/this/one..here", but it doesn't
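The dump_request_cb() hunk above uses the usual evbuffer drain pattern; a standalone sketch with the public evbuffer API (dump_evbuffer is a hypothetical name):

#include <stdio.h>
#include <event2/buffer.h>

/* Sketch: drain an evbuffer into a FILE chunk by chunk. */
static void
dump_evbuffer(struct evbuffer *buf, FILE *out)
{
	char chunk[128];

	while (evbuffer_get_length(buf) > 0) {
		int n = evbuffer_remove(buf, chunk, sizeof(chunk));
		if (n <= 0)
			break;	/* defensive: evbuffer_remove failed */
		fwrite(chunk, 1, (size_t)n, out);
	}
}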
index 72c0847ccd78ce2956c12879881a31e0d1399ec0..2079204dbd71b0d99e655193f8a72da1210363af 100644 (file)
@@ -400,9 +400,11 @@ evsig_dealloc(struct event_base *base)
        int i = 0;
        if (base->sig.ev_signal_added) {
                event_del(&base->sig.ev_signal);
-               event_debug_unassign(&base->sig.ev_signal);
                base->sig.ev_signal_added = 0;
        }
+       /* debug event is created in evsig_init/event_assign even when
+        * ev_signal_added == 0, so unassign is required */
+       event_debug_unassign(&base->sig.ev_signal);
 
        for (i = 0; i < NSIG; ++i) {
                if (i < base->sig.sh_old_max && base->sig.sh_old[i] != NULL)
index 66e01ea35e1103ac9833b6eaa8ba94a9e8bed056..bcda535c0269cd245b423f6c64fd6eedf7553dc0 100644 (file)
@@ -2,7 +2,7 @@ AUTOMAKE_OPTIONS = foreign
 
 AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/compat -I$(top_srcdir)/include -I../include -DTINYTEST_LOCAL
 
-EXTRA_DIST = regress.rpc regress.gen.h regress.gen.c test.sh
+EXTRA_DIST = regress.rpc regress.gen.h regress.gen.c rpcgen_wrapper.sh test.sh
 
 noinst_PROGRAMS = test-init test-eof test-weof test-time \
        bench bench_cascade bench_http bench_httpclient test-ratelim \
@@ -71,15 +71,20 @@ bench_http_LDADD = $(LIBEVENT_GC_SECTIONS) ../libevent.la
 bench_httpclient_SOURCES = bench_httpclient.c
 bench_httpclient_LDADD = $(LIBEVENT_GC_SECTIONS) ../libevent_core.la
 
-regress.gen.c regress.gen.h: regress.rpc $(top_srcdir)/event_rpcgen.py
-       if $(top_srcdir)/event_rpcgen.py $(srcdir)/regress.rpc ; then \
-          echo "HI"; \
+regress.gen.c regress.gen.h: rpcgen-attempted
+
+rpcgen-attempted: $(srcdir)/regress.rpc $(srcdir)/../event_rpcgen.py $(srcdir)/rpcgen_wrapper.sh
+       date -u > $@
+       if $(srcdir)/rpcgen_wrapper.sh $(srcdir); then \
+          echo "rpcgen okay"; \
        else \
-          echo "No Python installed; can't test RPC."; \
+          echo "No Python installed; stubbing out RPC test." >&2; \
           echo " "> regress.gen.c; \
           echo "#define NO_PYTHON_EXISTS" > regress.gen.h; \
        fi
 
+CLEANFILES = rpcgen-attempted
+
 DISTCLEANFILES = *~
 
 verify: check
index 1ec6ad50d4942b07d1c15c62e104f3ba8f0aad17..a6aa4b7f8adcc57b9ec20e356473577a43461b01 100644 (file)
@@ -91,6 +91,7 @@ main(int argc, char **argv)
        int c;
        int use_iocp = 0;
        unsigned short port = 8080;
+       char *endptr = NULL;
 
 #ifdef _WIN32
        WSADATA WSAData;
@@ -113,11 +114,23 @@ main(int argc, char **argv)
 
                switch (c) {
                case 'p':
-                       port = atoi(argv[i+1]);
+                       if (i+1 >= argc || !argv[i+1]) {
+                               fprintf(stderr, "Missing port\n");
+                               exit(1);
+                       }
+                       port = (int)strtol(argv[i+1], &endptr, 10);
+                       if (*endptr != '\0') {
+                               fprintf(stderr, "Bad port\n");
+                               exit(1);
+                       }
                        break;
                case 'l':
-                       content_len = atol(argv[i+1]);
-                       if (content_len == 0) {
+                       if (i+1 >= argc || !argv[i+1]) {
+                               fprintf(stderr, "Missing content length\n");
+                               exit(1);
+                       }
+                       content_len = (size_t)strtol(argv[i+1], &endptr, 10);
+                       if (*endptr != '\0' || content_len == 0) {
                                fprintf(stderr, "Bad content length\n");
                                exit(1);
                        }
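The strtol()/endptr checks added above generalize; an illustrative helper (parse_port is hypothetical, not in the tree) that also adds the errno and range checks a stricter parser would want:

#include <errno.h>
#include <stdlib.h>

/* Sketch: validate a decimal port string into 1..65535. */
static int
parse_port(const char *s, unsigned short *out)
{
	char *end = NULL;
	long v;

	errno = 0;
	v = strtol(s, &end, 10);
	if (end == s || *end != '\0')		/* no digits, or trailing junk */
		return -1;
	if (errno == ERANGE || v < 1 || v > 65535)	/* out of range */
		return -1;
	*out = (unsigned short)v;
	return 0;
}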
diff --git a/sntp/libevent/test/rpcgen_wrapper.sh b/sntp/libevent/test/rpcgen_wrapper.sh
new file mode 100644 (file)
index 0000000..a60331c
--- /dev/null
@@ -0,0 +1,41 @@
+#!/bin/sh
+# libevent rpcgen_wrapper.sh
+# Transforms event_rpcgen.py failure into success for make, only if
+# regress.gen.c and regress.gen.h already exist in $srcdir.  This
+# is needed for "make distcheck" to pass the read-only $srcdir build,
+# as with read-only sources fresh from tarball, regress.gen.[ch] will
+# be correct in $srcdir but unwritable.  This previously triggered
+# Makefile.am to create stub regress.gen.c and regress.gen.h in the
+# distcheck _build directory, which were then detected as leftover
+# files in the build tree after distclean, breaking distcheck.
+# Note that regress.gen.[ch] are not in fresh git clones, making
+# working Python a requirement for make distcheck of a git tree.
+
+exit_updated() {
+    echo "Updated ${srcdir}/regress.gen.c and ${srcdir}/regress.gen.h"
+    exit 0
+}
+
+exit_reuse() {
+    echo "event_rpcgen.py failed, ${srcdir}/regress.gen.[ch] will be reused." >&2
+    exit 0
+}
+
+exit_failed() {
+    echo "Could not generate regress.gen.[ch] using event_rpcgen.py" >&2
+    exit 1
+}
+
+srcdir=$1
+srcdir=${srcdir:-.}
+${srcdir}/../event_rpcgen.py ${srcdir}/regress.rpc
+case "$?" in
+ 0)
+    exit_updated
+    ;;
+ *)
+    test -r ${srcdir}/regress.gen.c -a -r ${srcdir}/regress.gen.h && \
+       exit_reuse
+    exit_failed
+    ;;
+esac
index 5c4b2a21ccc64d4da995de1ec414103d3c2b00c6..c7cfe5520d07c7f665bc04d186fa9ad3fc4320e3 100644 (file)
@@ -65,6 +65,10 @@ static int cfg_connlimit_tolerance = -1;
 static int cfg_grouplimit_tolerance = -1;
 static int cfg_stddev_tolerance = -1;
 
+#ifdef _WIN32
+static int cfg_enable_iocp = 0;
+#endif
+
 static struct timeval cfg_tick = { 0, 500*1000 };
 
 static struct ev_token_bucket_cfg *conn_bucket_cfg = NULL;
@@ -186,6 +190,7 @@ test_ratelimiting(void)
        double variance;
        double expected_total_persec = -1.0, expected_avg_persec = -1.0;
        int ok = 1;
+       struct event_config *base_cfg;
 
        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
@@ -195,7 +200,16 @@ test_ratelimiting(void)
        if (0)
                event_enable_debug_mode();
 
-       base = event_base_new();
+       base_cfg = event_config_new();
+
+#ifdef _WIN32
+       if (cfg_enable_iocp) {
+               evthread_use_windows_threads();
+               event_config_set_flag(base_cfg, EVENT_BASE_FLAG_STARTUP_IOCP);
+       }
+#endif
+
+       base = event_base_new_with_config(base_cfg);
 
        listener = evconnlistener_new_bind(base, echo_listenercb, base,
            LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE, -1,
@@ -349,6 +363,9 @@ static struct option {
        { "--check-connlimit", &cfg_connlimit_tolerance, 0, 0 },
        { "--check-grouplimit", &cfg_grouplimit_tolerance, 0, 0 },
        { "--check-stddev", &cfg_stddev_tolerance, 0, 0 },
+#ifdef _WIN32
+       { "--iocp", &cfg_enable_iocp, 0, 1 },
+#endif
        { NULL, NULL, -1, 0 },
 };