netievent_udpstoplisten,
netievent_tcpstoplisten,
netievent_tcpclose,
+ netievent_tcpdnsclose,
netievent_prio = 0xff, /* event type values higher than this
* will be treated as high-priority
* events, which can be processed
/*
 * Each of these event types carries only a socket pointer, so they all
 * alias the generic isc__netievent__socket_t structure.
 */
typedef isc__netievent__socket_t isc__netievent_tcpstoplisten_t;
typedef isc__netievent__socket_t isc__netievent_tcpstopchildlisten_t;
typedef isc__netievent__socket_t isc__netievent_tcpclose_t;
+typedef isc__netievent__socket_t isc__netievent_tcpdnsclose_t;
typedef isc__netievent__socket_t isc__netievent_startread_t;
typedef isc__netievent__socket_t isc__netievent_pauseread_t;
typedef isc__netievent__socket_t isc__netievent_closecb_t;
* Close a TCPDNS socket.
*/
+void
+isc__nm_async_tcpdnsclose(isc__networker_t *worker, isc__netievent_t *ievent0);
+
#define isc__nm_uverr2result(x) \
isc___nm_uverr2result(x, true, __FILE__, __LINE__)
isc_result_t
case netievent_tcpclose:
isc__nm_async_tcpclose(worker, ievent);
break;
+ case netievent_tcpdnsclose:
+ isc__nm_async_tcpdnsclose(worker, ievent);
+ break;
case netievent_closecb:
isc__nm_async_closecb(worker, ievent);
break;
sock->pquota = NULL;
if (sock->timer_initialized) {
- uv_close((uv_handle_t *)&sock->timer, NULL);
sock->timer_initialized = false;
+ uv_timer_stop(&sock->timer);
+ uv_close((uv_handle_t *)&sock->timer, NULL);
}
isc_astack_destroy(sock->inactivehandles);
isc_nmsocket_t *sock = NULL;
size_t handlenum;
bool reuse = false;
+ bool do_close = true;
int refs;
REQUIRE(VALID_NMHANDLE(handle));
handle->doreset(handle->opaque);
}
-
-
/*
* The handle is closed. If the socket has a callback configured
* for that (e.g., to perform cleanup after request processing),
- * call it now.
+ * call it now, or schedule it to run asynchronously.
*/
- bool do_close = true;
if (sock->closehandle_cb != NULL) {
if (sock->tid == isc_nm_tid()) {
sock->closehandle_cb(sock);
} else {
- isc__netievent_closecb_t * event =
+ isc__netievent_closecb_t *event =
isc__nm_get_ievent(sock->mgr,
netievent_closecb);
isc_nmsocket_attach(sock, &event->sock);
isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
(isc__netievent_t *) event);
+
/*
- * If we do this asynchronously then the async event
- * will clean the socket, so clean up the handle from
- * socket and exit.
+ * If we're doing this asynchronously, then the
+ * async event will take care of closing the
+ * socket, so we can clean up the handle
+ * from the socket, but skip calling
+			 * nmsocket_maybe_destroy().
*/
do_close = false;
}
/*
* We do all of this under lock to avoid races with socket
- * destruction.
- * We have to do this now otherwise we might race - at this point
- * the socket is either unused or attached to event->sock.
+ * destruction. We have to do this now, because at this point the
+ * socket is either unused or still attached to event->sock.
*/
LOCK(&sock->lock);
}
UNLOCK(&sock->lock);
- /* Close callback will clean everything up */
- if (!do_close) {
- return;
- }
-
-
- if (atomic_load(&sock->ah) == 0 &&
- !atomic_load(&sock->active) &&
- !atomic_load(&sock->destroying))
+ if (do_close && atomic_load(&sock->ah) == 0 &&
+ !atomic_load(&sock->active) && !atomic_load(&sock->destroying))
{
nmsocket_maybe_destroy(sock);
}
}
}
if (sock->timer_initialized) {
- uv_close((uv_handle_t *)&sock->timer, timer_close_cb);
sock->timer_initialized = false;
+ uv_timer_stop(&sock->timer);
+ uv_close((uv_handle_t *)&sock->timer, timer_close_cb);
} else {
isc_nmsocket_detach(&sock->server);
uv_close(&sock->uv_handle.handle, tcp_close_cb);
timer_close_cb(uv_handle_t *handle) {
	isc_nmsocket_t *sock = uv_handle_get_data(handle);

	INSIST(VALID_NMSOCK(sock));

	/*
	 * Callers clear timer_initialized and stop the timer before
	 * calling uv_close(), so here we only mark the socket closed
	 * and release our reference — presumably the one taken when
	 * the close was scheduled (TODO confirm against callers).
	 */
	atomic_store(&sock->closed, true);
	isc_nmsocket_detach(&sock);
}
}
-void
-isc__nm_tcpdns_close(isc_nmsocket_t *sock) {
+static void
+tcpdns_close_direct(isc_nmsocket_t *sock) {
if (sock->outer != NULL) {
isc_nmsocket_detach(&sock->outer);
}
- uv_close((uv_handle_t *) &sock->timer, timer_close_cb);
+ /* We don't need atomics here, it's all in single network thread */
+ if (sock->timer_initialized) {
+ sock->timer_initialized = false;
+ uv_timer_stop(&sock->timer);
+ uv_close((uv_handle_t *) &sock->timer, timer_close_cb);
+ }
+}
+
+void
+isc__nm_tcpdns_close(isc_nmsocket_t *sock) {
+ REQUIRE(VALID_NMSOCK(sock));
+ REQUIRE(sock->type == isc_nm_tcpdnssocket);
+
+ if (sock->tid == isc_nm_tid()) {
+ tcpdns_close_direct(sock);
+ } else {
+ isc__netievent_tcpdnsclose_t *ievent =
+ isc__nm_get_ievent(sock->mgr, netievent_tcpdnsclose);
+
+ ievent->sock = sock;
+ isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
+ (isc__netievent_t *) ievent);
+ }
+}
+
+void
+isc__nm_async_tcpdnsclose(isc__networker_t *worker, isc__netievent_t *ievent0) {
+ isc__netievent_tcpdnsclose_t *ievent =
+ (isc__netievent_tcpdnsclose_t *) ievent0;
+
+ REQUIRE(worker->id == ievent->sock->tid);
+
+ tcpdns_close_direct(ievent->sock);
}