git.ipfire.org Git - thirdparty/bind9.git/commitdiff
Fix a race in tcpdns close with uv_close on timer
author Witold Kręcicki <wpk@isc.org>
Fri, 6 Dec 2019 21:25:52 +0000 (22:25 +0100)
committer Witold Kręcicki <wpk@isc.org>
Mon, 9 Dec 2019 20:43:45 +0000 (21:43 +0100)
stop timers before closing

netmgr: tcpdns_close needs to be asynchronous, it manipulates sock->timer

lib/isc/netmgr/netmgr-int.h
lib/isc/netmgr/netmgr.c
lib/isc/netmgr/tcp.c
lib/isc/netmgr/tcpdns.c

index 1d7b09db328647489b946958ab4d6d83ed669da7..b2a201c55320143a0af5b9e67c3bdf5eca0a0953 100644 (file)
@@ -122,6 +122,7 @@ typedef enum isc__netievent_type {
        netievent_udpstoplisten,
        netievent_tcpstoplisten,
        netievent_tcpclose,
+       netievent_tcpdnsclose,
        netievent_prio = 0xff,  /* event type values higher than this
                                 * will be treated as high-priority
                                 * events, which can be processed
@@ -194,6 +195,7 @@ typedef isc__netievent__socket_t isc__netievent_udpstoplisten_t;
 typedef isc__netievent__socket_t isc__netievent_tcpstoplisten_t;
 typedef isc__netievent__socket_t isc__netievent_tcpstopchildlisten_t;
 typedef isc__netievent__socket_t isc__netievent_tcpclose_t;
+typedef isc__netievent__socket_t isc__netievent_tcpdnsclose_t;
 typedef isc__netievent__socket_t isc__netievent_startread_t;
 typedef isc__netievent__socket_t isc__netievent_pauseread_t;
 typedef isc__netievent__socket_t isc__netievent_closecb_t;
@@ -644,6 +646,9 @@ isc__nm_tcpdns_close(isc_nmsocket_t *sock);
  * Close a TCPDNS socket.
  */
 
+void
+isc__nm_async_tcpdnsclose(isc__networker_t *worker, isc__netievent_t *ievent0);
+
 #define isc__nm_uverr2result(x) \
        isc___nm_uverr2result(x, true, __FILE__, __LINE__)
 isc_result_t
index 09f5ef3b79e7711827921de79e3f2620e118336a..8d1c71168327f2c309b0c281d394bcc4c4be9f5f 100644 (file)
@@ -550,6 +550,9 @@ process_queue(isc__networker_t *worker, isc_queue_t *queue) {
                case netievent_tcpclose:
                        isc__nm_async_tcpclose(worker, ievent);
                        break;
+               case netievent_tcpdnsclose:
+                       isc__nm_async_tcpdnsclose(worker, ievent);
+                       break;
                case netievent_closecb:
                        isc__nm_async_closecb(worker, ievent);
                        break;
@@ -675,8 +678,9 @@ nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree) {
        sock->pquota = NULL;
 
        if (sock->timer_initialized) {
-               uv_close((uv_handle_t *)&sock->timer, NULL);
                sock->timer_initialized = false;
+               uv_timer_stop(&sock->timer);
+               uv_close((uv_handle_t *)&sock->timer, NULL);
        }
 
        isc_astack_destroy(sock->inactivehandles);
@@ -1022,6 +1026,7 @@ isc_nmhandle_unref(isc_nmhandle_t *handle) {
        isc_nmsocket_t *sock = NULL;
        size_t handlenum;
        bool reuse = false;
+       bool do_close = true;
        int refs;
 
        REQUIRE(VALID_NMHANDLE(handle));
@@ -1039,28 +1044,28 @@ isc_nmhandle_unref(isc_nmhandle_t *handle) {
                handle->doreset(handle->opaque);
        }
 
-
-
        /*
         * The handle is closed. If the socket has a callback configured
         * for that (e.g., to perform cleanup after request processing),
-        * call it now.
+        * call it now, or schedule it to run asynchronously.
         */
-       bool do_close = true;
        if (sock->closehandle_cb != NULL) {
                if (sock->tid == isc_nm_tid()) {
                        sock->closehandle_cb(sock);
                } else {
-                       isc__netievent_closecb_t * event =
+                       isc__netievent_closecb_t *event =
                                isc__nm_get_ievent(sock->mgr,
                                                   netievent_closecb);
                        isc_nmsocket_attach(sock, &event->sock);
                        isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
                                               (isc__netievent_t *) event);
+
                        /*
-                        * If we do this asynchronously then the async event
-                        * will clean the socket, so clean up the handle from
-                        * socket and exit.
+                        * If we're doing this asynchronously, then the
+                        * async event will take care of closing the
+                        * socket, so we can clean up the handle
+                        * from the socket, but skip calling
+                        * nmsocket_maybe_destroy().
                         */
                        do_close = false;
                }
@@ -1068,9 +1073,8 @@ isc_nmhandle_unref(isc_nmhandle_t *handle) {
 
        /*
         * We do all of this under lock to avoid races with socket
-        * destruction.
-        * We have to do this now otherwise we might race - at this point
-        * the socket is either unused or attached to event->sock.
+        * destruction.  We have to do this now, because at this point the
+        * socket is either unused or still attached to event->sock.
         */
        LOCK(&sock->lock);
 
@@ -1092,15 +1096,8 @@ isc_nmhandle_unref(isc_nmhandle_t *handle) {
        }
        UNLOCK(&sock->lock);
 
-       /* Close callback will clean everything up */
-       if (!do_close) {
-               return;
-       }
-
-
-       if (atomic_load(&sock->ah) == 0 &&
-           !atomic_load(&sock->active) &&
-           !atomic_load(&sock->destroying))
+       if (do_close && atomic_load(&sock->ah) == 0 &&
+           !atomic_load(&sock->active) && !atomic_load(&sock->destroying))
        {
                nmsocket_maybe_destroy(sock);
        }
index 8a9eac74f1c4f06253dfc135a150b793e32dc46a..72a402643ed6bc0f5bf63826ca577900a6d90029 100644 (file)
@@ -1003,8 +1003,9 @@ tcp_close_direct(isc_nmsocket_t *sock) {
                }
        }
        if (sock->timer_initialized) {
-               uv_close((uv_handle_t *)&sock->timer, timer_close_cb);
                sock->timer_initialized = false;
+               uv_timer_stop(&sock->timer);
+               uv_close((uv_handle_t *)&sock->timer, timer_close_cb);
        } else {
                isc_nmsocket_detach(&sock->server);
                uv_close(&sock->uv_handle.handle, tcp_close_cb);
index 2b719e8191a099537ec6bb900df184fe5f588a89..1dd19daf342bbce957a697254fa6d1b5f9350ac1 100644 (file)
@@ -79,7 +79,6 @@ static void
 timer_close_cb(uv_handle_t *handle) {
        isc_nmsocket_t *sock = (isc_nmsocket_t *) uv_handle_get_data(handle);
        INSIST(VALID_NMSOCK(sock));
-       sock->timer_initialized = false;
        atomic_store(&sock->closed, true);
        isc_nmsocket_detach(&sock);
 }
@@ -489,10 +488,42 @@ isc__nm_tcpdns_send(isc_nmhandle_t *handle, isc_region_t *region,
 }
 
 
-void
-isc__nm_tcpdns_close(isc_nmsocket_t *sock) {
+static void
+tcpdns_close_direct(isc_nmsocket_t *sock) {
        if (sock->outer != NULL) {
                isc_nmsocket_detach(&sock->outer);
        }
-       uv_close((uv_handle_t *) &sock->timer, timer_close_cb);
+       /* We don't need atomics here; it is all in a single network thread */
+       if (sock->timer_initialized) {
+               sock->timer_initialized = false;
+               uv_timer_stop(&sock->timer);
+               uv_close((uv_handle_t *) &sock->timer, timer_close_cb);
+       }
+}
+
+void
+isc__nm_tcpdns_close(isc_nmsocket_t *sock) {
+       REQUIRE(VALID_NMSOCK(sock));
+       REQUIRE(sock->type == isc_nm_tcpdnssocket);
+
+       if (sock->tid == isc_nm_tid()) {
+               tcpdns_close_direct(sock);
+       } else {
+               isc__netievent_tcpdnsclose_t *ievent =
+                       isc__nm_get_ievent(sock->mgr, netievent_tcpdnsclose);
+
+               ievent->sock = sock;
+               isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
+                                      (isc__netievent_t *) ievent);
+       }
+}
+
+void
+isc__nm_async_tcpdnsclose(isc__networker_t *worker, isc__netievent_t *ievent0) {
+       isc__netievent_tcpdnsclose_t *ievent =
+               (isc__netievent_tcpdnsclose_t *) ievent0;
+
+       REQUIRE(worker->id == ievent->sock->tid);
+
+       tcpdns_close_direct(ievent->sock);
 }