git.ipfire.org Git - thirdparty/bind9.git/commitdiff
clean up comments
author: Evan Hunt <each@isc.org>
Fri, 15 Nov 2019 21:22:13 +0000 (13:22 -0800)
committer: Evan Hunt <each@isc.org>
Mon, 18 Nov 2019 02:59:40 +0000 (18:59 -0800)
lib/isc/netmgr/netmgr-int.h
lib/isc/netmgr/netmgr.c
lib/isc/netmgr/tcp.c
lib/isc/netmgr/udp.c

index c1edf1ca9baff376c13d7bb252ea5496561e40d6..3961a6ff3b9bea8ddcbe615c9ca49681d6d4c1a4 100644 (file)
@@ -49,8 +49,8 @@ typedef struct isc__networker {
        isc_queue_t                *ievents;     /* incoming async events */
        isc_refcount_t             references;
        atomic_int_fast64_t        pktcount;
-       char                       udprecvbuf[65536];
-       bool                       udprecvbuf_inuse;
+       char                       recvbuf[65536];
+       bool                       recvbuf_inuse;
 } isc__networker_t;
 
 /*
@@ -362,8 +362,9 @@ struct isc_nmsocket {
         *  - ah_frees[--ah] = x
         *  - ah_handles[x] = NULL;
         *
-        * XXXWPK for now this is locked with socket->lock, but we
-        * might want to change it to something lockless
+        * XXX: for now this is locked with socket->lock, but we
+        * might want to change it to something lockless in the
+        * future.
         */
        size_t                  ah;
        size_t                  ah_size;
index 3d65bca45726e8aaf251b779ad129c07e6d1da56..ba7094baddb70f41b372431497fe9d7639b766e7 100644 (file)
@@ -136,9 +136,6 @@ isc_nm_start(isc_mem_t *mctx, uint32_t workers) {
 
 /*
  * Free the resources of the network manager.
- *
- * TODO we need to clean up properly - launch all missing callbacks,
- * destroy all listeners, etc.
  */
 static void
 nm_destroy(isc_nm_t **mgr0) {
@@ -351,14 +348,16 @@ nm_thread(void *worker0) {
                UNLOCK(&worker->lock);
 
                if (worker->finished) {
-                       /* TODO walk the handles and free them! */
                        /*
                         * We need to launch the loop one more time
-                        * to make sure that worker->async is closed,
-                        * so that we can close the loop cleanly.
-                        * We don't care about the callback as in this
-                        * case we can be certain that uv_run will
-                        * eat this event.
+                        * in UV_RUN_NOWAIT mode to make sure that
+                        * worker->async is closed, so that we can
+                        * close the loop cleanly.  We don't care
+                        * about the callback, as in this case we can
+                        * be certain that uv_run() will eat the event.
+                        *
+                        * XXX: We may need to take steps here to ensure
+                        * that all netmgr handles are freed.
                         */
                        uv_close((uv_handle_t *)&worker->async, NULL);
                        uv_run(&worker->loop, UV_RUN_NOWAIT);
@@ -367,8 +366,13 @@ nm_thread(void *worker0) {
 
                if (r == 0) {
                        /*
-                        * TODO it should never happen - we don't have
-                        * any sockets we're listening on?
+                        * XXX: uv_run() in UV_RUN_DEFAULT mode returns
+                        * zero if there are still active uv_handles.
+                        * This shouldn't happen, but if it does, we just
+                        * keep checking until they're done. We nap for a
+                        * tenth of a second on each loop so as not to burn
+                        * CPU. (We could do a conditional wait instead,
+                        * but it seems like overkill for this case.)
                         */
 #ifdef WIN32
                        _sleep(100);
@@ -460,7 +464,7 @@ isc__nm_get_ievent(isc_nm_t *mgr, isc__netievent_type type) {
        isc__netievent_storage_t *event =
                isc_mem_get(mgr->mctx, sizeof(isc__netievent_storage_t));
 
-       /* XXX: use a memory pool? */
+       /* XXX: Use a memory pool? */
        *event = (isc__netievent_storage_t) {
                .ni.type = type
        };
@@ -732,12 +736,11 @@ isc__nm_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf) {
        REQUIRE(isc__nm_in_netthread());
        REQUIRE(size <= 65536);
 
-       /* TODO that's for UDP only! */
        worker = &sock->mgr->workers[sock->tid];
-       INSIST(!worker->udprecvbuf_inuse);
+       INSIST(!worker->recvbuf_inuse);
 
-       buf->base = worker->udprecvbuf;
-       worker->udprecvbuf_inuse = true;
+       buf->base = worker->recvbuf;
+       worker->recvbuf_inuse = true;
        buf->len = size;
 }
 
@@ -752,10 +755,10 @@ isc__nm_free_uvbuf(isc_nmsocket_t *sock, const uv_buf_t *buf) {
        }
        worker = &sock->mgr->workers[sock->tid];
 
-       REQUIRE(worker->udprecvbuf_inuse);
-       REQUIRE(buf->base == worker->udprecvbuf);
+       REQUIRE(worker->recvbuf_inuse);
+       REQUIRE(buf->base == worker->recvbuf);
 
-       worker->udprecvbuf_inuse = false;
+       worker->recvbuf_inuse = false;
 }
 
 static isc_nmhandle_t *
index 59861b604f5ca62337656c2412b38096983fba43..4b6c9ca9a4787401a90781260bedcc45aeba3017 100644 (file)
@@ -116,8 +116,11 @@ tcp_connect_cb(uv_connect_t *uvreq, int status) {
                handle = isc__nmhandle_get(sock, NULL, NULL);
                req->cb.connect(handle, ISC_R_SUCCESS, req->cbarg);
        } else {
-               /* TODO handle it properly, free sock, translate code */
-               req->cb.connect(NULL, ISC_R_FAILURE, req->cbarg);
+               /*
+                * TODO:
+                * Handle the connect error properly and free the socket.
+                */
+               req->cb.connect(NULL, isc__nm_uverr2result(status), req->cbarg);
        }
 
        isc__nm_uvreq_put(&req, sock);
@@ -367,8 +370,9 @@ read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) {
        sock->rcb.recv(sock->tcphandle, NULL, sock->rcbarg);
 
        /*
-        * XXXWPK TODO clean up handles, close the connection,
-        * reclaim quota
+        * We don't need to clean up now; the socket will be closed and
+        * resources and quota reclaimed when handle is freed in
+        * isc__nm_tcp_close().
         */
 }
 
@@ -453,13 +457,10 @@ tcp_connection_cb(uv_stream_t *server, int status) {
                if (result == ISC_R_QUOTA || result == ISC_R_SOFTQUOTA) {
                        ssock->overquota = true;
                }
-               /* XXXWPK TODO LOG */
+               /* TODO: Log the error. */
        }
 }
 
-/*
- * isc__nm_tcp_send sends buf to a peer on a socket.
- */
 isc_result_t
 isc__nm_tcp_send(isc_nmhandle_t *handle, isc_region_t *region,
                 isc_nm_cb_t cb, void *cbarg)
@@ -579,7 +580,6 @@ tcp_close_direct(isc_nmsocket_t *sock) {
                isc_quota_detach(&sock->quota);
 
                if (ssock->overquota) {
-                       /* XXXWPK TODO we should loop here */
                        isc_result_t result = accept_connection(ssock);
                        if (result != ISC_R_QUOTA && result != ISC_R_SOFTQUOTA)
                        {
index e14dc2dd93d8626c8db20132e1b843ab3b08fe7f..0aae5577e0decdf59ab4c9ef804064ae7f4663ab 100644 (file)
@@ -277,7 +277,11 @@ udp_recv_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf,
 
        REQUIRE(VALID_NMSOCK(sock));
 
-       /* XXXWPK TODO handle it! */
+       /*
+        * We can ignore the flags; currently the only one in use by libuv
+        * is UV_UDP_PARTIAL, which only occurs if the receive buffer is
+        * too small, which can't happen here.
+        */
        UNUSED(flags);
 
        /*