git.ipfire.org Git - thirdparty/knot-resolver.git/commitdiff
daemon: proper connection closures on queue overflow
author    Oto Šťáva <oto.stava@nic.cz>
          Tue, 11 Jun 2024 11:20:16 +0000 (13:20 +0200)
committer Vladimír Čunát <vladimir.cunat@nic.cz>
          Mon, 22 Jul 2024 15:57:47 +0000 (17:57 +0200)
daemon/io.c
daemon/proxyv2.c
daemon/session2.c
daemon/worker.c
daemon/worker.h

daemon/io.c
index 1154d77df6dd2a61d264945fabb37bdbad4476b5..6a72d0494d4753472e42a7747e5f14a84ed90b5f 100644
@@ -328,7 +328,7 @@ static void tcp_recv(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf)
                                       uv_strerror(nread));
                }
                session2_penalize(s);
-               worker_end_tcp(s);
+               session2_force_close(s);
                return;
        }
 
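Context for the s/worker_end_tcp/session2_force_close/ substitution above: the retired wrapper (removed from daemon/worker.c further down) merely NULL-checked the session, stopped its timer, and force-closed it. A minimal sketch of why the direct call suffices, assuming session2_force_close() handles timer teardown itself; the types and stub bodies below are illustrative, not the daemon's real code:

#include <stdbool.h>

/* Trimmed stand-in for the daemon's session type; sketch only. */
struct session2 { bool closing; };

static void session2_timer_stop(struct session2 *s) { (void)s; /* stop idle timer (stub) */ }

/* Assumption: the real session2_force_close() performs its own timer
 * teardown, which is what makes calling it directly a safe replacement
 * for the removed worker_end_tcp() wrapper. */
static void session2_force_close(struct session2 *s)
{
	session2_timer_stop(s);
	s->closing = true; /* worker_submit() now rejects packets on closing sessions */
}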
daemon/proxyv2.c
index 110d341578f58827739ccc1dfa7803e322be8938..31eeb624867744c2f27d886d79cb5b5f5d55a8e6 100644
@@ -407,7 +407,7 @@ static enum protolayer_iter_cb_result pl_proxyv2_stream_unwrap(
                                                "for this peer, close\n",
                                                kr_straddr(peer));
                        }
-                       worker_end_tcp(s);
+                       session2_force_close(s);
                        return protolayer_break(ctx, kr_error(ECONNRESET));
                }
 
@@ -424,7 +424,7 @@ static enum protolayer_iter_cb_result pl_proxyv2_stream_unwrap(
                                                        kr_straddr(comm->src_addr));
                                }
                        }
-                       worker_end_tcp(s);
+                       session2_force_close(s);
                        return protolayer_break(ctx, kr_error(ECONNRESET));
                } else if (trimmed == 0) {
                        session2_close(s);
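The two proxyv2 hunks separate a hard failure (a malformed or over-long PROXYv2 header) from a benign empty remainder. A hypothetical condensation of that decision, with the session functions stubbed out and a local approximation of kr_error() (knot-resolver's convention of returning errno values negated):

#include <errno.h>
#include <sys/types.h>

struct session2;
static void session2_close(struct session2 *s) { (void)s; /* graceful close (stub) */ }
static void session2_force_close(struct session2 *s) { (void)s; /* immediate teardown (stub) */ }

/* Local approximation of kr_error() for this sketch only. */
static int kr_error(int err) { return err > 0 ? -err : err; }

/* Hypothetical condensation of the two unwrap branches above. */
static ssize_t handle_proxy_header(struct session2 *s, int header_ok, ssize_t trimmed)
{
	if (!header_ok || trimmed < 0) {
		session2_force_close(s); /* hard failure: drop the peer */
		return kr_error(ECONNRESET);
	}
	if (trimmed == 0) {
		session2_close(s);       /* nothing usable: close gracefully */
		return 0;
	}
	return trimmed;                  /* hand the payload to the next layer */
}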
daemon/session2.c
index 67d1c32dbb8fc52369eeab524a6c05b9994627eb..da54beeeab4003b64197e9f85c2736f4dda8ac25 100644
@@ -1442,6 +1442,9 @@ static int session2_transport_pushv(struct session2 *s,
                        } else {
                                int ret = uv_udp_try_send((uv_udp_t*)handle,
                                                (uv_buf_t *)iov, iovcnt, comm->comm_addr);
+                               if (ret == UV_EAGAIN)
+                                       ret = kr_error(ENOBUFS);
+
                                if (false && ret == UV_EAGAIN) { // XXX: see uv_try_write() below
                                        uv_udp_send_t *req = malloc(sizeof(*req));
                                        req->data = ctx;
@@ -1453,9 +1456,10 @@ static int session2_transport_pushv(struct session2 *s,
                                                        session2_transport_udp_pushv_finished);
                                        if (ret)
                                                session2_transport_udp_pushv_finished(req, ret);
-                               } else {
-                                       session2_transport_pushv_finished(ret, ctx);
+                                       return ret;
                                }
+
+                               session2_transport_pushv_finished(ret, ctx);
                                return ret;
                        }
                } else if (handle->type == UV_TCP) {
@@ -1463,8 +1467,11 @@ static int session2_transport_pushv(struct session2 *s,
                        // XXX: queueing disabled for now if the OS can't accept the data.
                        //      Typically that happens when OS buffers are full.
                        //      We were missing any handling of partial write success, too.
-                       if (ret == UV_EAGAIN || (ret >= 0 && ret != iovec_sum(iov, iovcnt)))
+                       if (ret == UV_EAGAIN || (ret >= 0 && ret != iovec_sum(iov, iovcnt))) {
                                ret = kr_error(ENOBUFS);
+                               session2_force_close(s);
+                       }
+
                        if (false && ret == UV_EAGAIN) {
                                uv_write_t *req = malloc(sizeof(*req));
                                req->data = ctx;
@@ -1475,10 +1482,11 @@ static int session2_transport_pushv(struct session2 *s,
                                                session2_transport_stream_pushv_finished);
                                if (ret)
                                        session2_transport_stream_pushv_finished(req, ret);
-                       } else {
-                               session2_transport_pushv_finished(ret, ctx);
+                               return ret;
                        }
-                       return ret; // TODO: check again that errors ensure connection closure
+
+                       session2_transport_pushv_finished(ret, ctx);
+                       return ret;
 #if ENABLE_XDP
                } else if (handle->type == UV_POLL) {
                        xdp_handle_data_t *xhd = handle->data;
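The session2.c changes lean on libuv's non-blocking try-send calls: uv_udp_try_send() and uv_try_write() return the number of bytes accepted or a negative UV_* error, and never queue anything themselves. With the retry-queueing path parked behind if (false && ...) and returning early, the new policy is: a full-buffer condition becomes ENOBUFS, and for TCP a refused or short write additionally force-closes the connection, so the peer never sees a silently truncated stream. A standalone sketch of that policy (helper names are mine, not the daemon's; uv_close() stands in for session2_force_close()):

#include <errno.h>
#include <uv.h>

/* Sum the byte lengths of a buffer array, as iovec_sum() does in session2.c. */
static size_t iovec_sum(const uv_buf_t *iov, unsigned iovcnt)
{
	size_t total = 0;
	for (unsigned i = 0; i < iovcnt; ++i)
		total += iov[i].len;
	return total;
}

/* Mirrors the UDP branch: EAGAIN is mapped to ENOBUFS and reported,
 * but a datagram socket has no connection to tear down. */
static int udp_push(uv_udp_t *handle, const uv_buf_t *iov, unsigned iovcnt,
                    const struct sockaddr *addr)
{
	int ret = uv_udp_try_send(handle, iov, iovcnt, addr);
	if (ret == UV_EAGAIN)
		ret = -ENOBUFS; /* kr_error(ENOBUFS) in the daemon */
	return ret;
}

/* Mirrors the TCP branch: EAGAIN *and* a partial write both count as
 * failure, and failure tears the stream down. */
static int tcp_push(uv_stream_t *handle, const uv_buf_t *iov, unsigned iovcnt)
{
	int ret = uv_try_write(handle, iov, iovcnt);
	if (ret == UV_EAGAIN || (ret >= 0 && (size_t)ret != iovec_sum(iov, iovcnt))) {
		ret = -ENOBUFS;
		uv_close((uv_handle_t *)handle, NULL);
	}
	return ret;
}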
daemon/worker.c
index f620904c58db7b375e955506299992af12d87af0..caf11e55f4ee4483db47ea90716e42f4675241a2 100644
@@ -585,7 +585,7 @@ int qr_task_on_send(struct qr_task *task, struct session2 *s, int status)
                                                "=> disconnected from '%s': %s\n",
                                                peer_str, uv_strerror(status));
                        }
-                       worker_end_tcp(s);
+                       session2_force_close(s);
                        return status;
                }
 
@@ -1287,7 +1287,7 @@ static int qr_task_step(struct qr_task *task,
 
 static int worker_submit(struct session2 *session, struct comm_info *comm, knot_pkt_t *pkt)
 {
-       if (!session || !pkt)
+       if (!session || !pkt || session->closing)
                return kr_error(EINVAL);
 
        const bool is_query = pkt->size > KNOT_WIRE_OFFSET_FLAGS1
@@ -1469,16 +1469,6 @@ static struct session2* worker_find_tcp_waiting(const struct sockaddr* addr)
        return trie_find_tcp_session(the_worker->tcp_waiting, addr);
 }
 
-int worker_end_tcp(struct session2 *session)
-{
-       if (!session)
-               return kr_error(EINVAL);
-
-       session2_timer_stop(session);
-       session2_force_close(session);
-       return kr_ok();
-}
-
 knot_pkt_t *worker_resolve_mk_pkt_dname(knot_dname_t *qname, uint16_t qtype, uint16_t qclass,
                                   const struct kr_qflags *options)
 {
@@ -2188,7 +2178,7 @@ exit:
        wire_buf_movestart(wb);
        mp_flush(the_worker->pkt_pool.ctx);
        if (status < 0)
-               worker_end_tcp(session);
+               session2_force_close(session);
        return protolayer_break(ctx, status);
 }
 
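With forced closures now triggered in more places, worker_submit() gains a guard so that packets arriving on a session already in teardown cannot feed new work into the resolver. A sketch of the pattern, with trimmed stand-ins for the daemon's types (only the fields the guard reads are kept):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

/* Stand-ins for struct session2 and knot_pkt_t; sketch only. */
struct session2 { bool closing; };
typedef struct { size_t size; } knot_pkt_t;

static int kr_error(int err) { return err > 0 ? -err : err; }

/* Mirrors the entry check added to worker_submit() above: a NULL argument
 * and a closing session are both treated as invalid input. */
static int submit_guard(struct session2 *session, knot_pkt_t *pkt)
{
	if (!session || !pkt || session->closing)
		return kr_error(EINVAL);
	return 0; /* ...continue with query/answer dispatch... */
}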
daemon/worker.h
index 8f89e58661918578ef53719f591d3663e82923aa..42614cc94099f77683791d3bf511a703658459f1 100644
@@ -28,12 +28,6 @@ int worker_init(void);
 /** Destroy the worker (free memory). */
 void worker_deinit(void);
 
-/**
- * End current DNS/TCP session, this disassociates pending tasks from this session
- * which may be freely closed afterwards.
- */
-int worker_end_tcp(struct session2 *session);
-
 KR_EXPORT knot_pkt_t *worker_resolve_mk_pkt_dname(knot_dname_t *qname, uint16_t qtype, uint16_t qclass,
                                   const struct kr_qflags *options);