daemon/io: fixed corrupted handles with exhausted TCP clients
author    Marek Vavruša <marek.vavrusa@nic.cz>
          Tue, 5 May 2015 21:21:49 +0000 (23:21 +0200)
committer Marek Vavruša <marek.vavrusa@nic.cz>
          Tue, 5 May 2015 21:21:49 +0000 (23:21 +0200)
The TCP layer allowed parallel processing of multiple requests over
one client socket; however, if the client socket disconnected, the
running tasks were left reading from a bad handle.
Now each task takes ownership of the handle until it is finished;
only then is the handle returned to the loop and closed.
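
A minimal standalone sketch of this hand-off, assuming io_stop_read()/io_start_read()
are thin wrappers around uv_read_stop()/uv_read_start(); the on_alloc/on_read
callbacks and the task_take_handle()/task_return_handle() names are illustrative,
not the daemon's actual API:

    #include <uv.h>
    #include <stdlib.h>

    static void on_alloc(uv_handle_t *handle, size_t hint, uv_buf_t *buf)
    {
            buf->base = malloc(hint);
            buf->len = buf->base ? hint : 0;
    }

    static void on_read(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf)
    {
            /* ... parse the query and start a task here ... */
            free(buf->base);
    }

    /* The task takes the handle: reads stop, and the loop reference is
     * dropped so that uv_has_ref() later doubles as an "exclusively
     * taken by a task" flag. */
    void task_take_handle(uv_stream_t *handle)
    {
            uv_unref((uv_handle_t *)handle);
            uv_read_stop(handle);
    }

    /* The task finished: hand the handle back to the loop and resume
     * reading from the client socket. */
    void task_return_handle(uv_stream_t *handle)
    {
            if (!uv_has_ref((uv_handle_t *)handle)) {
                    uv_ref((uv_handle_t *)handle);
                    uv_read_start(handle, on_alloc, on_read);
            }
    }

The uv_has_ref() check keeps the return path idempotent: a handle that was
never taken by a task (e.g. a UDP source) is left untouched.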

daemon/io.c
daemon/worker.c
daemon/worker.h

index 298fa0d4523b337e51d290b04b964ab196215281..a62bea6997a3fda9d9ff1ce1e236a3be01c06e7a 100644 (file)
@@ -105,7 +105,15 @@ static void tcp_recv(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf)
 
        knot_pkt_t *query = knot_pkt_new(buf->base + 2, nbytes, worker->mm);
        query->max_size = sizeof(worker->bufs.wire);
-       worker_exec(worker, (uv_handle_t *)handle, query, NULL);
+       int ret = worker_exec(worker, (uv_handle_t *)handle, query, NULL);
+       if (ret == 0) {
+               /* Push-pull: stop reading from this handle until
+                * the task is finished. Since the handle keeps no track of
+                * pending tasks, it might be freed before the task finishes,
+                * leading to various errors. */
+               uv_unref((uv_handle_t *)handle);
+               io_stop_read((uv_handle_t *)handle);
+       }
        knot_pkt_free(&query);
 }
 
index 5a812309fdd47689edd380ade4f8920e15b6960b..26a913a37fd085cf2837334d71ee218e7fb9a3bc 100644 (file)
@@ -104,6 +104,12 @@ static struct qr_task *qr_task_create(struct worker_ctx *worker, uv_handle_t *ha
 static void qr_task_free(uv_handle_t *handle)
 {
        struct qr_task *task = handle->data;
+       /* Return handle to the event loop in case
+        * it was exclusively taken by this task. */
+       if (!uv_has_ref(task->source.handle)) {
+               uv_ref(task->source.handle);
+               io_start_read(task->source.handle);
+       }
        mp_delete(task->req.pool.ctx);
 }
 
@@ -165,7 +171,10 @@ static void qr_task_on_connect(uv_connect_t *connect, int status)
 static int qr_task_finalize(struct qr_task *task, int state)
 {
        kr_resolve_finish(&task->req, state);
-       qr_task_send(task, task->source.handle, (struct sockaddr *)&task->source.addr, task->req.answer);
+       int ret = qr_task_send(task, task->source.handle, (struct sockaddr *)&task->source.addr, task->req.answer);
+       if (ret != 0) { /* Broken connection */
+               uv_close((uv_handle_t *)&task->timeout, qr_task_free);
+       }
        return state == KNOT_STATE_DONE ? 0 : kr_error(EIO);
 }
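
This cleanup leans on uv_close() being asynchronous: the close callback
(qr_task_free above) only runs on a later loop iteration, once libuv is
finished with the handle, which makes it a safe place to release the task's
memory pool and return the source handle. A hypothetical standalone
illustration of that ordering (struct task and task_closed are made-up names):

    #include <uv.h>
    #include <stdlib.h>

    struct task {
            uv_timer_t timeout;
            /* per-request state ... */
    };

    static void task_closed(uv_handle_t *handle)
    {
            /* Runs on a later loop iteration; libuv no longer touches
             * the handle, so freeing the enclosing task is safe here. */
            free(handle->data);
    }

    int main(void)
    {
            struct task *t = malloc(sizeof(*t));
            uv_timer_init(uv_default_loop(), &t->timeout);
            t->timeout.data = t;
            uv_close((uv_handle_t *)&t->timeout, task_closed);
            return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
    }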
 
index 503aba431d7969f0f2dbfa6f664f423a5ac5dc18..1f6afae23bd293c741b4cd6380036e2b2f009734 100644 (file)
@@ -27,9 +27,9 @@ struct worker_ctx {
        struct engine *engine;
        uv_loop_t *loop;
        mm_ctx_t *mm;
-    struct {
-        uint8_t wire[KNOT_WIRE_MAX_PKTSIZE];
-    } bufs;
+       struct {
+               uint8_t wire[KNOT_WIRE_MAX_PKTSIZE];
+       } bufs;
 };
 
 /**