From 4a8688ece5c02fee071f127c3f10bb1ba0caab43 Mon Sep 17 00:00:00 2001 From: Grigorii Demidov Date: Wed, 12 Sep 2018 11:30:55 +0200 Subject: [PATCH] daemon: fix errors introduced by cherrypicking --- daemon/io.c | 19 ++++++++++++++----- daemon/tls.c | 34 +++++++++++++++++++++++++++++----- 2 files changed, 43 insertions(+), 10 deletions(-) diff --git a/daemon/io.c b/daemon/io.c index ae39261b6..c82462f7f 100644 --- a/daemon/io.c +++ b/daemon/io.c @@ -277,25 +277,34 @@ static void _tcp_accept(uv_stream_t *master, int status, bool tls) worker->too_many_open = true; worker->rconcurrent_highwatermark = worker->stats.rconcurrent; } + /* Since res isn't OK, struct session wasn't allocated / borrowed. + * We must release the client handle only. + */ worker_iohandle_release(worker, client); return; } + + /* struct session was allocated / borrowed from the memory pool. */ + struct session *session = client->data; + assert(session->outgoing == false); + if (uv_accept(master, client) != 0) { - uv_close((uv_handle_t *)client, io_release); + /* Close the session: close underlying uv handles and + * deallocate (or return to the memory pool) its memory. */ + worker_session_close(session); return; } /* Set deadlines for TCP connection and start reading. * It will re-check every half of a request time limit if the connection * is idle and should be terminated, this is an educated guess. */ - struct session *session = client->data; - assert(session->outgoing == false); struct sockaddr *addr = &(session->peer.ip); int addr_len = sizeof(union inaddr); int ret = uv_tcp_getpeername((uv_tcp_t *)client, addr, &addr_len); if (ret || addr->sa_family == AF_UNSPEC) { - worker_iohandle_release(worker, client); + /* Close the session: close underlying uv handles and + * deallocate (or return to the memory pool) its memory. 
*/ worker_session_close(session); return; } @@ -424,7 +433,7 @@ int tcp_bindfd_tls(uv_tcp_t *handle, int fd) int io_create(uv_loop_t *loop, uv_handle_t *handle, int type, unsigned family) { - int ret = 0; + int ret = -1; if (type == SOCK_DGRAM) { ret = uv_udp_init(loop, (uv_udp_t *)handle); } else if (type == SOCK_STREAM) { diff --git a/daemon/tls.c b/daemon/tls.c index 2d36a1d84..0b31dca25 100644 --- a/daemon/tls.c +++ b/daemon/tls.c @@ -152,25 +152,49 @@ static ssize_t kres_gnutls_vec_push(gnutls_transport_ptr_t h, const giovec_t * i > 0: number of bytes written (can be less than the supplied buffer size). < 0: negative error code (UV_EAGAIN is returned if no data can be sent immediately). */ - if (ret != UV_EAGAIN) { - /* Either we have successful write here or - * error code other then UV_EAGAIN. + if ((ret == total_len) || (ret < 0 && ret != UV_EAGAIN)) { + /* Either all the data were buffered by libuv or + * uv_try_write() has returned an error code other than UV_EAGAIN. * Return. */ return ret; } + /* Since we are here, the expression below is true: + * (ret != total_len) && (ret >= 0 || ret == UV_EAGAIN) + * or, equivalently, + * (ret != total_len && ret >= 0) || (ret != total_len && ret == UV_EAGAIN) + * i.e. either a partial write occurred or UV_EAGAIN was returned. + * Proceed: copy the remaining data to owned memory and perform an async write. + */ + if (ret == UV_EAGAIN) { + /* No data were buffered, so we must buffer all the data. 
*/ + ret = 0; + } } /* Fallback when the queue is full, and it's not possible to do an immediate write */ - char *buf = malloc(total_len); + char *buf = malloc(total_len - ret); if (buf != NULL) { + /* Skip data written in the partial write */ + int to_skip = ret; /* Copy the buffer into owned memory */ size_t off = 0; for (int i = 0; i < iovcnt; ++i) { + if (to_skip > 0) { + /* Ignore current buffer if it's all skipped */ + if (to_skip >= uv_buf[i].len) { + to_skip -= uv_buf[i].len; + continue; + } + /* Skip only part of the buffer */ + uv_buf[i].base += to_skip; + uv_buf[i].len -= to_skip; + to_skip = 0; + } memcpy(buf + off, uv_buf[i].base, uv_buf[i].len); off += uv_buf[i].len; } uv_buf[0].base = buf; - uv_buf[0].len = total_len; + uv_buf[0].len = off; /* Create an asynchronous write request */ uv_write_t *write_req = calloc(1, sizeof(uv_write_t)); -- 2.47.2