uv_strerror(nread));
}
session2_penalize(s);
- worker_end_tcp(s);
+ session2_force_close(s);
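+ // Note (editor): worker_end_tcp() was only session2_timer_stop() followed by
+ // session2_force_close(); calling session2_force_close() directly assumes the
+ // close path also takes care of the session timer.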
return;
}
"for this peer, close\n",
kr_straddr(peer));
}
- worker_end_tcp(s);
+ session2_force_close(s);
return protolayer_break(ctx, kr_error(ECONNRESET));
}
kr_straddr(comm->src_addr));
}
}
- worker_end_tcp(s);
+ session2_force_close(s);
return protolayer_break(ctx, kr_error(ECONNRESET));
} else if (trimmed == 0) {
session2_close(s);
} else {
int ret = uv_udp_try_send((uv_udp_t*)handle,
(uv_buf_t *)iov, iovcnt, comm->comm_addr);
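+ // There is no send-queue fallback for UDP here (the queueing branch below is
+ // disabled), so a full OS buffer is reported to the caller as ENOBUFS.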
+ if (ret == UV_EAGAIN)
+ ret = kr_error(ENOBUFS);
+
if (false && ret == UV_EAGAIN) { // XXX: see uv_try_write() below
uv_udp_send_t *req = malloc(sizeof(*req));
req->data = ctx;
session2_transport_udp_pushv_finished);
if (ret)
session2_transport_udp_pushv_finished(req, ret);
- } else {
- session2_transport_pushv_finished(ret, ctx);
+ return ret;
}
+
+ session2_transport_pushv_finished(ret, ctx);
return ret;
}
} else if (handle->type == UV_TCP) {
// XXX: queueing disabled for now if the OS can't accept the data.
// Typically that happens when OS buffers are full.
// We were missing any handling of partial write success, too.
- if (ret == UV_EAGAIN || (ret >= 0 && ret != iovec_sum(iov, iovcnt)))
+ if (ret == UV_EAGAIN || (ret >= 0 && ret != iovec_sum(iov, iovcnt))) {
ret = kr_error(ENOBUFS);
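+ // Without handling of partial write success the stream would be left in an
+ // inconsistent state, so the safe fallback is to close the whole session.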
+ session2_force_close(s);
+ }
+
if (false && ret == UV_EAGAIN) {
uv_write_t *req = malloc(sizeof(*req));
req->data = ctx;
session2_transport_stream_pushv_finished);
if (ret)
session2_transport_stream_pushv_finished(req, ret);
- } else {
- session2_transport_pushv_finished(ret, ctx);
+ return ret;
}
- return ret; // TODO: check again that errors ensure connection closure
+
+ session2_transport_pushv_finished(ret, ctx);
+ return ret;
#if ENABLE_XDP
} else if (handle->type == UV_POLL) {
xdp_handle_data_t *xhd = handle->data;
"=> disconnected from '%s': %s\n",
peer_str, uv_strerror(status));
}
- worker_end_tcp(s);
+ session2_force_close(s);
return status;
}
static int worker_submit(struct session2 *session, struct comm_info *comm, knot_pkt_t *pkt)
{
- if (!session || !pkt)
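+ // Refuse packets arriving on a session that is already closing.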
+ if (!session || !pkt || session->closing)
return kr_error(EINVAL);
const bool is_query = pkt->size > KNOT_WIRE_OFFSET_FLAGS1
return trie_find_tcp_session(the_worker->tcp_waiting, addr);
}
-int worker_end_tcp(struct session2 *session)
-{
- if (!session)
- return kr_error(EINVAL);
-
- session2_timer_stop(session);
- session2_force_close(session);
- return kr_ok();
-}
-
knot_pkt_t *worker_resolve_mk_pkt_dname(knot_dname_t *qname, uint16_t qtype, uint16_t qclass,
const struct kr_qflags *options)
{
wire_buf_movestart(wb);
mp_flush(the_worker->pkt_pool.ctx);
if (status < 0)
- worker_end_tcp(session);
+ session2_force_close(session);
return protolayer_break(ctx, status);
}
/** Destroy the worker (free memory). */
void worker_deinit(void);
-/**
- * End current DNS/TCP session, this disassociates pending tasks from this session
- * which may be freely closed afterwards.
- */
-int worker_end_tcp(struct session2 *session);
-
KR_EXPORT knot_pkt_t *worker_resolve_mk_pkt_dname(knot_dname_t *qname, uint16_t qtype, uint16_t qclass,
const struct kr_qflags *options);