#define IDLE_TIMEOUT 1000000 // ns (THREAD_CPUTIME); if exceeded, continue processing after next poll phase
#define PHASE_UDP_TIMEOUT 400000 // ns (THREAD_CPUTIME); switch between udp, non-udp phases
#define PHASE_NON_UDP_TIMEOUT 400000 // ns (THREAD_CPUTIME); after timeout or emptying queue
-#define MAX_WAITING_REQS 10000 // if exceeded, process single deferred request immediatelly in poll phase
- // TODO measure memory usage instead
+#define MAX_WAITING_REQS_SIZE (64 * 1024 * 1024) // bytes; if exceeded, some deferred requests are processed in poll phase
+ // a single TCP session allocates a wire buffer larger than 64 KiB
+ // TODO check whether all important allocations are counted;
+ // several things are intentionally not counted: tasks and subsessions (which are not deferred once created), uv handles, queue overhead, ...;
+ // the payload is counted either as part of the session wire buffer (for streams) or as part of the iter ctx (for datagrams)
#define VERBOSE_LOG(...) kr_log_debug(DEFER, " | " __VA_ARGS__)
protolayer_iter_ctx_queue_t queues[QUEUES_CNT];
int waiting_requests = 0;
+ptrdiff_t waiting_requests_size = 0; // signed for non-negativeness asserts
int queue_ix = QUEUES_CNT; // MIN( last popped queue, first non-empty queue )
enum phase {
struct protolayer_data h;
protolayer_iter_ctx_queue_t queue; // properly ordered sequence of deferred packets, for stream only
// the first ctx in the queue is also in a defer queue
+ size_t size;
};
struct pl_defer_iter_data {
struct protolayer_data h;
uint64_t req_stamp; // time when request was received, uses get_stamp()
+ size_t size;
};
/// Return whether we're using optimized variant right now.
{
if (ctx->session->stream) {
struct pl_defer_sess_data *sdata = protolayer_sess_data_get_current(ctx);
+ waiting_requests_size -= sdata->size;
if (!ctx->session->closing) {
session2_force_close(ctx->session); // session is not freed here as iter contexts exist
}
queue_pop(sdata->queue);
while (queue_len(sdata->queue) > 0) {
+ struct pl_defer_iter_data *idata = protolayer_iter_data_get_current(ctx);
+ waiting_requests_size -= idata->size;
protolayer_break(ctx, kr_error(err)); // session is not freed here as other contexts exist
ctx = queue_head(sdata->queue);
queue_pop(sdata->queue);
}
}
+ struct pl_defer_iter_data *idata = protolayer_iter_data_get_current(ctx);
+ waiting_requests_size -= idata->size;
protolayer_break(ctx, kr_error(err));
+ kr_assert(waiting_requests ? waiting_requests_size > 0 : waiting_requests_size == 0);
}
/// Process a single deferred query (or defer again) if there is any.
if (queue_len(sdata->queue) > 0) {
VERBOSE_LOG(" PUSH follow-up to head of %d\n", priority);
push_query(queue_head(sdata->queue), priority, true);
+ } else {
+ waiting_requests_size -= sdata->size;
}
}
+ waiting_requests_size -= idata->size;
+ kr_assert(waiting_requests ? waiting_requests_size > 0 : waiting_requests_size == 0);
+
if (eof) {
// Keep session alive even if it is somehow force-closed during continuation.
// TODO Is it needed?
if (queue_len(sdata->queue) > 0) { // stream with preceding packet already deferred
queue_push(sdata->queue, ctx);
+ waiting_requests_size += data->size = protolayer_iter_size_est(ctx, false);
+ // the payload is already counted as part of the session wire buffer
VERBOSE_LOG(" PUSH as follow-up\n");
return protolayer_async();
}
VERBOSE_LOG(" PUSH to %d\n", priority);
if (ctx->session->stream) {
queue_push(sdata->queue, ctx);
+ waiting_requests_size += sdata->size = protolayer_sess_size_est(ctx->session);
}
push_query(ctx, priority, false);
- while (waiting_requests > MAX_WAITING_REQS) { // TODO follow-up stream packets are not counted here
+ waiting_requests_size += data->size = protolayer_iter_size_est(ctx, !ctx->session->stream);
+ // for stream, payload is counted in session wire buffer
+ while (waiting_requests_size > MAX_WAITING_REQS_SIZE) {
defer_sample_restart();
- process_single_deferred(); // possibly defers again without decreasing waiting_requests
+ process_single_deferred(); // possibly defers again without decreasing waiting_requests_size
// defer_sample_stop should be called soon outside
}
* To be used after returning from its callback for async continuation but before calling protolayer_continue. */
void *protolayer_iter_data_get_current(struct protolayer_iter_ctx *ctx);
+/** Gets rough memory footprint estimate of session/iteration for use in defer.
+ * Various, hopefully minor, allocations are not counted here;
+ * tasks and subsessions are also not counted;
+ * read the code before using elsewhere. */
+size_t protolayer_sess_size_est(struct session2 *s);
+size_t protolayer_iter_size_est(struct protolayer_iter_ctx *ctx, bool incl_payload);
+
/** Layer-specific data - the generic struct. To be added as the first member of
* each specific struct. */
struct protolayer_data {
* (`struct protolayer_iter_ctx`), including layer-specific data. */
size_t iter_ctx_size;
+ /** The size of this session struct. */
+ size_t session_size;
+
/** The following flexible array has basically this structure:
*
* struct {