- tune.events.max-events-at-once
- tune.fail-alloc
- tune.fd.edge-triggered
+ - tune.h1.be.glitches-threshold
+ - tune.h1.fe.glitches-threshold
- tune.h1.zero-copy-fwd-recv
- tune.h1.zero-copy-fwd-send
- tune.h2.be.glitches-threshold
will automatically get killed. A rule of thumb would be to set this value to
twice the usually observed CPU usage, or the commonly observed CPU usage plus
half the idle one (i.e. if CPU commonly reaches 60%, setting 80 here can make
- sense). This parameter has no effect without tune.h2.fe.glitches-threshold or
- tune.quic.fe.sec.glitches-threshold. See also the global parameters
- "tune.h2.fe.glitches-threshold" and "tune.quic.fe.sec.glitches-threshold".
+ sense). This parameter has no effect without tune.h2.fe.glitches-threshold,
+ tune.quic.fe.sec.glitches-threshold or tune.h1.fe.glitches-threshold. See
+ also the global parameters "tune.h2.fe.glitches-threshold",
+ "tune.h1.fe.glitches-threshold" and "tune.quic.fe.sec.glitches-threshold".
+
+tune.h1.be.glitches-threshold <number>
+ Sets the threshold for the number of glitches on a HTTP/1 backend connection,
+ after which that connection will automatically be killed. This allows to
+ automatically kill misbehaving connections without having to write explicit
+ rules for them. The default value is zero, indicating that no threshold is
+ set so that no event will cause a connection to be closed. Typical events
+ include improperly formatted headers that had been nevertheless accepted by
+ "accept-unsafe-violations-in-http-response". Any non-zero value here should
+ probably be in the hundreds or thousands to be effective without affecting
+ slightly bogus servers. It is also possible to only kill connections when the
+ CPU usage crosses a certain level, by using "tune.glitches.kill.cpu-usage".
+
+ See also: tune.h1.fe.glitches-threshold, bc_glitches, and
+ tune.glitches.kill.cpu-usage
+
+tune.h1.fe.glitches-threshold <number>
+  Sets the threshold for the number of glitches on a HTTP/1 frontend
+  connection, after which that connection will automatically be killed. This
+  allows to
+  automatically kill misbehaving connections without having to write explicit
+ rules for them. The default value is zero, indicating that no threshold is
+ set so that no event will cause a connection to be closed. Typical events
+ include improperly formatted headers that had been nevertheless accepted by
+ "accept-unsafe-violations-in-http-request". Any non-zero value here should
+ probably be in the hundreds or thousands to be effective without affecting
+ slightly bogus clients. It is also possible to only kill connections when the
+ CPU usage crosses a certain level, by using "tune.glitches.kill.cpu-usage".
+
+ See also: tune.h1.be.glitches-threshold, fc_glitches, and
+ tune.glitches.kill.cpu-usage
tune.h1.zero-copy-fwd-recv { on | off }
Enables ('on') of disabled ('off') the zero-copy receives of data for the H1
struct h1_counters *px_counters; /* h1 counters attached to proxy */
struct buffer_wait buf_wait; /* Wait list for buffer allocation */
struct wait_event wait_event; /* To be used if we're waiting for I/Os */
+ int glitches; /* Number of glitches on this connection */
};
/* H1 stream descriptor */
static struct h1_hdrs_map hdrs_map = { .name = NULL, .map = EB_ROOT };
static int accept_payload_with_any_method = 0;
+static int h1_be_glitches_threshold = 0; /* backend's max glitches: unlimited */
+static int h1_fe_glitches_threshold = 0; /* frontend's max glitches: unlimited */
+
/* trace source and events */
static void h1_trace(enum trace_level level, uint64_t mask,
const struct trace_source *src,
}
}
+/* report one or more glitches on the connection. That is any unexpected event
+ * that may occasionally happen but if repeated a bit too much, might indicate
+ * a misbehaving or completely bogus peer. It normally returns zero, unless the
+ * glitch limit was reached, in which case an error is also reported on the
+ * connection.
+ */
+#define h1_report_glitch(h1c, inc, ...) ({			\
+		COUNT_GLITCH(__VA_ARGS__);			\
+		_h1_report_glitch(h1c, inc);			\
+	})
+
+/* Adds <increment> to the connection's glitch counter and checks it against
+ * the side-specific threshold (h1_be_glitches_threshold for backend
+ * connections, h1_fe_glitches_threshold for frontend ones). The connection
+ * is flagged in error only when a non-zero threshold is configured, the
+ * counter reached it, and the thread is busy enough (idle_pct at or below
+ * tune.glitches.kill.cpu-usage). Returns 1 when the connection was flagged
+ * in error, otherwise 0.
+ */
+static inline int _h1_report_glitch(struct h1c *h1c, int increment)
+{
+	int thres = (h1c->flags & H1C_F_IS_BACK) ?
+		h1_be_glitches_threshold : h1_fe_glitches_threshold;
+
+	h1c->glitches += increment;
+	if (thres && h1c->glitches >= thres &&
+	    (th_ctx->idle_pct <= global.tune.glitch_kill_maxidle)) {
+		h1c->flags |= H1C_F_ERROR;
+		return 1;
+	}
+	return 0;
+}
+
/*****************************************************/
/* functions below are for dynamic buffer management */
h1c->task = NULL;
h1c->req_count = 0;
h1c->term_evts_log = 0;
+ h1c->glitches = 0;
LIST_INIT(&h1c->buf_wait.list);
h1c->wait_event.tasklet = tasklet_new();
h1s->h1c->errcode = h1m->err_code;
TRACE_ERROR("parsing error, reject H1 message", H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ h1_report_glitch(h1s->h1c, 1, "parsing error");
}
else if (ret == -2) {
TRACE_STATE("RX path congested, waiting for more space", H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_H1S_BLK, h1s->h1c->conn, h1s);
h1s->h1c->errcode = 413;
TRACE_ERROR("HTTP/1.0 GET/HEAD/DELETE request with a payload forbidden", H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ h1_report_glitch(h1s->h1c, 1, "HTTP/1.0 GET/HEAD/DELETE with payload");
ret = 0;
goto end;
}
h1s->h1c->errcode = 422;
TRACE_ERROR("Unknown transfer-encoding", H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ h1_report_glitch(h1s->h1c, 1, "unknown transfer-encoding");
ret = 0;
goto end;
}
TRACE_ERROR("missing/invalid websocket key, reject H1 message",
H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
+ h1_report_glitch(h1s->h1c, 1, "rejecting missing/invalid websocket key");
ret = 0;
goto end;
} else {
TRACE_ERROR("missing/invalid websocket key, but accepting this "
"violation according to configuration",
H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
+ h1_report_glitch(h1s->h1c, 1, "accepting missing/invalid websocket key");
}
}
}
*/
TRACE_STATE("Ignored parsing error", H1_EV_RX_DATA|H1_EV_RX_HDRS, h1s->h1c->conn, h1s);
h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ h1_report_glitch(h1s->h1c, 1, "ignored parsing error");
}
if (!(h1m->flags & H1_MF_RESP)) {
h1s->flags |= H1S_F_PARSING_ERROR;
TRACE_ERROR("parsing error, reject H1 message", H1_EV_RX_DATA|H1_EV_RX_BODY|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ h1_report_glitch(h1s->h1c, 1, "parsing error");
}
goto end;
}
h1s->flags |= H1S_F_PARSING_ERROR;
TRACE_ERROR("parsing error, reject H1 message", H1_EV_RX_DATA|H1_EV_RX_TLRS|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ h1_report_glitch(h1s->h1c, 1, "parsing error");
}
else if (ret == -2) {
TRACE_STATE("RX path congested, waiting for more space", H1_EV_RX_DATA|H1_EV_RX_TLRS|H1_EV_H1S_BLK, h1s->h1c->conn, h1s);
if (b_data(&h1c->ibuf) && /* Input data to be processed */
((h1c->state < H1_CS_RUNNING) || (h1c->state == H1_CS_DRAINING)) && /* IDLE, EMBRYONIC, UPGRADING or DRAINING */
!(h1c->flags & (H1C_F_IN_SALLOC|H1C_F_ABRT_PENDING))) { /* No allocation failure on the stream rxbuf and no ERROR on the H1C */
+ int prev_glitches = h1c->glitches;
struct h1s *h1s = h1c->h1s;
struct buffer *buf;
size_t count;
h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
}
}
+ if (h1c->glitches != prev_glitches && !(h1c->flags & H1C_F_IS_BACK))
+ session_add_glitch_ctr(h1c->conn->owner, h1c->glitches - prev_glitches);
}
no_parsing:
if (!(h1c->wait_event.events & SUB_RETRY_RECV))
h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
return 0;
+ case MUX_CTL_GET_GLITCHES:
+ return h1c->glitches;
case MUX_CTL_GET_NBSTRM:
return h1_used_streams(conn);
case MUX_CTL_GET_MAXSTRM:
(unsigned int)b_head_ofs(&h1c->obuf), (unsigned int)b_size(&h1c->obuf),
tevt_evts2str(h1c->term_evts_log));
+ chunk_appendf(msg, " .glitches=%d", h1c->glitches);
chunk_appendf(msg, " .task=%p", h1c->task);
if (h1c->task) {
chunk_appendf(msg, " .exp=%s",
return 0;
}
+/* config parser for global "tune.h1.{fe,be}.glitches-threshold". Expects a
+ * single positive integer argument; stores it into the matching side's
+ * threshold variable. Returns 0 on success, -1 with <err> filled on error.
+ */
+static int cfg_parse_h1_glitches_threshold(char **args, int section_type, struct proxy *curpx,
+					   const struct proxy *defpx, const char *file, int line,
+					   char **err)
+{
+	int *vptr;
+	char *stop;
+	long value;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	/* args[0][8] is 'b' for "tune.h1.be..." and 'f' for "tune.h1.fe..." */
+	vptr = (args[0][8] == 'b') ? &h1_be_glitches_threshold : &h1_fe_glitches_threshold;
+
+	/* strtol instead of atoi so that trailing garbage ("10x"), empty or
+	 * non-numeric strings and out-of-range values are rejected instead of
+	 * being silently taken as zero (i.e. "unlimited").
+	 */
+	value = strtol(args[1], &stop, 10);
+	if (*args[1] == '\0' || *stop != '\0' || value < 0 || value > INT_MAX) {
+		memprintf(err, "'%s' expects a positive numeric value.", args[0]);
+		return -1;
+	}
+
+	*vptr = value;
+	return 0;
+}
+
/* config parser for global "tune.h1.zero-copy-fwd-recv" */
static int cfg_parse_h1_zero_copy_fwd_rcv(char **args, int section_type, struct proxy *curpx,
const struct proxy *defpx, const char *file, int line,
{ CFG_GLOBAL, "h1-accept-payload-with-any-method", cfg_parse_h1_accept_payload_with_any_method },
{ CFG_GLOBAL, "h1-case-adjust", cfg_parse_h1_header_case_adjust },
{ CFG_GLOBAL, "h1-case-adjust-file", cfg_parse_h1_headers_case_adjust_file },
+ { CFG_GLOBAL, "tune.h1.be.glitches-threshold", cfg_parse_h1_glitches_threshold },
+ { CFG_GLOBAL, "tune.h1.fe.glitches-threshold", cfg_parse_h1_glitches_threshold },
{ CFG_GLOBAL, "tune.h1.zero-copy-fwd-recv", cfg_parse_h1_zero_copy_fwd_rcv },
{ CFG_GLOBAL, "tune.h1.zero-copy-fwd-send", cfg_parse_h1_zero_copy_fwd_snd },
{ 0, NULL, NULL },