unsigned int flags; /* SC_FL_* */
unsigned int ioto; /* I/O activity timeout */
+ uint32_t term_evts_log; /* termination events log aggregating SE + connection events */
ssize_t room_needed; /* free space in the input buffer required to receive more data.
* -1 : the SC is waiting for room but not on a specific amount of data
* >= 0 : min free space required to progress. 0 means SC must be unblocked ASAP
return ret;
}
+/* Records termination event <type> at location <loc> in the stconn's
+ * aggregated event log, and mirrors it into the attached stream's log,
+ * if a stream is attached.
+ *
+ * NOTE(review): backend-side connectors shift the location by 8 so that
+ * front and back events stay distinguishable in the shared log — this
+ * assumes enum term_event_loc reserves 8 slots per side; confirm against
+ * the enum definition.
+ */
+static inline void sc_report_term_evt(struct stconn *sc, enum term_event_loc loc, enum term_event_type type)
+{
+ if (sc->flags & SC_FL_ISBACK)
+ loc += 8;
+ sc->term_evts_log = tevt_report_event(sc->term_evts_log, loc, type);
+ /* propagate to the stream's log too, when one is attached */
+ if (sc_strm(sc))
+ __sc_strm(sc)->term_evts_log = tevt_report_event(__sc_strm(sc)->term_evts_log, loc, type);
+}
+
#endif /* _HAPROXY_STCONN_H */
} waiting_entity; /* The entity waiting to continue its processing and interrupted by an error/timeout */
unsigned int stream_epoch; /* copy of stream_epoch when the stream was created */
+ uint32_t term_evts_log; /* termination events log */
struct hlua *hlua[2]; /* lua runtime context (0: global, 1: per-thread) */
/* Context */
0;
}
+/* Records termination event <type> at location <loc> in both the stream's
+ * and the stconn's aggregated event logs. A no-op when no stream is
+ * attached to <sc> (early connection stages).
+ *
+ * NOTE(review): mirrors sc_report_term_evt(): backend-side connectors
+ * shift the location by 8 to separate front/back events — assumes
+ * enum term_event_loc reserves 8 slots per side; confirm against the
+ * enum definition.
+ */
+static inline void stream_report_term_evt(struct stconn *sc, enum term_event_loc loc, enum term_event_type type)
+{
+ struct stream *s = sc_strm(sc);
+
+ /* nothing to log against if the SC carries no stream */
+ if (!s)
+ return;
+
+ if (sc->flags & SC_FL_ISBACK)
+ loc += 8;
+ s->term_evts_log = tevt_report_event(s->term_evts_log, loc, type);
+ sc->term_evts_log = tevt_report_event(sc->term_evts_log, loc, type);
+}
+
int stream_set_timeout(struct stream *s, enum act_timeout_name name, int timeout);
void stream_retnclose(struct stream *s, const struct buffer *msg);
/* fall through */
return_prx_cond:
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scf, tevt_loc_strm, tevt_type_intercepted);
http_set_term_flags(s);
http_reply_and_close(s, txn->status, http_error_message(s));
if (!http_apply_redirect_rule(rule, s, txn)) {
goto return_int_err;
}
+ stream_report_term_evt(s->scf, tevt_loc_strm, tevt_type_intercepted);
goto done;
}
/* fall through */
return_prx_cond:
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scf, tevt_loc_strm, tevt_type_intercepted);
http_set_term_flags(s);
-
req->analysers &= AN_REQ_FLT_END;
req->analyse_exp = TICK_ETERNITY;
s->current_rule = s->current_rule_list = NULL;
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scf, tevt_loc_strm, tevt_type_intercepted);
http_set_term_flags(s);
http_reply_and_close(s, txn->status, http_error_message(s));
*/
s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scf, tevt_loc_strm, tevt_type_intercepted);
http_set_term_flags(s);
http_reply_and_close(s, txn->status, (!(s->scf->flags & SC_FL_ERROR) ? http_error_message(s) : NULL));
/* fall through */
return_prx_cond:
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scf, tevt_loc_strm, tevt_type_intercepted);
http_set_term_flags(s);
req->analysers &= AN_REQ_FLT_END;
goto return_prx_cond;
return_int_err:
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scf, tevt_loc_strm, tevt_type_intercepted);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
goto return_prx_cond;
return_bad_req:
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scf, tevt_loc_strm, tevt_type_intercepted);
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
/* fall through */
return_prx_cond:
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scb, tevt_loc_strm, tevt_type_intercepted);
http_set_term_flags(s);
http_reply_and_close(s, txn->status, http_error_message(s));
return_prx_cond:
s->scb->flags |= SC_FL_NOLINGER;
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scb, tevt_loc_strm, tevt_type_intercepted);
http_set_term_flags(s);
rep->analysers &= AN_RES_FLT_END;
goto return_error;
return_int_err:
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scb, tevt_loc_strm, tevt_type_intercepted);
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
if (sess->listener && sess->listener->counters)
goto return_error;
return_bad_res:
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scb, tevt_loc_strm, tevt_type_intercepted);
_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
if (objt_server(s->target)) {
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
sedesc->fsb = TICK_ETERNITY;
sedesc->xref.peer = NULL;
se_fl_setall(sedesc, SE_FL_NONE);
-
+ sedesc->term_evts_log = 0;
sedesc->abort_info.info = 0;
sedesc->abort_info.code = 0;
struct se_abort_info *reason = NULL;
unsigned int flags = 0;
- if ((mode & (SE_SHW_SILENT|SE_SHW_NORMAL)) && !se_fl_test(sedesc, SE_FL_SHW))
+ if ((mode & (SE_SHW_SILENT|SE_SHW_NORMAL)) && !se_fl_test(sedesc, SE_FL_SHW)) {
+ sc_report_term_evt(sedesc->sc, tevt_loc_strm, tevt_type_shutw);
flags |= (mode & SE_SHW_NORMAL) ? SE_FL_SHWN : SE_FL_SHWS;
+ }
if ((mode & (SE_SHR_RESET|SE_SHR_DRAIN)) && !se_fl_test(sedesc, SE_FL_SHR))
flags |= (mode & SE_SHR_DRAIN) ? SE_FL_SHRD : SE_FL_SHRR;
sc->wait_event.tasklet = NULL;
sc->wait_event.events = 0;
+ sc->term_evts_log = 0;
+
/* If there is no endpoint, allocate a new one now */
if (!sedesc) {
sedesc = sedesc_new();
sc->flags |= SC_FL_EOS;
ic->flags |= CF_READ_EVENT;
sc_ep_report_read_activity(sc);
-
+ sc_report_term_evt(sc, tevt_loc_strm, (sc->flags & SC_FL_EOI ? tevt_type_shutr: tevt_type_truncated_shutr));
if (sc->state != SC_ST_EST)
return;
}
if (sc_ep_test(sc, SE_FL_ERROR)) {
sc->flags |= SC_FL_ERROR;
+ if (!(sc->flags & SC_FL_EOS))
+ sc_report_term_evt(sc, tevt_loc_strm, (sc->flags & SC_FL_EOI ? tevt_type_rcv_err: tevt_type_truncated_rcv_err));
ret = 1;
}
if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING)) {
oc->flags |= CF_WRITE_EVENT;
BUG_ON(sc_ep_test(sc, SE_FL_EOS|SE_FL_ERROR|SE_FL_ERR_PENDING) == (SE_FL_EOS|SE_FL_ERR_PENDING));
+ sc_report_term_evt(sc, tevt_loc_strm, tevt_type_snd_err);
if (sc_ep_test(sc, SE_FL_ERROR))
sc->flags |= SC_FL_ERROR;
return 1;
sc->state = SC_ST_RDY;
}
- /* Report EOS on the channel if it was reached from the mux point of
+ /* Report EOI on the channel if it was reached from the mux point of
* view.
*
* Note: This test is only required because sc_conn_process is also the SI
* wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
* care of it.
*/
- if (sc_ep_test(sc, SE_FL_EOS) && !(sc->flags & SC_FL_EOS)) {
- /* we received a shutdown */
- if (ic->flags & CF_AUTO_CLOSE)
- sc_schedule_shutdown(sc_opposite(sc));
- sc_conn_eos(sc);
+ if (sc_ep_test(sc, SE_FL_EOI) && !(sc->flags & SC_FL_EOI)) {
+ sc->flags |= SC_FL_EOI;
+ ic->flags |= CF_READ_EVENT;
+ sc_ep_report_read_activity(sc);
}
- /* Report EOI on the channel if it was reached from the mux point of
+ /* Report EOS on the channel if it was reached from the mux point of
* view.
*
* Note: This test is only required because sc_conn_process is also the SI
* wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
* care of it.
*/
- if (sc_ep_test(sc, SE_FL_EOI) && !(sc->flags & SC_FL_EOI)) {
- sc->flags |= SC_FL_EOI;
- ic->flags |= CF_READ_EVENT;
- sc_ep_report_read_activity(sc);
+ if (sc_ep_test(sc, SE_FL_EOS) && !(sc->flags & SC_FL_EOS)) {
+ /* we received a shutdown */
+ if (ic->flags & CF_AUTO_CLOSE)
+ sc_schedule_shutdown(sc_opposite(sc));
+ sc_conn_eos(sc);
}
- if (sc_ep_test(sc, SE_FL_ERROR))
+ if (sc_ep_test(sc, SE_FL_ERROR) && !(sc->flags & SC_FL_ERROR)) {
+ if (!(sc->flags & SC_FL_EOS))
+ sc_report_term_evt(sc, tevt_loc_strm, (sc->flags & SC_FL_EOI ? tevt_type_rcv_err: tevt_type_truncated_rcv_err));
sc->flags |= SC_FL_ERROR;
+ }
/* Second step : update the stream connector and channels, try to forward any
* pending data, then possibly wake the stream up based on the new
s->stream_epoch = _HA_ATOMIC_LOAD(&stream_epoch);
s->uniq_id = _HA_ATOMIC_FETCH_ADD(&global.req_count, 1);
+ s->term_evts_log = 0;
/* OK, we're keeping the stream, so let's properly initialize the stream */
LIST_INIT(&s->back_refs);
channel_check_timeout(&s->res);
if (unlikely(!(s->scb->flags & SC_FL_SHUT_DONE) && (s->req.flags & CF_WRITE_TIMEOUT))) {
+ stream_report_term_evt(s->scb, tevt_loc_strm, tevt_type_tout);
s->scb->flags |= SC_FL_NOLINGER;
sc_shutdown(s->scb);
}
if (unlikely(!(s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->req.flags & CF_READ_TIMEOUT))) {
+ stream_report_term_evt(s->scf, tevt_loc_strm, tevt_type_tout);
if (s->scf->flags & SC_FL_NOHALF)
s->scf->flags |= SC_FL_NOLINGER;
sc_abort(s->scf);
}
if (unlikely(!(s->scf->flags & SC_FL_SHUT_DONE) && (s->res.flags & CF_WRITE_TIMEOUT))) {
+ stream_report_term_evt(s->scf, tevt_loc_strm, tevt_type_tout);
s->scf->flags |= SC_FL_NOLINGER;
sc_shutdown(s->scf);
}
if (unlikely(!(s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->res.flags & CF_READ_TIMEOUT))) {
+ stream_report_term_evt(s->scb, tevt_loc_strm, tevt_type_tout);
if (s->scb->flags & SC_FL_NOHALF)
s->scb->flags |= SC_FL_NOLINGER;
sc_abort(s->scb);
stream_abort(s);
abort:
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scf, tevt_loc_strm, tevt_type_intercepted);
req->analysers &= AN_REQ_FLT_END;
s->current_rule = s->current_rule_list = NULL;
req->analyse_exp = s->rules_exp = TICK_ETERNITY;
stream_abort(s);
abort:
+ // XXX: All errors are handled as intercepted here !
+ stream_report_term_evt(s->scb, tevt_loc_strm, tevt_type_intercepted);
rep->analysers &= AN_RES_FLT_END;
s->current_rule = s->current_rule_list = NULL;
rep->analyse_exp = s->rules_exp = TICK_ETERNITY;