unsigned long long tot_alloc_calls, tot_free_calls;
unsigned long long tot_alloc_bytes, tot_free_bytes;
#endif
- struct stconn *sc = appctx_sc(appctx);
struct buffer *name_buffer = get_trash_chunk();
const struct ha_caller *caller;
const char *str;
int max_lines;
int i, j, max;
- /* FIXME: Don't watch the other side ! */
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- return 1;
-
chunk_reset(&trash);
switch (profiling & HA_PROF_TASKS_MASK) {
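Every hunk in this patch drops the same guard, so it is worth seeing the shape once in isolation. Below is a minimal compilable sketch of the before/after pattern; sc_opposite(), appctx_sc() and SC_FL_SHUT_DONE are the real HAProxy names, but the structures are illustrative stubs, not the real definitions.

#include <stdio.h>

/* Illustrative stand-ins; the real types live in haproxy/stconn-t.h
 * and haproxy/applet-t.h. */
#define SC_FL_SHUT_DONE 0x00001000
struct stconn { unsigned int flags; struct stconn *opposite; };
struct appctx { struct stconn *sc; };

static struct stconn *appctx_sc(const struct appctx *a) { return a->sc; }
static struct stconn *sc_opposite(const struct stconn *sc) { return sc->opposite; }

/* Old shape: each I/O handler peeked at the peer stream connector and
 * aborted by itself when the peer had already shut. */
static int io_handler_old(struct appctx *appctx)
{
    struct stconn *sc = appctx_sc(appctx);

    if (sc_opposite(sc)->flags & SC_FL_SHUT_DONE)
        return 1; /* pretend we're done: the client is gone */
    /* ... produce the dump ... */
    return 1;
}

/* New shape: the handler only produces output; a closed peer is now
 * detected by the applet/CLI layer, which stops calling the handler. */
static int io_handler_new(struct appctx *appctx)
{
    (void)appctx;
    /* ... produce the dump ... */
    return 1;
}

int main(void)
{
    struct stconn peer = { .flags = SC_FL_SHUT_DONE, .opposite = 0 };
    struct stconn mine = { .flags = 0, .opposite = &peer };
    struct appctx a = { .sc = &mine };

    printf("old: %d, new: %d\n", io_handler_old(&a), io_handler_new(&a));
    return 0;
}
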
static int cli_io_handler_show_tasks(struct appctx *appctx)
{
struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64)));
- struct stconn *sc = appctx_sc(appctx);
struct buffer *name_buffer = get_trash_chunk();
struct sched_activity *entry;
const struct tasklet *tl;
int thr, queue;
int i, max;
- /* FIXME: Don't watch the other side ! */
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- return 1;
-
/* It's not possible to scan queues in small chunks and yield in the
* middle of the dump and come back again. So what we're doing instead
* is to freeze all threads and inspect their queues at once as fast as
* possible, using a sched_activity array to collect metrics with
* limited contention, then expand that last one to dump everything.
*/
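The freeze-and-snapshot idea this comment describes fits in a few lines. This is a standalone sketch under simplified assumptions: thread_isolate() and thread_release() are the real HAProxy primitives (they park all other threads at a safe point), while the per-thread counters here are stand-ins.

#include <string.h>
#include <stdio.h>

#define MAX_THREADS 4

/* Stand-in per-thread state; in HAProxy these are the per-thread run
 * queues feeding the sched_activity[] buckets. */
static unsigned int run_queue_len[MAX_THREADS] = { 3, 1, 4, 1 };

/* Stand-ins for HAProxy's thread_isolate()/thread_release(); the real
 * ones block until every other thread is parked. */
static void thread_isolate(void) { /* all other threads parked here */ }
static void thread_release(void) { /* other threads resume */ }

int main(void)
{
    unsigned int snapshot[MAX_THREADS];
    int t;

    /* Freeze everyone, copy the state as fast as possible, unfreeze,
     * and only then spend time formatting the dump. This is why the
     * handler cannot yield mid-dump: the snapshot must be atomic. */
    thread_isolate();
    memcpy(snapshot, run_queue_len, sizeof(snapshot));
    thread_release();

    for (t = 0; t < MAX_THREADS; t++)
        printf("thread %d: %u queued\n", t, snapshot[t]);
    return 0;
}
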
static int cli_io_handler_show_activity(struct appctx *appctx)
{
- struct stconn *sc = appctx_sc(appctx);
struct show_activity_ctx *actctx = appctx->svcctx;
int tgt = actctx->thr; // target thread, -1 for all, 0 for total only
uint up_sec, up_usec;
int base_line;
ullong up;
- /* FIXME: Don't watch the other side ! */
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- return 1;
-
/* this macro is used below to dump values. The thread number is "thr",
* and runs from 0 to nbt-1 when values are printed using the formula.
* We normally try to dump integral lines in order to keep counters
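The macro this comment refers to boils down to a loop like the one below: one output line per metric, a grand total first, then one value per thread. This is a hedged reconstruction with an invented field name, not the actual macro from the source.

#include <stdio.h>

#define NBT 4  /* stand-in for global.nbthread */

/* Hypothetical per-thread counter; the real code dumps many fields of
 * the per-thread activity[] array this way. */
static unsigned long long loops[NBT] = { 10, 12, 9, 11 };

int main(void)
{
    int tgt = -1;   /* target thread: -1 = all, as in ctx->thr */
    unsigned long long total = 0;
    int thr;

    /* dump a whole ("integral") line at once: the total first, then
     * one column per thread, so one metric is never split across
     * yields and its counters stay consistent */
    for (thr = 0; thr < NBT; thr++)
        total += loops[thr];
    printf("loops: %llu", total);
    if (tgt == -1)
        for (thr = 0; thr < NBT; thr++)
            printf(" %llu", loops[thr]);
    putchar('\n');
    return 0;
}
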
static int cli_io_handler_show_env(struct appctx *appctx)
{
struct show_env_ctx *ctx = appctx->svcctx;
- struct stconn *sc = appctx_sc(appctx);
char **var = ctx->var;
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- return 1;
-
chunk_reset(&trash);
/* we have two inner loops here, one for the proxy, the other one for
*/
static int cli_io_handler_show_fd(struct appctx *appctx)
{
- struct stconn *sc = appctx_sc(appctx);
struct show_fd_ctx *fdctx = appctx->svcctx;
uint match = fdctx->show_mask;
int fd = fdctx->fd;
int ret = 1;
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- goto end;
-
chunk_reset(&trash);
/* isolate the threads once per round. We're limited to a buffer worth
* of output.
*/
static int cli_io_handler_show_threads(struct appctx *appctx)
{
- struct stconn *sc = appctx_sc(appctx);
int thr;
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- return 1;
-
if (appctx->st0)
thr = appctx->st1;
else
static int debug_iohandler_fd(struct appctx *appctx)
{
struct dev_fd_ctx *ctx = appctx->svcctx;
- struct stconn *sc = appctx_sc(appctx);
struct sockaddr_storage sa;
struct stat statbuf;
socklen_t salen, vlen;
int ret = 1;
int i, fd;
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- goto end;
-
chunk_reset(&trash);
thread_isolate();
}
thread_release();
- end:
return ret;
}
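A detail worth keeping in mind for all of these handlers: the return value is the applet convention for yielding. Returning 0 means "output buffer was full, call me again", non-zero means the dump is finished; the current position is kept in the command context (appctx->svcctx) so the next call resumes where the previous one stopped. A toy model, with everything except the convention itself invented:

#include <stdio.h>

/* Stand-in context: the real handlers keep their position in
 * appctx->svcctx so they can resume after yielding. */
struct dump_ctx { int next_fd; };

#define FD_COUNT 10
#define BUDGET    4   /* pretend the output buffer fills after 4 entries */

/* Applet I/O handler convention: return 0 to be called again (output
 * buffer full, more to do), non-zero when the dump is finished. */
static int io_handler(struct dump_ctx *ctx)
{
    int budget = BUDGET;

    while (ctx->next_fd < FD_COUNT) {
        if (!budget--)
            return 0; /* buffer full: yield, resume at next_fd */
        printf("fd %d\n", ctx->next_fd++);
    }
    return 1; /* done */
}

int main(void)
{
    struct dump_ctx ctx = { 0 };
    int calls = 0;

    while (!io_handler(&ctx))
        calls++;
    printf("completed after %d extra call(s)\n", calls);
    return 0;
}
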
const char *pfx = ctx->match;
int ret = 1;
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- goto end;
-
if (!ctx->width) {
/* we don't know the first column's width, let's compute it
* now based on a first pass on printable entries and their
static int cli_io_handler_pat_list(struct appctx *appctx)
{
struct show_map_ctx *ctx = appctx->svcctx;
- struct stconn *sc = appctx_sc(appctx);
struct pat_ref_elt *elt;
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) {
- /* If we're forced to shut down, we might have to remove our
- * reference to the last ref_elt being dumped.
- */
- if (!LIST_ISEMPTY(&ctx->bref.users)) {
- HA_RWLOCK_WRLOCK(PATREF_LOCK, &ctx->ref->lock);
- LIST_DEL_INIT(&ctx->bref.users);
- HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
- }
- return 1;
- }
-
switch (ctx->state) {
case STATE_INIT:
ctx->state = STATE_LIST;
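The block removed above was the abort-time half of the back-reference trick used by resumable dumps: the context keeps a list element (ctx->bref.users) enqueued on the entry currently being dumped, so deletions can fix it up and the dump can resume; when a dump was aborted, that element had to be unlinked under the pattern reference lock. After this patch that unlinking presumably happens in the command's release callback instead of being polled for in the handler. A self-contained sketch of the link/unlink mechanics, with minimal list helpers standing in for HAProxy's list macros:

#include <stdio.h>

/* Minimal doubly-linked list, standing in for HAProxy's struct list
 * and the LIST_APPEND/LIST_DEL_INIT/LIST_ISEMPTY macros. */
struct list { struct list *n, *p; };
#define LIST_INIT(l)     do { (l)->n = (l)->p = (l); } while (0)
#define LIST_ISEMPTY(l)  ((l)->n == (l))

static void list_append(struct list *head, struct list *el)
{
    el->p = head->p; el->n = head;
    head->p->n = el; head->p = el;
}

static void list_del_init(struct list *el)
{
    el->p->n = el->n; el->n->p = el->p;
    LIST_INIT(el);
}

/* The dump context keeps a "back-ref" enqueued on the entry currently
 * being dumped; whoever deletes the entry can then repoint all
 * registered back-refs, and each dump resumes safely. */
struct bref { struct list users; };

int main(void)
{
    struct list entry_users;   /* per-entry list of dumpers */
    struct bref bref;

    LIST_INIT(&entry_users);
    LIST_INIT(&bref.users);

    list_append(&entry_users, &bref.users);     /* start dumping entry */
    printf("registered: %d\n", !LIST_ISEMPTY(&bref.users));

    /* On teardown (in a release callback after this patch), the
     * back-ref must be unlinked, under the ref lock in the real code. */
    if (!LIST_ISEMPTY(&bref.users))
        list_del_init(&bref.users);
    printf("after cleanup: %d\n", !LIST_ISEMPTY(&bref.users));
    return 0;
}
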
/* Displays workers and processes */
static int cli_io_handler_show_proc(struct appctx *appctx)
{
- struct stconn *sc = appctx_sc(appctx);
struct mworker_proc *child;
int old = 0;
int up = date.tv_sec - proc_self->timestamp;
char *uptime = NULL;
char *reloadtxt = NULL;
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- return 1;
-
if (up < 0) /* must never be negative because of clock drift */
up = 0;
static int cli_io_handler_show_loadstatus(struct appctx *appctx)
{
char *env;
- struct stconn *sc = appctx_sc(appctx);
if (!cli_has_level(appctx, ACCESS_LVL_OPER))
return 1;
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- return 1;
-
env = getenv("HAPROXY_LOAD_SUCCESS");
if (!env)
return 1;
* ->px, the proxy's id ->only_pxid, the server's pointer from ->sv, and the
* choice of what to dump from ->show_conn.
*/
-static int dump_servers_state(struct stconn *sc)
+static int dump_servers_state(struct appctx *appctx)
{
- struct appctx *appctx = __sc_appctx(sc);
struct show_srv_ctx *ctx = appctx->svcctx;
struct proxy *px = ctx->px;
struct server *srv;
static int cli_io_handler_servers_state(struct appctx *appctx)
{
struct show_srv_ctx *ctx = appctx->svcctx;
- struct stconn *sc = appctx_sc(appctx);
struct proxy *curproxy;
if (ctx->state == SHOW_SRV_HEAD) {
curproxy = ctx->px;
/* servers are only in backends */
if ((curproxy->cap & PR_CAP_BE) && !(curproxy->cap & PR_CAP_INT)) {
- if (!dump_servers_state(sc))
+ if (!dump_servers_state(appctx))
return 0;
}
/* only the selected proxy is dumped */
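The signature change in dump_servers_state() is the same mechanical refactor applied to stats_dump_full_strm_to_buffer() further down: a helper that only used its stconn argument to get back to the appctx now takes the appctx directly. Sketched here on a dummy helper; only the names __sc_appctx and appctx-style fields are taken from HAProxy, the types are stubs.

#include <stdio.h>

struct appctx;
struct stconn { struct appctx *app; };
struct appctx { struct stconn *sc; void *svcctx; };

static struct appctx *__sc_appctx(struct stconn *sc) { return sc->app; }

/* Before: the helper took the stconn and immediately converted it. */
static int dump_old(struct stconn *sc)
{
    struct appctx *appctx = __sc_appctx(sc);
    printf("old: ctx=%p\n", appctx->svcctx);
    return 1;
}

/* After: the caller already holds the appctx, so pass it directly and
 * drop the stconn round-trip entirely. */
static int dump_new(struct appctx *appctx)
{
    printf("new: ctx=%p\n", appctx->svcctx);
    return 1;
}

int main(void)
{
    struct appctx app = { 0 };
    struct stconn sc = { .app = &app };

    app.sc = &sc;
    dump_old(&sc);
    dump_new(&app);
    return 0;
}
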
struct stconn *sc = appctx_sc(appctx);
extern const char *monthname[12];
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- return 1;
-
chunk_reset(&trash);
if (!ctx->px) {
static int cli_io_handler_dump_quic(struct appctx *appctx)
{
struct show_quic_ctx *ctx = appctx->svcctx;
- struct stconn *sc = appctx_sc(appctx);
struct quic_conn *qc;
thread_isolate();
if (ctx->thr >= global.nbthread)
goto done;
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) {
- /* If we're forced to shut down, we might have to remove our
- * reference to the last stream being dumped.
- */
- if (!LIST_ISEMPTY(&ctx->bref.users))
- LIST_DEL_INIT(&ctx->bref.users);
- goto done;
- }
-
chunk_reset(&trash);
if (!LIST_ISEMPTY(&ctx->bref.users)) {
size_t ofs;
int ret;
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- return 1;
-
MT_LIST_DELETE(&appctx->wait_entry);
ret = ring_dispatch_messages(ring, appctx, &ctx->ofs, &last_ofs, ctx->flags, applet_append_line);
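For the ring-watching handler, note the ordering that survives the patch: the applet first unlinks its wait_entry from the ring's waiter list, then dispatches whatever is readable, and only goes back on the waiter list once it has caught up, so it is never woken for messages it is already about to consume. A single-threaded toy model of that discipline; names and structures are stand-ins, not the struct ring API.

#include <stdio.h>

#define RING_SIZE 8

/* Stand-in ring and reader; the real code uses struct ring, an offset
 * kept in the command context, and an MT_LIST of waiting appctx. */
struct ring   { int msgs[RING_SIZE]; int head; };
struct reader { int ofs; int subscribed; };

static void unsubscribe(struct reader *r) { r->subscribed = 0; }
static void subscribe(struct reader *r)   { r->subscribed = 1; }

/* Dispatch everything between the reader's offset and the ring head. */
static void dispatch(struct ring *rg, struct reader *r)
{
    while (r->ofs < rg->head)
        printf("msg %d\n", rg->msgs[r->ofs++ % RING_SIZE]);
}

int main(void)
{
    struct ring rg = { .msgs = { 1, 2, 3 }, .head = 3 };
    struct reader rd = { .ofs = 0, .subscribed = 1 };

    /* Unsubscribe first so a producer's wakeup cannot race with the
     * dump, then read, then re-subscribe to sleep until more data. */
    unsubscribe(&rd);
    dispatch(&rg, &rd);
    subscribe(&rd);

    printf("caught up at ofs=%d, waiting again\n", rd.ofs);
    return 0;
}
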
static int cli_io_handler_commit_cert(struct appctx *appctx)
{
struct commit_cert_ctx *ctx = appctx->svcctx;
- struct stconn *sc = appctx_sc(appctx);
int y = 0;
struct ckch_store *old_ckchs, *new_ckchs = NULL;
struct ckch_inst *ckchi;
usermsgs_clr("CLI");
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- goto end;
-
while (1) {
switch (ctx->state) {
case CERT_ST_INIT:
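Both commit handlers here are small state machines: each pass over the switch advances ctx->state, and the handler returns 0 whenever the output buffer fills, to be called again later in the same state. A toy version of that control flow; the ST_* names are invented, only the shape matches.

#include <stdio.h>

enum st { ST_INIT, ST_GEN, ST_INSERT, ST_SUCCESS, ST_FIN };
struct commit_ctx { enum st state; int left; };

static int io_handler(struct commit_ctx *ctx)
{
    while (1) {
        switch (ctx->state) {
        case ST_INIT:
            printf("Committing...\n");
            ctx->state = ST_GEN;
            break;
        case ST_GEN:
            if (ctx->left) {         /* pretend the buffer is full */
                ctx->left--;
                return 0;            /* yield, resume in ST_GEN */
            }
            ctx->state = ST_INSERT;
            break;
        case ST_INSERT:
            printf("Instances updated\n");
            ctx->state = ST_SUCCESS;
            break;
        case ST_SUCCESS:
            printf("Success!\n");
            ctx->state = ST_FIN;
            break;
        case ST_FIN:
            return 1;                /* done */
        }
    }
}

int main(void)
{
    struct commit_ctx ctx = { ST_INIT, 2 };

    while (!io_handler(&ctx))
        ;   /* called again after each yield */
    return 0;
}
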
static int cli_io_handler_commit_cafile_crlfile(struct appctx *appctx)
{
struct commit_cacrlfile_ctx *ctx = appctx->svcctx;
- struct stconn *sc = appctx_sc(appctx);
int y = 0;
struct cafile_entry *old_cafile_entry = ctx->old_entry;
struct cafile_entry *new_cafile_entry = ctx->new_entry;
struct ckch_inst_link *ckchi_link;
char *path;
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- goto end;
-
/* The ctx was already validated by the ca-file/crl-file parsing
* function. Entries can only be NULL in CACRL_ST_SUCCESS or
* CACRL_ST_FIN states
{
struct add_crtlist_ctx *ctx = appctx->svcctx;
struct bind_conf_list *bind_conf_node;
- struct stconn *sc = appctx_sc(appctx);
struct crtlist *crtlist = ctx->crtlist;
struct crtlist_entry *entry = ctx->entry;
struct ckch_store *store = entry->node.key;
/* for each bind_conf which use the crt-list, a new ckch_inst must be
* created.
*/
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
- goto end;
-
switch (ctx->state) {
case ADDCRT_ST_INIT:
/* This state just print the update message */
};
INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
-
* - STATE_DONE : nothing left to dump, the buffer may contain some
* data though.
*/
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) {
- /* in case of abort, remove any refcount we might have set on an entry */
- if (ctx->state == STATE_DUMP) {
- stksess_kill_if_expired(ctx->t, ctx->entry, 1);
- }
- return 1;
- }
chunk_reset(&trash);
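The removal above raises the obvious question of where the stksess_kill_if_expired() cleanup goes once the handler no longer polls for an aborted client. CLI commands have a release hook that runs when the applet terminates for any reason, which is the natural home for it; the sketch below shows that shape under stub types, with only the idea of an io_release-style callback taken from HAProxy.

#include <stdio.h>

/* Stand-ins: in HAProxy the release hook is the cli_kw io_release
 * callback, invoked when the applet terminates for any reason. */
struct entry    { int refcnt; int expired; };
struct show_ctx { struct entry *entry; int dumping; };

static void entry_kill_if_expired(struct entry *e)
{
    if (!--e->refcnt && e->expired)
        printf("entry freed\n");
}

/* Runs on normal completion *and* on client abort, which is what makes
 * it a safe home for the refcount cleanup removed from the handler. */
static void show_release(struct show_ctx *ctx)
{
    if (ctx->dumping)
        entry_kill_if_expired(ctx->entry);
}

int main(void)
{
    struct entry e = { .refcnt = 1, .expired = 1 };
    struct show_ctx ctx = { .entry = &e, .dumping = 1 };

    /* client disconnects mid-dump: no in-handler check needed, the
     * applet layer calls the release hook for us */
    show_release(&ctx);
    return 0;
}
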
* buffer is full and it needs to be called again, otherwise non-zero. It is
* designed to be called from stats_dump_strm_to_buffer() below.
*/
-static int stats_dump_full_strm_to_buffer(struct stconn *sc, struct stream *strm)
+static int stats_dump_full_strm_to_buffer(struct appctx *appctx, struct stream *strm)
{
- struct appctx *appctx = __sc_appctx(sc);
struct show_sess_ctx *ctx = appctx->svcctx;
chunk_reset(&trash);
static int cli_io_handler_dump_sess(struct appctx *appctx)
{
struct show_sess_ctx *ctx = appctx->svcctx;
- struct stconn *sc = appctx_sc(appctx);
struct connection *conn;
thread_isolate();
goto done;
}
- /* FIXME: Don't watch the other side !*/
- if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) {
- /* If we're forced to shut down, we might have to remove our
- * reference to the last stream being dumped.
- */
- if (!LIST_ISEMPTY(&ctx->bref.users)) {
- LIST_DELETE(&ctx->bref.users);
- LIST_INIT(&ctx->bref.users);
- }
- goto done;
- }
-
chunk_reset(&trash);
/* first, let's detach the back-ref from a possible previous stream */
LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
/* call the proper dump() function and return if we're missing space */
- if (!stats_dump_full_strm_to_buffer(sc, curr_strm))
+ if (!stats_dump_full_strm_to_buffer(appctx, curr_strm))
goto full;
/* stream dump complete */