}
res_htx->flags |= HTX_FL_EOM;
res->flags |= CF_EOI;
- appctx->endp->flags |= CS_EP_EOI;
+ se_fl_set(appctx->endp, CS_EP_EOI);
appctx->st0 = PROMEX_ST_END;
/* fall through */
LIST_DEL_INIT(&appctx->buffer_wait.list);
if (appctx->sess)
session_free(appctx->sess);
- BUG_ON(appctx->endp && !(appctx->endp->flags & CS_EP_ORPHAN));
+ BUG_ON(appctx->endp && !se_fl_test(appctx->endp, CS_EP_ORPHAN));
cs_endpoint_free(appctx->endp);
pool_free(pool_head_appctx, appctx);
_HA_ATOMIC_DEC(&nb_applets);
/* sets CS_EP_ERROR or CS_EP_ERR_PENDING on the endpoint */
static inline void cs_ep_set_error(struct cs_endpoint *endp)
{
- if (endp->flags & CS_EP_EOS)
- endp->flags |= CS_EP_ERROR;
+ if (se_fl_test(endp, CS_EP_EOS))
+ se_fl_set(endp, CS_EP_ERROR);
else
- endp->flags |= CS_EP_ERR_PENDING;
+ se_fl_set(endp, CS_EP_ERR_PENDING);
}
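/* For reference: the se_fl_*() accessors used throughout this patch are
 * thin wrappers around the endpoint's flags word. A minimal sketch,
 * consistent with how they are invoked below (the exact upstream
 * definitions and inlining attributes may differ):
 */
static inline void se_fl_setall(struct cs_endpoint *endp, uint all) { endp->flags = all; }
static inline void se_fl_set(struct cs_endpoint *endp, uint on)     { endp->flags |= on; }
static inline void se_fl_clr(struct cs_endpoint *endp, uint off)    { endp->flags &= ~off; }
static inline uint se_fl_get(struct cs_endpoint *endp)              { return endp->flags; }
static inline int  se_fl_test(struct cs_endpoint *endp, uint test)  { return !!(endp->flags & test); }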
/* Retrieves any valid conn_stream from this connection, preferably the first
qcs->endp->target = qcs;
qcs->endp->conn = qcc->conn;
- qcs->endp->flags |= (CS_EP_T_MUX|CS_EP_ORPHAN|CS_EP_NOT_FIRST);
+ se_fl_set(qcs->endp, CS_EP_T_MUX | CS_EP_ORPHAN | CS_EP_NOT_FIRST);
/* TODO duplicated from mux_h2 */
sess->t_idle = tv_ms_elapsed(&sess->tv_accept, &now) - sess->t_handshake;
if (!endp)
goto fail_endp;
endp->target = appctx;
- endp->flags |= (CS_EP_T_APPLET|CS_EP_ORPHAN);
+ se_fl_set(endp, CS_EP_T_APPLET | CS_EP_ORPHAN);
}
appctx->endp = endp;
/* async startup is only possible for frontend appctx, thus for orphan
* appctx, because no backend appctx can be orphan.
*/
- BUG_ON(!(appctx->endp->flags & CS_EP_ORPHAN));
+ BUG_ON(!se_fl_test(appctx->endp, CS_EP_ORPHAN));
sess = session_new(px, NULL, &appctx->obj_type);
if (!sess)
/* If a frontend appctx is attached to a conn-stream, release the stream
* instead of the appctx.
*/
- if (!(appctx->endp->flags & CS_EP_ORPHAN) && !(appctx_cs(appctx)->flags & CS_FL_ISBACK)) {
+ if (!se_fl_test(appctx->endp, CS_EP_ORPHAN) && !(appctx_cs(appctx)->flags & CS_FL_ISBACK)) {
stream_free(appctx_strm(appctx));
return;
}
*/
void appctx_shut(struct appctx *appctx)
{
- if (appctx->endp->flags & (CS_EP_SHR|CS_EP_SHW))
+ if (se_fl_test(appctx->endp, CS_EP_SHR | CS_EP_SHW))
return;
if (appctx->applet->release)
appctx->applet->release(appctx);
- appctx->endp->flags |= CS_EP_SHRR | CS_EP_SHWN;
+ se_fl_set(appctx->endp, CS_EP_SHRR | CS_EP_SHWN);
}
/* Callback used to wake up an applet when a buffer is available. The applet
struct conn_stream *cs = appctx_cs(appctx);
/* allocation requested ? */
- if (!(appctx->endp->flags & CS_EP_RXBLK_BUFF))
+ if (!se_fl_test(appctx->endp, CS_EP_RXBLK_BUFF))
return 0;
cs_rx_buff_rdy(cs);
return NULL;
}
- if (app->endp->flags & CS_EP_ORPHAN) {
+ if (se_fl_test(app->endp, CS_EP_ORPHAN)) {
/* Finalize init of an orphan appctx. The .init callback function must
* be defined and must finalize the appctx startup.
*/
/* measure the call rate and check for anomalies when too high */
rate = update_freq_ctr(&app->call_rate, 1);
if (rate >= 100000 && app->call_rate.prev_ctr && // looped more than 100k times over last second
- ((b_size(cs_ib(cs)) && app->endp->flags & CS_EP_RXBLK_BUFF) || // asks for a buffer which is present
- (b_size(cs_ib(cs)) && !b_data(cs_ib(cs)) && app->endp->flags & CS_EP_RXBLK_ROOM) || // asks for room in an empty buffer
+ ((b_size(cs_ib(cs)) && se_fl_test(app->endp, CS_EP_RXBLK_BUFF)) || // asks for a buffer which is present
+ (b_size(cs_ib(cs)) && !b_data(cs_ib(cs)) && se_fl_test(app->endp, CS_EP_RXBLK_ROOM)) || // asks for room in an empty buffer
(b_data(cs_ob(cs)) && cs_tx_endp_ready(cs) && !cs_tx_blocked(cs)) || // asks for data already present
(!b_data(cs_ib(cs)) && b_data(cs_ob(cs)) && // didn't return anything ...
(cs_oc(cs)->flags & (CF_WRITE_PARTIAL|CF_SHUTW_NOW)) == CF_SHUTW_NOW))) { // ... and left data pending after a shut
/* no more data are expected. */
res_htx->flags |= HTX_FL_EOM;
res->flags |= CF_EOI;
- appctx->endp->flags |= CS_EP_EOI;
+ se_fl_set(appctx->endp, CS_EP_EOI);
appctx->st0 = HTX_CACHE_END;
}
}
break;
default: /* abnormal state */
- appctx->endp->flags |= CS_EP_ERROR;
+ se_fl_set(appctx->endp, CS_EP_ERROR);
break;
}
s->srv_error(s, s->csb);
return 1;
}
- s->csb->endp->flags &= CS_EP_DETACHED;
+ se_fl_clr(s->csb->endp, ~CS_EP_DETACHED);
}
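/* Note: se_fl_clr() takes the mask of flags to drop, so clearing the
 * complement keeps only the masked bits: se_fl_clr(s->csb->endp,
 * ~CS_EP_DETACHED) is equivalent to the old "flags &= CS_EP_DETACHED".
 */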
sockaddr_free(&s->csb->dst);
endp->target = NULL;
endp->conn = NULL;
endp->cs = NULL;
- endp->flags = CS_EP_NONE;
+ se_fl_setall(endp, CS_EP_NONE);
}
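/* Unlike se_fl_set(), which ORs bits in, se_fl_setall() overwrites the
 * whole flags word; that is what resetting the endpoint to CS_EP_NONE
 * requires here.
 */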
/* Tries to alloc an endpoint and initialize it. Returns NULL on failure. */
pool_free(pool_head_connstream, cs);
cs = NULL;
}
- endp->flags &= ~CS_EP_ORPHAN;
+ se_fl_clr(endp, CS_EP_ORPHAN);
return cs;
}
if (conn->mux) {
if (cs->wait_event.events != 0)
conn->mux->unsubscribe(cs, cs->wait_event.events, &cs->wait_event);
- endp->flags |= CS_EP_ORPHAN;
+ se_fl_set(endp, CS_EP_ORPHAN);
endp->cs = NULL;
cs->endp = NULL;
conn->mux->detach(endp);
sc_ep_set(cs, CS_EP_ERROR);
return -1;
}
- new_endp->flags = sc_ep_get(cs) & CS_EP_APP_MASK;
+ se_fl_setall(new_endp, sc_ep_get(cs) & CS_EP_APP_MASK);
/* The app is still attached, the cs will not be released */
cs_detach_endp(&cs);
res_htx->flags |= HTX_FL_EOM;
res->flags |= CF_EOI;
- ctx->endp->flags |= CS_EP_EOI;
+ se_fl_set(ctx->endp, CS_EP_EOI);
strm->txn->status = http_ctx->status;
http_ctx->flags |= APPLET_RSP_SENT;
}
/* if the request contains the HTX_FL_EOM, we finished the request part. */
if (htx->flags & HTX_FL_EOM) {
req->flags |= CF_EOI;
- appctx->endp->flags |= CS_EP_EOI;
+ se_fl_set(appctx->endp, CS_EP_EOI);
appctx->st0 = HTTPCLIENT_S_RES_STLINE;
}
if (!fstrm->id)
fstrm->fconn->nb_reserved--;
if (fstrm->endp->cs) {
- if (!(fstrm->endp->flags & CS_EP_EOS) && !b_data(&fstrm->rxbuf))
+ if (!se_fl_test(fstrm->endp, CS_EP_EOS) && !b_data(&fstrm->rxbuf))
fcgi_strm_notify_recv(fstrm);
}
fstrm->state = FCGI_SS_CLOSED;
*/
LIST_DEL_INIT(&fstrm->send_list);
tasklet_free(fstrm->shut_tl);
- BUG_ON(fstrm->endp && !(fstrm->endp->flags & CS_EP_ORPHAN));
+ BUG_ON(fstrm->endp && !se_fl_test(fstrm->endp, CS_EP_ORPHAN));
cs_endpoint_free(fstrm->endp);
pool_free(pool_head_fcgi_strm, fstrm);
}
if ((fconn->state == FCGI_CS_CLOSED || fconn->conn->flags & CO_FL_ERROR)) {
- fstrm->endp->flags |= CS_EP_ERR_PENDING;
- if (fstrm->endp->flags & CS_EP_EOS)
- fstrm->endp->flags |= CS_EP_ERROR;
+ se_fl_set(fstrm->endp, CS_EP_ERR_PENDING);
+ if (se_fl_test(fstrm->endp, CS_EP_EOS))
+ se_fl_set(fstrm->endp, CS_EP_ERROR);
if (fstrm->state < FCGI_SS_ERROR) {
fstrm->state = FCGI_SS_ERROR;
fcgi_conn_read0_pending(fconn) ||
fstrm->state == FCGI_SS_CLOSED ||
(fstrm->flags & FCGI_SF_ES_RCVD) ||
- (fstrm->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) {
+ se_fl_test(fstrm->endp, CS_EP_ERROR | CS_EP_ERR_PENDING | CS_EP_EOS))) {
/* we may have to signal the upper layers */
TRACE_DEVEL("notifying stream before switching SID", FCGI_EV_RX_RECORD|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
- fstrm->endp->flags |= CS_EP_RCV_MORE;
+ se_fl_set(fstrm->endp, CS_EP_RCV_MORE);
fcgi_strm_notify_recv(fstrm);
}
fstrm = tmp_fstrm;
fcgi_conn_read0_pending(fconn) ||
fstrm->state == FCGI_SS_CLOSED ||
(fstrm->flags & FCGI_SF_ES_RCVD) ||
- (fstrm->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) {
+ se_fl_test(fstrm->endp, CS_EP_ERROR | CS_EP_ERR_PENDING | CS_EP_EOS))) {
/* we may have to signal the upper layers */
TRACE_DEVEL("notifying stream before switching SID", FCGI_EV_RX_RECORD|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
- fstrm->endp->flags |= CS_EP_RCV_MORE;
+ se_fl_set(fstrm->endp, CS_EP_RCV_MORE);
fcgi_strm_notify_recv(fstrm);
}
while (node) {
fstrm = container_of(node, struct fcgi_strm, by_id);
- if (fstrm->endp->cs && fstrm->endp->flags & CS_EP_WAIT_FOR_HS)
+ if (fstrm->endp->cs && se_fl_test(fstrm->endp, CS_EP_WAIT_FOR_HS))
fcgi_strm_notify_recv(fstrm);
node = eb32_next(node);
}
* for example because of a "tcp-request content reject" rule that is
* normally used to limit abuse.
*/
- if ((fstrm->endp->flags & CS_EP_KILL_CONN) &&
+ if (se_fl_test(fstrm->endp, CS_EP_KILL_CONN) &&
!(fconn->flags & (FCGI_CF_ABRTS_SENT|FCGI_CF_ABRTS_FAILED))) {
TRACE_STATE("stream wants to kill the connection", FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
fconn->state = FCGI_CS_CLOSED;
* for example because of a "tcp-request content reject" rule that is
* normally used to limit abuse.
*/
- if ((fstrm->endp->flags & CS_EP_KILL_CONN) &&
+ if (se_fl_test(fstrm->endp, CS_EP_KILL_CONN) &&
!(fconn->flags & (FCGI_CF_ABRTS_SENT|FCGI_CF_ABRTS_FAILED))) {
TRACE_STATE("stream wants to kill the connection", FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
fconn->state = FCGI_CS_CLOSED;
TRACE_STATE("fstrm rxbuf not allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK, fconn->conn, fstrm);
if (b_data(&fstrm->rxbuf))
- fstrm->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+ se_fl_set(fstrm->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
else {
- fstrm->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+ se_fl_clr(fstrm->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
if (fstrm->state == FCGI_SS_ERROR || (fstrm->h1m.state == H1_MSG_DONE)) {
- fstrm->endp->flags |= CS_EP_EOI;
+ se_fl_set(fstrm->endp, CS_EP_EOI);
if (!(fstrm->h1m.flags & (H1_MF_VER_11|H1_MF_XFER_LEN)))
- fstrm->endp->flags |= CS_EP_EOS;
+ se_fl_set(fstrm->endp, CS_EP_EOS);
}
if (fcgi_conn_read0_pending(fconn))
- fstrm->endp->flags |= CS_EP_EOS;
- if (fstrm->endp->flags & CS_EP_ERR_PENDING)
- fstrm->endp->flags |= CS_EP_ERROR;
+ se_fl_set(fstrm->endp, CS_EP_EOS);
+ if (se_fl_test(fstrm->endp, CS_EP_ERR_PENDING))
+ se_fl_set(fstrm->endp, CS_EP_ERROR);
fcgi_release_buf(fconn, &fstrm->rxbuf);
}
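/* The rcv_buf epilogue above (keep CS_EP_RCV_MORE|CS_EP_WANT_ROOM while
 * data remain buffered, otherwise clear them, report CS_EP_EOI/CS_EP_EOS
 * as appropriate, and promote CS_EP_ERR_PENDING to CS_EP_ERROR) is the
 * same pattern converted in the H2 and QUIC muxes further down.
 */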
if (id < 0) {
fcgi_strm_close(fstrm);
- fstrm->endp->flags |= CS_EP_ERROR;
+ se_fl_set(fstrm->endp, CS_EP_ERROR);
TRACE_DEVEL("couldn't get a stream ID, leaving in error", FCGI_EV_STRM_SEND|FCGI_EV_FSTRM_ERR|FCGI_EV_STRM_ERR, fconn->conn, fstrm);
return 0;
}
(unsigned int)b_head_ofs(&fstrm->rxbuf), (unsigned int)b_size(&fstrm->rxbuf),
fstrm->endp->cs);
if (fstrm->endp) {
- chunk_appendf(msg, " .endp.flg=0x%08x", fstrm->endp->flags);
- if (!(fstrm->endp->flags & CS_EP_ORPHAN))
+ chunk_appendf(msg, " .endp.flg=0x%08x", se_fl_get(fstrm->endp));
+ if (!se_fl_test(fstrm->endp, CS_EP_ORPHAN))
chunk_appendf(msg, " .cs.flg=0x%08x .cs.app=%p",
fstrm->endp->cs->flags, fstrm->endp->cs->app);
}
if (h1s) {
chunk_appendf(&trace_buf, " h1s=%p(0x%08x)", h1s, h1s->flags);
if (h1s->endp)
- chunk_appendf(&trace_buf, " endp=%p(0x%08x)", h1s->endp, h1s->endp->flags);
+ chunk_appendf(&trace_buf, " endp=%p(0x%08x)", h1s->endp, se_fl_get(h1s->endp));
if (h1s->endp && h1s->endp->cs)
chunk_appendf(&trace_buf, " cs=%p(0x%08x)", h1s->endp->cs, h1s->endp->cs->flags);
}
TRACE_ENTER(H1_EV_STRM_NEW, h1c->conn, h1s);
if (h1s->flags & H1S_F_NOT_FIRST)
- h1s->endp->flags |= CS_EP_NOT_FIRST;
+ se_fl_set(h1s->endp, CS_EP_NOT_FIRST);
if (h1s->req.flags & H1_MF_UPG_WEBSOCKET)
- h1s->endp->flags |= CS_EP_WEBSOCKET;
+ se_fl_set(h1s->endp, CS_EP_WEBSOCKET);
if (!cs_new_from_endp(h1s->endp, h1c->conn->owner, input)) {
TRACE_ERROR("CS allocation failure", H1_EV_STRM_NEW|H1_EV_STRM_END|H1_EV_STRM_ERR, h1c->conn, h1s);
goto fail;
h1s->endp->target = h1s;
h1s->endp->conn = h1c->conn;
- h1s->endp->flags |= (CS_EP_T_MUX|CS_EP_ORPHAN);
+ se_fl_set(h1s->endp, CS_EP_T_MUX | CS_EP_ORPHAN);
}
h1s->sess = sess;
}
HA_ATOMIC_DEC(&h1c->px_counters->open_streams);
- BUG_ON(h1s->endp && !(h1s->endp->flags & CS_EP_ORPHAN));
+ BUG_ON(h1s->endp && !se_fl_test(h1s->endp, CS_EP_ORPHAN));
cs_endpoint_free(h1s->endp);
pool_free(pool_head_h1s, h1s);
}
/* Here h1s->endp->cs is always defined */
if (!(h1m->flags & H1_MF_CHNK) && (h1m->state == H1_MSG_DATA || (h1m->state == H1_MSG_TUNNEL))) {
TRACE_STATE("notify the mux can use splicing", H1_EV_RX_DATA|H1_EV_RX_BODY, h1c->conn, h1s);
- h1s->endp->flags |= CS_EP_MAY_SPLICE;
+ se_fl_set(h1s->endp, CS_EP_MAY_SPLICE);
}
else {
TRACE_STATE("notify the mux can't use splicing anymore", H1_EV_RX_DATA|H1_EV_RX_BODY, h1c->conn, h1s);
- h1s->endp->flags &= ~CS_EP_MAY_SPLICE;
+ se_fl_clr(h1s->endp, CS_EP_MAY_SPLICE);
}
/* Set EOI on conn-stream in DONE state iff:
*/
if (((h1m->state == H1_MSG_DONE) && (h1m->flags & H1_MF_RESP)) ||
((h1m->state == H1_MSG_DONE) && (h1s->meth != HTTP_METH_CONNECT) && !(h1m->flags & H1_MF_CONN_UPG)))
- h1s->endp->flags |= CS_EP_EOI;
+ se_fl_set(h1s->endp, CS_EP_EOI);
out:
/* When Input data are pending for this message, notify upper layer that
* - Headers or trailers are pending to be copied.
*/
if (h1s->flags & (H1S_F_RX_CONGESTED)) {
- h1s->endp->flags |= CS_EP_RCV_MORE | CS_EP_WANT_ROOM;
+ se_fl_set(h1s->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
TRACE_STATE("waiting for more room", H1_EV_RX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
}
else {
- h1s->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+ se_fl_clr(h1s->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
if (h1s->flags & H1S_F_REOS) {
- h1s->endp->flags |= CS_EP_EOS;
+ se_fl_set(h1s->endp, CS_EP_EOS);
if (h1m->state >= H1_MSG_DONE || !(h1m->flags & H1_MF_XFER_LEN)) {
/* DONE or TUNNEL or SHUTR without XFER_LEN, set
* EOI on the conn-stream */
- h1s->endp->flags |= CS_EP_EOI;
+ se_fl_set(h1s->endp, CS_EP_EOI);
}
else if (h1m->state > H1_MSG_LAST_LF && h1m->state < H1_MSG_DONE) {
- h1s->endp->flags |= CS_EP_ERROR;
+ se_fl_set(h1s->endp, CS_EP_ERROR);
TRACE_ERROR("message aborted, set error on CS", H1_EV_RX_DATA|H1_EV_H1S_ERR, h1c->conn, h1s);
}
err:
htx_to_buf(htx, buf);
- h1s->endp->flags |= CS_EP_EOI;
+ se_fl_set(h1s->endp, CS_EP_EOI);
TRACE_DEVEL("leaving on error", H1_EV_RX_DATA|H1_EV_STRM_ERR, h1c->conn, h1s);
return 0;
}
h1c->flags |= H1C_F_ST_ERROR;
TRACE_ERROR("txn done but data waiting to be sent, set error on h1c", H1_EV_H1C_ERR, h1c->conn, h1s);
}
- h1s->endp->flags |= CS_EP_EOI;
+ se_fl_set(h1s->endp, CS_EP_EOI);
}
TRACE_LEAVE(H1_EV_TX_DATA, h1c->conn, h1s, chn_htx, (size_t[]){total});
TRACE_STATE("read0 on connection", H1_EV_H1C_RECV, conn, h1s);
}
if ((h1c->flags & H1C_F_ST_ERROR) || ((conn->flags & CO_FL_ERROR) && !b_data(&h1c->ibuf)))
- h1s->endp->flags |= CS_EP_ERROR;
+ se_fl_set(h1s->endp, CS_EP_ERROR);
TRACE_POINT(H1_EV_STRM_WAKE, h1c->conn, h1s);
h1_alert(h1s);
}
BUG_ON(!h1s || h1c->flags & H1C_F_ST_READY);
if (conn_xprt_read0_pending(conn) || (h1s->flags & H1S_F_REOS))
- h1s->endp->flags |= CS_EP_EOS;
+ se_fl_set(h1s->endp, CS_EP_EOS);
if ((h1c->flags & H1C_F_ST_ERROR) || (conn->flags & CO_FL_ERROR))
- h1s->endp->flags |= CS_EP_ERROR;
+ se_fl_set(h1s->endp, CS_EP_ERROR);
h1_alert(h1s);
TRACE_DEVEL("waiting to release the CS before releasing the connection", H1_EV_H1C_WAKE);
}
if (h1c->flags & H1C_F_ST_ATTACHED) {
/* Don't release the H1 connection right now, we must destroy the
* attached CS first. Here, the H1C must not be READY */
- h1c->h1s->endp->flags |= (CS_EP_EOS|CS_EP_ERROR);
+ se_fl_set(h1c->h1s->endp, CS_EP_EOS | CS_EP_ERROR);
h1_alert(h1c->h1s);
h1_refresh_timeout(h1c);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].idle_conns_lock);
TRACE_ENTER(H1_EV_STRM_SHUT, h1c->conn, h1s, 0, (size_t[]){mode});
- if (h1s->endp->flags & CS_EP_SHR)
+ if (se_fl_test(h1s->endp, CS_EP_SHR))
goto end;
- if (h1s->endp->flags & CS_EP_KILL_CONN) {
+ if (se_fl_test(h1s->endp, CS_EP_KILL_CONN)) {
TRACE_STATE("stream wants to kill the connection", H1_EV_STRM_SHUT, h1c->conn, h1s);
goto do_shutr;
}
do_shutr:
/* NOTE: Be sure to handle abort (cf. h2_shutr) */
- if (h1s->endp->flags & CS_EP_SHR)
+ if (se_fl_test(h1s->endp, CS_EP_SHR))
goto end;
if (conn_xprt_ready(h1c->conn) && h1c->conn->xprt->shutr)
TRACE_ENTER(H1_EV_STRM_SHUT, h1c->conn, h1s, 0, (size_t[]){mode});
- if (h1s->endp->flags & CS_EP_SHW)
+ if (se_fl_test(h1s->endp, CS_EP_SHW))
goto end;
- if (h1s->endp->flags & CS_EP_KILL_CONN) {
+ if (se_fl_test(h1s->endp, CS_EP_KILL_CONN)) {
TRACE_STATE("stream wants to kill the connection", H1_EV_STRM_SHUT, h1c->conn, h1s);
goto do_shutw;
}
else
TRACE_DEVEL("h1c ibuf not allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK, h1c->conn);
- if ((flags & CO_RFL_BUF_FLUSH) && (h1s->endp->flags & CS_EP_MAY_SPLICE)) {
+ if ((flags & CO_RFL_BUF_FLUSH) && se_fl_test(h1s->endp, CS_EP_MAY_SPLICE)) {
h1c->flags |= H1C_F_WANT_SPLICE;
TRACE_STATE("Block xprt rcv_buf to flush stream's buffer (want_splice)", H1_EV_STRM_RECV, h1c->conn, h1s);
}
}
if (h1c->flags & H1C_F_ST_ERROR) {
- h1s->endp->flags |= CS_EP_ERROR;
+ se_fl_set(h1s->endp, CS_EP_ERROR);
TRACE_ERROR("H1C on error, leaving in error", H1_EV_STRM_SEND|H1_EV_H1C_ERR|H1_EV_H1S_ERR|H1_EV_STRM_ERR, h1c->conn, h1s);
return 0;
}
}
if (h1c->flags & H1C_F_ST_ERROR) {
- h1s->endp->flags |= CS_EP_ERROR;
+ se_fl_set(h1s->endp, CS_EP_ERROR);
TRACE_ERROR("reporting error to the app-layer stream", H1_EV_STRM_SEND|H1_EV_H1S_ERR|H1_EV_STRM_ERR, h1c->conn, h1s);
}
if (ret > h1m->curr_len) {
h1s->flags |= H1S_F_PARSING_ERROR;
h1c->flags |= H1C_F_ST_ERROR;
- h1s->endp->flags |= CS_EP_ERROR;
+ se_fl_set(h1s->endp, CS_EP_ERROR);
TRACE_ERROR("too much payload, more than announced",
H1_EV_RX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
goto end;
if (!(h1c->flags & H1C_F_WANT_SPLICE)) {
TRACE_STATE("notify the mux can't use splicing anymore", H1_EV_STRM_RECV, h1c->conn, h1s);
- h1s->endp->flags &= ~CS_EP_MAY_SPLICE;
+ se_fl_clr(h1s->endp, CS_EP_MAY_SPLICE);
if (!(h1c->wait_event.events & SUB_RETRY_RECV)) {
TRACE_STATE("restart receiving data, subscribing", H1_EV_STRM_RECV, h1c->conn, h1s);
h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
if (ret > h1m->curr_len) {
h1s->flags |= H1S_F_PROCESSING_ERROR;
h1c->flags |= H1C_F_ST_ERROR;
- h1s->endp->flags |= CS_EP_ERROR;
+ se_fl_set(h1s->endp, CS_EP_ERROR);
TRACE_ERROR("too much payload, more than announced",
H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
goto end;
method = "UNKNOWN";
chunk_appendf(msg, " h1s=%p h1s.flg=0x%x .endp.flg=0x%x .req.state=%s .res.state=%s"
" .meth=%s status=%d",
- h1s, h1s->flags, h1s->endp->flags,
+ h1s, h1s->flags, se_fl_get(h1s->endp),
h1m_state_str(h1s->req.state),
h1m_state_str(h1s->res.state), method, h1s->status);
if (h1s->endp) {
- chunk_appendf(msg, " .endp.flg=0x%08x", h1s->endp->flags);
- if (!(h1s->endp->flags & CS_EP_ORPHAN))
+ chunk_appendf(msg, " .endp.flg=0x%08x",
+ se_fl_get(h1s->endp));
+ if (!se_fl_test(h1s->endp, CS_EP_ORPHAN))
chunk_appendf(msg, " .cs.flg=0x%08x .cs.app=%p",
h1s->endp->cs->flags, h1s->endp->cs->app);
}
if (!h2s->id)
h2s->h2c->nb_reserved--;
if (h2s->endp->cs) {
- if (!(h2s->endp->flags & CS_EP_EOS) && !b_data(&h2s->rxbuf))
+ if (!se_fl_test(h2s->endp, CS_EP_EOS) && !b_data(&h2s->rxbuf))
h2s_notify_recv(h2s);
}
HA_ATOMIC_DEC(&h2s->h2c->px_counters->open_streams);
/* ditto, calling tasklet_free() here should be ok */
tasklet_free(h2s->shut_tl);
- BUG_ON(h2s->endp && !(h2s->endp->flags & CS_EP_ORPHAN));
+ BUG_ON(h2s->endp && !se_fl_test(h2s->endp, CS_EP_ORPHAN));
cs_endpoint_free(h2s->endp);
pool_free(pool_head_h2s, h2s);
goto out_close;
h2s->endp->target = h2s;
h2s->endp->conn = h2c->conn;
- h2s->endp->flags |= (CS_EP_T_MUX|CS_EP_ORPHAN|CS_EP_NOT_FIRST);
+ se_fl_set(h2s->endp, CS_EP_T_MUX | CS_EP_ORPHAN | CS_EP_NOT_FIRST);
/* FIXME wrong analogy between ext-connect and websocket, this needs to
* be refined.
*/
if (flags & H2_SF_EXT_CONNECT_RCVD)
- h2s->endp->flags |= CS_EP_WEBSOCKET;
+ se_fl_set(h2s->endp, CS_EP_WEBSOCKET);
/* The stream will record the request's accept date (which is either the
* end of the connection's or the date immediately after the previous
if ((h2s->h2c->st0 >= H2_CS_ERROR || h2s->h2c->conn->flags & CO_FL_ERROR) ||
(h2s->h2c->last_sid > 0 && (!h2s->id || h2s->id > h2s->h2c->last_sid))) {
- h2s->endp->flags |= CS_EP_ERR_PENDING;
- if (h2s->endp->flags & CS_EP_EOS)
- h2s->endp->flags |= CS_EP_ERROR;
+ se_fl_set(h2s->endp, CS_EP_ERR_PENDING);
+ if (se_fl_test(h2s->endp, CS_EP_EOS))
+ se_fl_set(h2s->endp, CS_EP_ERROR);
if (h2s->st < H2_SS_ERROR)
h2s->st = H2_SS_ERROR;
if (h2c->dff & H2_F_HEADERS_END_STREAM)
h2s->flags |= H2_SF_ES_RCVD;
- if (h2s->endp->flags & CS_EP_ERROR && h2s->st < H2_SS_ERROR)
+ if (se_fl_test(h2s->endp, CS_EP_ERROR) && h2s->st < H2_SS_ERROR)
h2s->st = H2_SS_ERROR;
else if (h2s->flags & H2_SF_ES_RCVD) {
if (h2s->st == H2_SS_OPEN)
h2c_read0_pending(h2c) ||
h2s->st == H2_SS_CLOSED ||
(h2s->flags & H2_SF_ES_RCVD) ||
- (h2s->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) {
+ se_fl_test(h2s->endp, CS_EP_ERROR | CS_EP_ERR_PENDING | CS_EP_EOS))) {
/* we may have to signal the upper layers */
TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_STRM_WAKE, h2c->conn, h2s);
- h2s->endp->flags |= CS_EP_RCV_MORE;
+ se_fl_set(h2s->endp, CS_EP_RCV_MORE);
h2s_notify_recv(h2s);
}
h2s = tmp_h2s;
h2c_read0_pending(h2c) ||
h2s->st == H2_SS_CLOSED ||
(h2s->flags & H2_SF_ES_RCVD) ||
- (h2s->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) {
+ se_fl_test(h2s->endp, CS_EP_ERROR | CS_EP_ERR_PENDING | CS_EP_EOS))) {
/* we may have to signal the upper layers */
TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn, h2s);
- h2s->endp->flags |= CS_EP_RCV_MORE;
+ se_fl_set(h2s->endp, CS_EP_RCV_MORE);
h2s_notify_recv(h2s);
}
while (node) {
h2s = container_of(node, struct h2s, by_id);
- if (h2s->endp->flags & CS_EP_WAIT_FOR_HS)
+ if (se_fl_test(h2s->endp, CS_EP_WAIT_FOR_HS))
h2s_notify_recv(h2s);
node = eb32_next(node);
}
* normally used to limit abuse. In this case we schedule a goaway to
* close the connection.
*/
- if ((h2s->endp->flags & CS_EP_KILL_CONN) &&
+ if (se_fl_test(h2s->endp, CS_EP_KILL_CONN) &&
!(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
* normally used to limit abuse. In this case we schedule a goaway to
* close the connection.
*/
- if ((h2s->endp->flags & CS_EP_KILL_CONN) &&
+ if (se_fl_test(h2s->endp, CS_EP_KILL_CONN) &&
!(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
break;
}
- if (!h2s->endp->cs || h2s->endp->flags & CS_EP_SHW) {
+ if (!h2s->endp->cs || se_fl_test(h2s->endp, CS_EP_SHW)) {
/* Response already closed: add END_STREAM */
es_now = 1;
}
break;
}
- if (!h2s->endp->cs || h2s->endp->flags & CS_EP_SHW) {
+ if (!h2s->endp->cs || se_fl_test(h2s->endp, CS_EP_SHW)) {
/* Request already closed: add END_STREAM */
es_now = 1;
}
if (h2s_htx->flags & HTX_FL_PARSING_ERROR) {
buf_htx->flags |= HTX_FL_PARSING_ERROR;
if (htx_is_empty(buf_htx))
- h2s->endp->flags |= CS_EP_EOI;
+ se_fl_set(h2s->endp, CS_EP_EOI);
}
else if (htx_is_empty(h2s_htx))
buf_htx->flags |= (h2s_htx->flags & HTX_FL_EOM);
end:
if (b_data(&h2s->rxbuf))
- h2s->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+ se_fl_set(h2s->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
else {
- h2s->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+ se_fl_clr(h2s->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
if (h2s->flags & H2_SF_ES_RCVD) {
- h2s->endp->flags |= CS_EP_EOI;
+ se_fl_set(h2s->endp, CS_EP_EOI);
/* Add EOS flag for tunnel */
if (h2s->flags & H2_SF_BODY_TUNNEL)
- h2s->endp->flags |= CS_EP_EOS;
+ se_fl_set(h2s->endp, CS_EP_EOS);
}
if (h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED)
- h2s->endp->flags |= CS_EP_EOS;
- if (h2s->endp->flags & CS_EP_ERR_PENDING)
- h2s->endp->flags |= CS_EP_ERROR;
+ se_fl_set(h2s->endp, CS_EP_EOS);
+ if (se_fl_test(h2s->endp, CS_EP_ERR_PENDING))
+ se_fl_set(h2s->endp, CS_EP_ERROR);
if (b_size(&h2s->rxbuf)) {
b_free(&h2s->rxbuf);
offer_buffers(NULL, 1);
}
if (h2s->h2c->st0 >= H2_CS_ERROR) {
- h2s->endp->flags |= CS_EP_ERROR;
+ se_fl_set(h2s->endp, CS_EP_ERROR);
TRACE_DEVEL("connection is in error, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
return 0;
}
int32_t id = h2c_get_next_sid(h2s->h2c);
if (id < 0) {
- h2s->endp->flags |= CS_EP_ERROR;
+ se_fl_set(h2s->endp, CS_EP_ERROR);
TRACE_DEVEL("couldn't get a stream ID, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
return 0;
}
!b_data(&h2s->h2c->dbuf) &&
(h2s->flags & (H2_SF_BLK_SFCTL | H2_SF_BLK_MFCTL))) {
TRACE_DEVEL("fctl with shutr, reporting error to app-layer", H2_EV_H2S_SEND|H2_EV_STRM_SEND|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
- if (h2s->endp->flags & CS_EP_EOS)
- h2s->endp->flags |= CS_EP_ERROR;
+ if (se_fl_test(h2s->endp, CS_EP_EOS))
+ se_fl_set(h2s->endp, CS_EP_ERROR);
else
- h2s->endp->flags |= CS_EP_ERR_PENDING;
+ se_fl_set(h2s->endp, CS_EP_ERR_PENDING);
}
if (total > 0 && !(h2s->flags & H2_SF_BLK_SFCTL) &&
chunk_appendf(msg, "endp=%p", h2s->endp);
if (h2s->endp)
chunk_appendf(msg, "(.flg=0x%08x)",
- h2s->endp->flags);
+ se_fl_get(h2s->endp));
chunk_appendf(&trash, " .subs=%p", h2s->subs);
if (h2s->subs) {
/* Display conn and cs info, if defined (pointer + flags) */
chunk_appendf(&trace_buf, " - conn=%p(0x%08x)", conn, conn->flags);
- chunk_appendf(&trace_buf, " endp=%p(0x%08x)", ctx->endp, ctx->endp->flags);
+ chunk_appendf(&trace_buf, " endp=%p(0x%08x)", ctx->endp, se_fl_get(ctx->endp));
if (cs)
chunk_appendf(&trace_buf, " cs=%p(0x%08x)", cs, cs->flags);
if (conn && ctx->wait_event.events != 0)
conn->xprt->unsubscribe(conn, conn->xprt_ctx, ctx->wait_event.events,
&ctx->wait_event);
- BUG_ON(ctx->endp && !(ctx->endp->flags & CS_EP_ORPHAN));
+ BUG_ON(ctx->endp && !se_fl_test(ctx->endp, CS_EP_ORPHAN));
cs_endpoint_free(ctx->endp);
pool_free(pool_head_pt_ctx, ctx);
struct mux_pt_ctx *ctx = tctx;
TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn);
- if (!(ctx->endp->flags & CS_EP_ORPHAN)) {
+ if (!se_fl_test(ctx->endp, CS_EP_ORPHAN)) {
/* There's a small race condition.
* mux_pt_io_cb() is only supposed to be called if we have no
* stream attached. However, maybe the tasklet got woken up,
}
ctx->endp->target = ctx;
ctx->endp->conn = conn;
- ctx->endp->flags |= (CS_EP_T_MUX|CS_EP_ORPHAN);
+ se_fl_set(ctx->endp, CS_EP_T_MUX | CS_EP_ORPHAN);
cs = cs_new_from_endp(ctx->endp, sess, input);
if (!cs) {
ctx->endp = cs->endp;
}
conn->ctx = ctx;
- ctx->endp->flags |= CS_EP_RCV_MORE;
+ se_fl_set(ctx->endp, CS_EP_RCV_MORE);
if (global.tune.options & GTUNE_USE_SPLICE)
- ctx->endp->flags |= CS_EP_MAY_SPLICE;
+ se_fl_set(ctx->endp, CS_EP_MAY_SPLICE);
TRACE_LEAVE(PT_EV_CONN_NEW, conn);
return 0;
int ret = 0;
TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn);
- if (!(ctx->endp->flags & CS_EP_ORPHAN)) {
+ if (!se_fl_test(ctx->endp, CS_EP_ORPHAN)) {
ret = ctx->endp->cs->data_cb->wake ? ctx->endp->cs->data_cb->wake(ctx->endp->cs) : 0;
if (ret < 0) {
if (cs_attach_mux(endp->cs, ctx, conn) < 0)
return -1;
ctx->endp = endp;
- ctx->endp->flags |= CS_EP_RCV_MORE;
+ se_fl_set(ctx->endp, CS_EP_RCV_MORE);
TRACE_LEAVE(PT_EV_STRM_NEW, conn, endp->cs);
return 0;
struct mux_pt_ctx *pt = ctx;
TRACE_POINT(PT_EV_CONN_END, pt->conn, pt->endp->cs);
- if ((pt->endp->flags & CS_EP_ORPHAN) || pt->conn->ctx != pt) {
+ if (se_fl_test(pt->endp, CS_EP_ORPHAN) || pt->conn->ctx != pt) {
if (pt->conn->ctx != pt) {
pt->endp = NULL;
}
{
struct mux_pt_ctx *ctx = conn->ctx;
- return (!(ctx->endp->flags & CS_EP_ORPHAN) ? 1 : 0);
+ return (!se_fl_test(ctx->endp, CS_EP_ORPHAN) ? 1 : 0);
}
/* returns the number of streams still available on a connection */
TRACE_ENTER(PT_EV_STRM_SHUT, conn, cs);
- if (ctx->endp->flags & CS_EP_SHR)
+ if (se_fl_test(ctx->endp, CS_EP_SHR))
return;
- ctx->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+ se_fl_clr(ctx->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
if (conn_xprt_ready(conn) && conn->xprt->shutr)
conn->xprt->shutr(conn, conn->xprt_ctx,
(mode == CO_SHR_DRAIN));
else if (mode == CO_SHR_DRAIN)
conn_ctrl_drain(conn);
- if (ctx->endp->flags & CS_EP_SHW)
+ if (se_fl_test(ctx->endp, CS_EP_SHW))
conn_full_close(conn);
TRACE_LEAVE(PT_EV_STRM_SHUT, conn, cs);
TRACE_ENTER(PT_EV_STRM_SHUT, conn, cs);
- if (ctx->endp->flags & CS_EP_SHW)
+ if (se_fl_test(ctx->endp, CS_EP_SHW))
return;
if (conn_xprt_ready(conn) && conn->xprt->shutw)
conn->xprt->shutw(conn, conn->xprt_ctx,
(mode == CO_SHW_NORMAL));
- if (!(ctx->endp->flags & CS_EP_SHR))
+ if (!se_fl_test(ctx->endp, CS_EP_SHR))
conn_sock_shutw(conn, (mode == CO_SHW_NORMAL));
else
conn_full_close(conn);
TRACE_ENTER(PT_EV_RX_DATA, conn, cs, buf, (size_t[]){count});
if (!count) {
- ctx->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+ se_fl_set(ctx->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
goto end;
}
b_realign_if_empty(buf);
ret = conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, count, flags);
if (conn_xprt_read0_pending(conn)) {
- ctx->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
- ctx->endp->flags |= CS_EP_EOS;
+ se_fl_clr(ctx->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+ se_fl_set(ctx->endp, CS_EP_EOS);
TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, cs);
}
if (conn->flags & CO_FL_ERROR) {
- ctx->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
- ctx->endp->flags |= CS_EP_ERROR;
+ se_fl_clr(ctx->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+ se_fl_set(ctx->endp, CS_EP_ERROR);
TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, cs);
}
end:
b_del(buf, ret);
if (conn->flags & CO_FL_ERROR) {
- ctx->endp->flags |= CS_EP_ERROR;
+ se_fl_set(ctx->endp, CS_EP_ERROR);
TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, cs);
}
ret = conn->xprt->rcv_pipe(conn, conn->xprt_ctx, pipe, count);
if (conn_xprt_read0_pending(conn)) {
- ctx->endp->flags |= CS_EP_EOS;
+ se_fl_set(ctx->endp, CS_EP_EOS);
TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, cs);
}
if (conn->flags & CO_FL_ERROR) {
- ctx->endp->flags |= CS_EP_ERROR;
+ se_fl_set(ctx->endp, CS_EP_ERROR);
TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, cs);
}
ret = conn->xprt->snd_pipe(conn, conn->xprt_ctx, pipe);
if (conn->flags & CO_FL_ERROR) {
- ctx->endp->flags |= CS_EP_ERROR;
+ se_fl_set(ctx->endp, CS_EP_ERROR);
TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, cs);
}
qc_stream_desc_release(qcs->stream);
- BUG_ON(qcs->endp && !(qcs->endp->flags & CS_EP_ORPHAN));
+ BUG_ON(qcs->endp && !se_fl_test(qcs->endp, CS_EP_ORPHAN));
cs_endpoint_free(qcs->endp);
eb64_delete(&qcs->by_id);
end:
if (b_data(&qcs->rx.app_buf)) {
- qcs->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+ se_fl_set(qcs->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
}
else {
- qcs->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
- if (qcs->endp->flags & CS_EP_ERR_PENDING)
- qcs->endp->flags |= CS_EP_ERROR;
+ se_fl_clr(qcs->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+ if (se_fl_test(qcs->endp, CS_EP_ERR_PENDING))
+ se_fl_set(qcs->endp, CS_EP_ERROR);
if (fin)
- qcs->endp->flags |= CS_EP_EOI;
+ se_fl_set(qcs->endp, CS_EP_EOI);
if (b_size(&qcs->rx.app_buf)) {
b_free(&qcs->rx.app_buf);
continue;
if (qcc->conn->flags & CO_FL_ERROR) {
- qcs->endp->flags |= CS_EP_ERR_PENDING;
- if (qcs->endp->flags & CS_EP_EOS)
- qcs->endp->flags |= CS_EP_ERROR;
+ se_fl_set(qcs->endp, CS_EP_ERR_PENDING);
+ if (se_fl_test(qcs->endp, CS_EP_EOS))
+ se_fl_set(qcs->endp, CS_EP_ERROR);
if (qcs->subs) {
qcs_notify_recv(qcs);
}
res_htx->flags |= HTX_FL_EOM;
res->flags |= CF_EOI;
- appctx->endp->flags |= CS_EP_EOI;
+ se_fl_set(appctx->endp, CS_EP_EOI);
appctx->st0 = STAT_HTTP_END;
}