From: Willy Tarreau
Date: Tue, 17 May 2022 14:31:36 +0000 (+0200)
Subject: CLEANUP: conn_stream: apply endp_flags.cocci tree-wide
X-Git-Tag: v2.6-dev12~101
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=d56377c5eb780761afdfac2284487ffb3cc49d61;p=thirdparty%2Fhaproxy.git

CLEANUP: conn_stream: apply endp_flags.cocci tree-wide

This changes all main uses of endp->flags to the se_fl_*() equivalent
by applying coccinelle script endp_flags.cocci. The se_fl_*() functions
themselves were manually excluded from the change, of course.

Note: 144 locations were touched, manually reviewed and found to be OK.

The script was applied with all includes:

  spatch --in-place --recursive-includes -I include --sp-file $script $files
---
diff --git a/addons/promex/service-prometheus.c b/addons/promex/service-prometheus.c
index 9501b1c09c..f277c88016 100644
--- a/addons/promex/service-prometheus.c
+++ b/addons/promex/service-prometheus.c
@@ -1564,7 +1564,7 @@ static void promex_appctx_handle_io(struct appctx *appctx)
 		}
 		res_htx->flags |= HTX_FL_EOM;
 		res->flags |= CF_EOI;
-		appctx->endp->flags |= CS_EP_EOI;
+		se_fl_set(appctx->endp, CS_EP_EOI);
 		appctx->st0 = PROMEX_ST_END;
 		/* fall through */
diff --git a/include/haproxy/applet.h b/include/haproxy/applet.h
index 937fdbc757..ed98270593 100644
--- a/include/haproxy/applet.h
+++ b/include/haproxy/applet.h
@@ -84,7 +84,7 @@ static inline void __appctx_free(struct appctx *appctx)
 	LIST_DEL_INIT(&appctx->buffer_wait.list);
 	if (appctx->sess)
 		session_free(appctx->sess);
-	BUG_ON(appctx->endp && !(appctx->endp->flags & CS_EP_ORPHAN));
+	BUG_ON(appctx->endp && !se_fl_test(appctx->endp, CS_EP_ORPHAN));
 	cs_endpoint_free(appctx->endp);
 	pool_free(pool_head_appctx, appctx);
 	_HA_ATOMIC_DEC(&nb_applets);
diff --git a/include/haproxy/conn_stream.h b/include/haproxy/conn_stream.h
index 8ab0327e19..0d71b07c31 100644
--- a/include/haproxy/conn_stream.h
+++ b/include/haproxy/conn_stream.h
@@ -273,10 +273,10 @@ static inline void cs_conn_drain_and_shut(struct conn_stream *cs)
 /* sets CS_EP_ERROR or CS_EP_ERR_PENDING on the endpoint */
 static inline void cs_ep_set_error(struct cs_endpoint *endp)
 {
-	if (endp->flags & CS_EP_EOS)
-		endp->flags |= CS_EP_ERROR;
+	if (se_fl_test(endp, CS_EP_EOS))
+		se_fl_set(endp, CS_EP_ERROR);
 	else
-		endp->flags |= CS_EP_ERR_PENDING;
+		se_fl_set(endp, CS_EP_ERR_PENDING);
 }
 
 /* Retrieves any valid conn_stream from this connection, preferably the first
diff --git a/include/haproxy/mux_quic.h b/include/haproxy/mux_quic.h
index b3284b1745..4277a7d6f2 100644
--- a/include/haproxy/mux_quic.h
+++ b/include/haproxy/mux_quic.h
@@ -102,7 +102,7 @@ static inline struct conn_stream *qc_attach_cs(struct qcs *qcs, struct buffer *b
 	qcs->endp->target = qcs;
 	qcs->endp->conn = qcc->conn;
-	qcs->endp->flags |= (CS_EP_T_MUX|CS_EP_ORPHAN|CS_EP_NOT_FIRST);
+	se_fl_set(qcs->endp, CS_EP_T_MUX | CS_EP_ORPHAN | CS_EP_NOT_FIRST);
 	/* TODO duplicated from mux_h2 */
 	sess->t_idle = tv_ms_elapsed(&sess->tv_accept, &now) - sess->t_handshake;
diff --git a/src/applet.c b/src/applet.c
index d5eb46a676..aff991f748 100644
--- a/src/applet.c
+++ b/src/applet.c
@@ -51,7 +51,7 @@ struct appctx *appctx_new(struct applet *applet, struct cs_endpoint *endp, unsig
 		if (!endp)
 			goto fail_endp;
 		endp->target = appctx;
-		endp->flags |= (CS_EP_T_APPLET|CS_EP_ORPHAN);
+		se_fl_set(endp, CS_EP_T_APPLET | CS_EP_ORPHAN);
 	}
 	appctx->endp = endp;
@@ -93,7 +93,7 @@ int appctx_finalize_startup(struct appctx *appctx, struct proxy *px, struct buff
 	/* async startup is only possible for frontend appctx.
Thus for orphan * appctx. Because no backend appctx can be orphan. */ - BUG_ON(!(appctx->endp->flags & CS_EP_ORPHAN)); + BUG_ON(!se_fl_test(appctx->endp, CS_EP_ORPHAN)); sess = session_new(px, NULL, &appctx->obj_type); if (!sess) @@ -114,7 +114,7 @@ void appctx_free_on_early_error(struct appctx *appctx) /* If a frontend apctx is attached to a conn-stream, release the stream * instead of the appctx. */ - if (!(appctx->endp->flags & CS_EP_ORPHAN) && !(appctx_cs(appctx)->flags & CS_FL_ISBACK)) { + if (!se_fl_test(appctx->endp, CS_EP_ORPHAN) && !(appctx_cs(appctx)->flags & CS_FL_ISBACK)) { stream_free(appctx_strm(appctx)); return; } @@ -145,13 +145,13 @@ void *applet_reserve_svcctx(struct appctx *appctx, size_t size) */ void appctx_shut(struct appctx *appctx) { - if (appctx->endp->flags & (CS_EP_SHR|CS_EP_SHW)) + if (se_fl_test(appctx->endp, CS_EP_SHR | CS_EP_SHW)) return; if (appctx->applet->release) appctx->applet->release(appctx); - appctx->endp->flags |= CS_EP_SHRR | CS_EP_SHWN; + se_fl_set(appctx->endp, CS_EP_SHRR | CS_EP_SHWN); } /* Callback used to wake up an applet when a buffer is available. The applet @@ -167,7 +167,7 @@ int appctx_buf_available(void *arg) struct conn_stream *cs = appctx_cs(appctx); /* allocation requested ? */ - if (!(appctx->endp->flags & CS_EP_RXBLK_BUFF)) + if (!se_fl_test(appctx->endp, CS_EP_RXBLK_BUFF)) return 0; cs_rx_buff_rdy(cs); @@ -199,7 +199,7 @@ struct task *task_run_applet(struct task *t, void *context, unsigned int state) return NULL; } - if (app->endp->flags & CS_EP_ORPHAN) { + if (se_fl_test(app->endp, CS_EP_ORPHAN)) { /* Finalize init of orphan appctx. .init callback function must * be defined and it must finalize appctx startup. */ @@ -244,8 +244,8 @@ struct task *task_run_applet(struct task *t, void *context, unsigned int state) /* measure the call rate and check for anomalies when too high */ rate = update_freq_ctr(&app->call_rate, 1); if (rate >= 100000 && app->call_rate.prev_ctr && // looped more than 100k times over last second - ((b_size(cs_ib(cs)) && app->endp->flags & CS_EP_RXBLK_BUFF) || // asks for a buffer which is present - (b_size(cs_ib(cs)) && !b_data(cs_ib(cs)) && app->endp->flags & CS_EP_RXBLK_ROOM) || // asks for room in an empty buffer + ((b_size(cs_ib(cs)) && se_fl_test(app->endp, CS_EP_RXBLK_BUFF)) || // asks for a buffer which is present + (b_size(cs_ib(cs)) && !b_data(cs_ib(cs)) && se_fl_test(app->endp, CS_EP_RXBLK_ROOM)) || // asks for room in an empty buffer (b_data(cs_ob(cs)) && cs_tx_endp_ready(cs) && !cs_tx_blocked(cs)) || // asks for data already present (!b_data(cs_ib(cs)) && b_data(cs_ob(cs)) && // didn't return anything ... (cs_oc(cs)->flags & (CF_WRITE_PARTIAL|CF_SHUTW_NOW)) == CF_SHUTW_NOW))) { // ... and left data pending after a shut diff --git a/src/cache.c b/src/cache.c index dfe8863b3b..7ec3bc578c 100644 --- a/src/cache.c +++ b/src/cache.c @@ -1527,7 +1527,7 @@ static void http_cache_io_handler(struct appctx *appctx) /* no more data are expected. 
*/ res_htx->flags |= HTX_FL_EOM; res->flags |= CF_EOI; - appctx->endp->flags |= CS_EP_EOI; + se_fl_set(appctx->endp, CS_EP_EOI); appctx->st0 = HTX_CACHE_END; } diff --git a/src/cli.c b/src/cli.c index b533938772..a020394985 100644 --- a/src/cli.c +++ b/src/cli.c @@ -1108,7 +1108,7 @@ static void cli_io_handler(struct appctx *appctx) } break; default: /* abnormal state */ - appctx->endp->flags |= CS_EP_ERROR; + se_fl_set(appctx->endp, CS_EP_ERROR); break; } @@ -2782,7 +2782,7 @@ int pcli_wait_for_response(struct stream *s, struct channel *rep, int an_bit) s->srv_error(s, s->csb); return 1; } - s->csb->endp->flags &= CS_EP_DETACHED; + se_fl_clr(s->csb->endp, ~CS_EP_DETACHED); } sockaddr_free(&s->csb->dst); diff --git a/src/conn_stream.c b/src/conn_stream.c index 6ff58c7ef0..bc8d3e2ae3 100644 --- a/src/conn_stream.c +++ b/src/conn_stream.c @@ -87,7 +87,7 @@ void cs_endpoint_init(struct cs_endpoint *endp) endp->target = NULL; endp->conn = NULL; endp->cs = NULL; - endp->flags = CS_EP_NONE; + se_fl_setall(endp, CS_EP_NONE); } /* Tries to alloc an endpoint and initialize it. Returns NULL on failure. */ @@ -167,7 +167,7 @@ struct conn_stream *cs_new_from_endp(struct cs_endpoint *endp, struct session *s pool_free(pool_head_connstream, cs); cs = NULL; } - endp->flags &= ~CS_EP_ORPHAN; + se_fl_clr(endp, CS_EP_ORPHAN); return cs; } @@ -352,7 +352,7 @@ static void cs_detach_endp(struct conn_stream **csp) if (conn->mux) { if (cs->wait_event.events != 0) conn->mux->unsubscribe(cs, cs->wait_event.events, &cs->wait_event); - endp->flags |= CS_EP_ORPHAN; + se_fl_set(endp, CS_EP_ORPHAN); endp->cs = NULL; cs->endp = NULL; conn->mux->detach(endp); @@ -460,7 +460,7 @@ int cs_reset_endp(struct conn_stream *cs) sc_ep_set(cs, CS_EP_ERROR); return -1; } - new_endp->flags = sc_ep_get(cs) & CS_EP_APP_MASK; + se_fl_setall(new_endp, sc_ep_get(cs) & CS_EP_APP_MASK); /* The app is still attached, the cs will not be released */ cs_detach_endp(&cs); diff --git a/src/hlua.c b/src/hlua.c index 4b361a66b1..0b03befec3 100644 --- a/src/hlua.c +++ b/src/hlua.c @@ -9615,7 +9615,7 @@ void hlua_applet_http_fct(struct appctx *ctx) res_htx->flags |= HTX_FL_EOM; res->flags |= CF_EOI; - ctx->endp->flags |= CS_EP_EOI; + se_fl_set(ctx->endp, CS_EP_EOI); strm->txn->status = http_ctx->status; http_ctx->flags |= APPLET_RSP_SENT; } diff --git a/src/http_client.c b/src/http_client.c index 160ea1b93a..520f5f10b9 100644 --- a/src/http_client.c +++ b/src/http_client.c @@ -732,7 +732,8 @@ static void httpclient_applet_io_handler(struct appctx *appctx) /* if the request contains the HTX_FL_EOM, we finished the request part. 
*/ if (htx->flags & HTX_FL_EOM) { req->flags |= CF_EOI; - appctx->endp->flags |= CS_EP_EOI; + se_fl_set(appctx->endp, + CS_EP_EOI); appctx->st0 = HTTPCLIENT_S_RES_STLINE; } diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c index 77eb457e31..3ceec9c788 100644 --- a/src/mux_fcgi.c +++ b/src/mux_fcgi.c @@ -1007,7 +1007,7 @@ static inline void fcgi_strm_close(struct fcgi_strm *fstrm) if (!fstrm->id) fstrm->fconn->nb_reserved--; if (fstrm->endp->cs) { - if (!(fstrm->endp->flags & CS_EP_EOS) && !b_data(&fstrm->rxbuf)) + if (!se_fl_test(fstrm->endp, CS_EP_EOS) && !b_data(&fstrm->rxbuf)) fcgi_strm_notify_recv(fstrm); } fstrm->state = FCGI_SS_CLOSED; @@ -1039,7 +1039,7 @@ static void fcgi_strm_destroy(struct fcgi_strm *fstrm) */ LIST_DEL_INIT(&fstrm->send_list); tasklet_free(fstrm->shut_tl); - BUG_ON(fstrm->endp && !(fstrm->endp->flags & CS_EP_ORPHAN)); + BUG_ON(fstrm->endp && !se_fl_test(fstrm->endp, CS_EP_ORPHAN)); cs_endpoint_free(fstrm->endp); pool_free(pool_head_fcgi_strm, fstrm); @@ -1172,9 +1172,9 @@ static void fcgi_strm_wake_one_stream(struct fcgi_strm *fstrm) } if ((fconn->state == FCGI_CS_CLOSED || fconn->conn->flags & CO_FL_ERROR)) { - fstrm->endp->flags |= CS_EP_ERR_PENDING; - if (fstrm->endp->flags & CS_EP_EOS) - fstrm->endp->flags |= CS_EP_ERROR; + se_fl_set(fstrm->endp, CS_EP_ERR_PENDING); + if (se_fl_test(fstrm->endp, CS_EP_EOS)) + se_fl_set(fstrm->endp, CS_EP_ERROR); if (fstrm->state < FCGI_SS_ERROR) { fstrm->state = FCGI_SS_ERROR; @@ -2622,10 +2622,10 @@ static void fcgi_process_demux(struct fcgi_conn *fconn) fcgi_conn_read0_pending(fconn) || fstrm->state == FCGI_SS_CLOSED || (fstrm->flags & FCGI_SF_ES_RCVD) || - (fstrm->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) { + se_fl_test(fstrm->endp, CS_EP_ERROR | CS_EP_ERR_PENDING | CS_EP_EOS))) { /* we may have to signal the upper layers */ TRACE_DEVEL("notifying stream before switching SID", FCGI_EV_RX_RECORD|FCGI_EV_STRM_WAKE, fconn->conn, fstrm); - fstrm->endp->flags |= CS_EP_RCV_MORE; + se_fl_set(fstrm->endp, CS_EP_RCV_MORE); fcgi_strm_notify_recv(fstrm); } fstrm = tmp_fstrm; @@ -2703,10 +2703,10 @@ static void fcgi_process_demux(struct fcgi_conn *fconn) fcgi_conn_read0_pending(fconn) || fstrm->state == FCGI_SS_CLOSED || (fstrm->flags & FCGI_SF_ES_RCVD) || - (fstrm->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) { + se_fl_test(fstrm->endp, CS_EP_ERROR | CS_EP_ERR_PENDING | CS_EP_EOS))) { /* we may have to signal the upper layers */ TRACE_DEVEL("notifying stream before switching SID", FCGI_EV_RX_RECORD|FCGI_EV_STRM_WAKE, fconn->conn, fstrm); - fstrm->endp->flags |= CS_EP_RCV_MORE; + se_fl_set(fstrm->endp, CS_EP_RCV_MORE); fcgi_strm_notify_recv(fstrm); } @@ -3118,7 +3118,7 @@ static int fcgi_process(struct fcgi_conn *fconn) while (node) { fstrm = container_of(node, struct fcgi_strm, by_id); - if (fstrm->endp->cs && fstrm->endp->flags & CS_EP_WAIT_FOR_HS) + if (fstrm->endp->cs && se_fl_test(fstrm->endp, CS_EP_WAIT_FOR_HS)) fcgi_strm_notify_recv(fstrm); node = eb32_next(node); } @@ -3722,7 +3722,7 @@ static void fcgi_do_shutr(struct fcgi_strm *fstrm) * for example because of a "tcp-request content reject" rule that is * normally used to limit abuse. 
*/ - if ((fstrm->endp->flags & CS_EP_KILL_CONN) && + if (se_fl_test(fstrm->endp, CS_EP_KILL_CONN) && !(fconn->flags & (FCGI_CF_ABRTS_SENT|FCGI_CF_ABRTS_FAILED))) { TRACE_STATE("stream wants to kill the connection", FCGI_EV_STRM_SHUT, fconn->conn, fstrm); fconn->state = FCGI_CS_CLOSED; @@ -3783,7 +3783,7 @@ static void fcgi_do_shutw(struct fcgi_strm *fstrm) * for example because of a "tcp-request content reject" rule that is * normally used to limit abuse. */ - if ((fstrm->endp->flags & CS_EP_KILL_CONN) && + if (se_fl_test(fstrm->endp, CS_EP_KILL_CONN) && !(fconn->flags & (FCGI_CF_ABRTS_SENT|FCGI_CF_ABRTS_FAILED))) { TRACE_STATE("stream wants to kill the connection", FCGI_EV_STRM_SHUT, fconn->conn, fstrm); fconn->state = FCGI_CS_CLOSED; @@ -3953,18 +3953,18 @@ static size_t fcgi_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t co TRACE_STATE("fstrm rxbuf not allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK, fconn->conn, fstrm); if (b_data(&fstrm->rxbuf)) - fstrm->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM); + se_fl_set(fstrm->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); else { - fstrm->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM); + se_fl_clr(fstrm->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); if (fstrm->state == FCGI_SS_ERROR || (fstrm->h1m.state == H1_MSG_DONE)) { - fstrm->endp->flags |= CS_EP_EOI; + se_fl_set(fstrm->endp, CS_EP_EOI); if (!(fstrm->h1m.flags & (H1_MF_VER_11|H1_MF_XFER_LEN))) - fstrm->endp->flags |= CS_EP_EOS; + se_fl_set(fstrm->endp, CS_EP_EOS); } if (fcgi_conn_read0_pending(fconn)) - fstrm->endp->flags |= CS_EP_EOS; - if (fstrm->endp->flags & CS_EP_ERR_PENDING) - fstrm->endp->flags |= CS_EP_ERROR; + se_fl_set(fstrm->endp, CS_EP_EOS); + if (se_fl_test(fstrm->endp, CS_EP_ERR_PENDING)) + se_fl_set(fstrm->endp, CS_EP_ERROR); fcgi_release_buf(fconn, &fstrm->rxbuf); } @@ -4017,7 +4017,7 @@ static size_t fcgi_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t co if (id < 0) { fcgi_strm_close(fstrm); - fstrm->endp->flags |= CS_EP_ERROR; + se_fl_set(fstrm->endp, CS_EP_ERROR); TRACE_DEVEL("couldn't get a stream ID, leaving in error", FCGI_EV_STRM_SEND|FCGI_EV_FSTRM_ERR|FCGI_EV_STRM_ERR, fconn->conn, fstrm); return 0; } @@ -4187,8 +4187,8 @@ static int fcgi_show_fd(struct buffer *msg, struct connection *conn) (unsigned int)b_head_ofs(&fstrm->rxbuf), (unsigned int)b_size(&fstrm->rxbuf), fstrm->endp->cs); if (fstrm->endp) { - chunk_appendf(msg, " .endp.flg=0x%08x", fstrm->endp->flags); - if (!(fstrm->endp->flags & CS_EP_ORPHAN)) + chunk_appendf(msg, " .endp.flg=0x%08x", se_fl_get(fstrm->endp)); + if (!se_fl_test(fstrm->endp, CS_EP_ORPHAN)) chunk_appendf(msg, " .cs.flg=0x%08x .cs.app=%p", fstrm->endp->cs->flags, fstrm->endp->cs->app); } diff --git a/src/mux_h1.c b/src/mux_h1.c index 14e399fe1f..91aee7fd4b 100644 --- a/src/mux_h1.c +++ b/src/mux_h1.c @@ -428,7 +428,7 @@ static void h1_trace(enum trace_level level, uint64_t mask, const struct trace_s if (h1s) { chunk_appendf(&trace_buf, " h1s=%p(0x%08x)", h1s, h1s->flags); if (h1s->endp) - chunk_appendf(&trace_buf, " endp=%p(0x%08x)", h1s->endp, h1s->endp->flags); + chunk_appendf(&trace_buf, " endp=%p(0x%08x)", h1s->endp, se_fl_get(h1s->endp)); if (h1s->endp && h1s->endp->cs) chunk_appendf(&trace_buf, " cs=%p(0x%08x)", h1s->endp->cs, h1s->endp->cs->flags); } @@ -722,9 +722,9 @@ static struct conn_stream *h1s_new_cs(struct h1s *h1s, struct buffer *input) TRACE_ENTER(H1_EV_STRM_NEW, h1c->conn, h1s); if (h1s->flags & H1S_F_NOT_FIRST) - h1s->endp->flags |= CS_EP_NOT_FIRST; + se_fl_set(h1s->endp, CS_EP_NOT_FIRST); if 
(h1s->req.flags & H1_MF_UPG_WEBSOCKET) - h1s->endp->flags |= CS_EP_WEBSOCKET; + se_fl_set(h1s->endp, CS_EP_WEBSOCKET); if (!cs_new_from_endp(h1s->endp, h1c->conn->owner, input)) { TRACE_ERROR("CS allocation failure", H1_EV_STRM_NEW|H1_EV_STRM_END|H1_EV_STRM_ERR, h1c->conn, h1s); @@ -823,7 +823,7 @@ static struct h1s *h1c_frt_stream_new(struct h1c *h1c, struct conn_stream *cs, s goto fail; h1s->endp->target = h1s; h1s->endp->conn = h1c->conn; - h1s->endp->flags |= (CS_EP_T_MUX|CS_EP_ORPHAN); + se_fl_set(h1s->endp, CS_EP_T_MUX | CS_EP_ORPHAN); } h1s->sess = sess; @@ -911,7 +911,7 @@ static void h1s_destroy(struct h1s *h1s) } HA_ATOMIC_DEC(&h1c->px_counters->open_streams); - BUG_ON(h1s->endp && !(h1s->endp->flags & CS_EP_ORPHAN)); + BUG_ON(h1s->endp && !se_fl_test(h1s->endp, CS_EP_ORPHAN)); cs_endpoint_free(h1s->endp); pool_free(pool_head_h1s, h1s); } @@ -1906,11 +1906,11 @@ static size_t h1_process_demux(struct h1c *h1c, struct buffer *buf, size_t count /* Here h1s->endp->cs is always defined */ if (!(h1m->flags & H1_MF_CHNK) && (h1m->state == H1_MSG_DATA || (h1m->state == H1_MSG_TUNNEL))) { TRACE_STATE("notify the mux can use splicing", H1_EV_RX_DATA|H1_EV_RX_BODY, h1c->conn, h1s); - h1s->endp->flags |= CS_EP_MAY_SPLICE; + se_fl_set(h1s->endp, CS_EP_MAY_SPLICE); } else { TRACE_STATE("notify the mux can't use splicing anymore", H1_EV_RX_DATA|H1_EV_RX_BODY, h1c->conn, h1s); - h1s->endp->flags &= ~CS_EP_MAY_SPLICE; + se_fl_clr(h1s->endp, CS_EP_MAY_SPLICE); } /* Set EOI on conn-stream in DONE state iff: @@ -1922,7 +1922,7 @@ static size_t h1_process_demux(struct h1c *h1c, struct buffer *buf, size_t count */ if (((h1m->state == H1_MSG_DONE) && (h1m->flags & H1_MF_RESP)) || ((h1m->state == H1_MSG_DONE) && (h1s->meth != HTTP_METH_CONNECT) && !(h1m->flags & H1_MF_CONN_UPG))) - h1s->endp->flags |= CS_EP_EOI; + se_fl_set(h1s->endp, CS_EP_EOI); out: /* When Input data are pending for this message, notify upper layer that @@ -1932,20 +1932,20 @@ static size_t h1_process_demux(struct h1c *h1c, struct buffer *buf, size_t count * - Headers or trailers are pending to be copied. 
*/ if (h1s->flags & (H1S_F_RX_CONGESTED)) { - h1s->endp->flags |= CS_EP_RCV_MORE | CS_EP_WANT_ROOM; + se_fl_set(h1s->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); TRACE_STATE("waiting for more room", H1_EV_RX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s); } else { - h1s->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM); + se_fl_clr(h1s->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); if (h1s->flags & H1S_F_REOS) { - h1s->endp->flags |= CS_EP_EOS; + se_fl_set(h1s->endp, CS_EP_EOS); if (h1m->state >= H1_MSG_DONE || !(h1m->flags & H1_MF_XFER_LEN)) { /* DONE or TUNNEL or SHUTR without XFER_LEN, set * EOI on the conn-stream */ - h1s->endp->flags |= CS_EP_EOI; + se_fl_set(h1s->endp, CS_EP_EOI); } else if (h1m->state > H1_MSG_LAST_LF && h1m->state < H1_MSG_DONE) { - h1s->endp->flags |= CS_EP_ERROR; + se_fl_set(h1s->endp, CS_EP_ERROR); TRACE_ERROR("message aborted, set error on CS", H1_EV_RX_DATA|H1_EV_H1S_ERR, h1c->conn, h1s); } @@ -1963,7 +1963,7 @@ static size_t h1_process_demux(struct h1c *h1c, struct buffer *buf, size_t count err: htx_to_buf(htx, buf); - h1s->endp->flags |= CS_EP_EOI; + se_fl_set(h1s->endp, CS_EP_EOI); TRACE_DEVEL("leaving on error", H1_EV_RX_DATA|H1_EV_STRM_ERR, h1c->conn, h1s); return 0; } @@ -2572,7 +2572,7 @@ static size_t h1_process_mux(struct h1c *h1c, struct buffer *buf, size_t count) h1c->flags |= H1C_F_ST_ERROR; TRACE_ERROR("txn done but data waiting to be sent, set error on h1c", H1_EV_H1C_ERR, h1c->conn, h1s); } - h1s->endp->flags |= CS_EP_EOI; + se_fl_set(h1s->endp, CS_EP_EOI); } TRACE_LEAVE(H1_EV_TX_DATA, h1c->conn, h1s, chn_htx, (size_t[]){total}); @@ -3053,7 +3053,7 @@ static int h1_process(struct h1c * h1c) TRACE_STATE("read0 on connection", H1_EV_H1C_RECV, conn, h1s); } if ((h1c->flags & H1C_F_ST_ERROR) || ((conn->flags & CO_FL_ERROR) && !b_data(&h1c->ibuf))) - h1s->endp->flags |= CS_EP_ERROR; + se_fl_set(h1s->endp, CS_EP_ERROR); TRACE_POINT(H1_EV_STRM_WAKE, h1c->conn, h1s); h1_alert(h1s); } @@ -3109,9 +3109,9 @@ static int h1_process(struct h1c * h1c) BUG_ON(!h1s || h1c->flags & H1C_F_ST_READY); if (conn_xprt_read0_pending(conn) || (h1s->flags & H1S_F_REOS)) - h1s->endp->flags |= CS_EP_EOS; + se_fl_set(h1s->endp, CS_EP_EOS); if ((h1c->flags & H1C_F_ST_ERROR) || (conn->flags & CO_FL_ERROR)) - h1s->endp->flags |= CS_EP_ERROR; + se_fl_set(h1s->endp, CS_EP_ERROR); h1_alert(h1s); TRACE_DEVEL("waiting to release the CS before releasing the connection", H1_EV_H1C_WAKE); } @@ -3263,7 +3263,7 @@ struct task *h1_timeout_task(struct task *t, void *context, unsigned int state) if (h1c->flags & H1C_F_ST_ATTACHED) { /* Don't release the H1 connection right now, we must destroy the * attached CS first. Here, the H1C must not be READY */ - h1c->h1s->endp->flags |= (CS_EP_EOS|CS_EP_ERROR); + se_fl_set(h1c->h1s->endp, CS_EP_EOS | CS_EP_ERROR); h1_alert(h1c->h1s); h1_refresh_timeout(h1c); HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].idle_conns_lock); @@ -3475,9 +3475,9 @@ static void h1_shutr(struct conn_stream *cs, enum co_shr_mode mode) TRACE_ENTER(H1_EV_STRM_SHUT, h1c->conn, h1s, 0, (size_t[]){mode}); - if (h1s->endp->flags & CS_EP_SHR) + if (se_fl_test(h1s->endp, CS_EP_SHR)) goto end; - if (h1s->endp->flags & CS_EP_KILL_CONN) { + if (se_fl_test(h1s->endp, CS_EP_KILL_CONN)) { TRACE_STATE("stream wants to kill the connection", H1_EV_STRM_SHUT, h1c->conn, h1s); goto do_shutr; } @@ -3498,7 +3498,7 @@ static void h1_shutr(struct conn_stream *cs, enum co_shr_mode mode) do_shutr: /* NOTE: Be sure to handle abort (cf. 
h2_shutr) */ - if (h1s->endp->flags & CS_EP_SHR) + if (se_fl_test(h1s->endp, CS_EP_SHR)) goto end; if (conn_xprt_ready(h1c->conn) && h1c->conn->xprt->shutr) @@ -3518,9 +3518,9 @@ static void h1_shutw(struct conn_stream *cs, enum co_shw_mode mode) TRACE_ENTER(H1_EV_STRM_SHUT, h1c->conn, h1s, 0, (size_t[]){mode}); - if (h1s->endp->flags & CS_EP_SHW) + if (se_fl_test(h1s->endp, CS_EP_SHW)) goto end; - if (h1s->endp->flags & CS_EP_KILL_CONN) { + if (se_fl_test(h1s->endp, CS_EP_KILL_CONN)) { TRACE_STATE("stream wants to kill the connection", H1_EV_STRM_SHUT, h1c->conn, h1s); goto do_shutw; } @@ -3664,7 +3664,7 @@ static size_t h1_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t coun else TRACE_DEVEL("h1c ibuf not allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK, h1c->conn); - if ((flags & CO_RFL_BUF_FLUSH) && (h1s->endp->flags & CS_EP_MAY_SPLICE)) { + if ((flags & CO_RFL_BUF_FLUSH) && se_fl_test(h1s->endp, CS_EP_MAY_SPLICE)) { h1c->flags |= H1C_F_WANT_SPLICE; TRACE_STATE("Block xprt rcv_buf to flush stream's buffer (want_splice)", H1_EV_STRM_RECV, h1c->conn, h1s); } @@ -3702,7 +3702,7 @@ static size_t h1_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t coun } if (h1c->flags & H1C_F_ST_ERROR) { - h1s->endp->flags |= CS_EP_ERROR; + se_fl_set(h1s->endp, CS_EP_ERROR); TRACE_ERROR("H1C on error, leaving in error", H1_EV_STRM_SEND|H1_EV_H1C_ERR|H1_EV_H1S_ERR|H1_EV_STRM_ERR, h1c->conn, h1s); return 0; } @@ -3734,7 +3734,7 @@ static size_t h1_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t coun } if (h1c->flags & H1C_F_ST_ERROR) { - h1s->endp->flags |= CS_EP_ERROR; + se_fl_set(h1s->endp, CS_EP_ERROR); TRACE_ERROR("reporting error to the app-layer stream", H1_EV_STRM_SEND|H1_EV_H1S_ERR|H1_EV_STRM_ERR, h1c->conn, h1s); } @@ -3779,7 +3779,7 @@ static int h1_rcv_pipe(struct conn_stream *cs, struct pipe *pipe, unsigned int c if (ret > h1m->curr_len) { h1s->flags |= H1S_F_PARSING_ERROR; h1c->flags |= H1C_F_ST_ERROR; - h1s->endp->flags |= CS_EP_ERROR; + se_fl_set(h1s->endp, CS_EP_ERROR); TRACE_ERROR("too much payload, more than announced", H1_EV_RX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s); goto end; @@ -3804,7 +3804,7 @@ static int h1_rcv_pipe(struct conn_stream *cs, struct pipe *pipe, unsigned int c if (!(h1c->flags & H1C_F_WANT_SPLICE)) { TRACE_STATE("notify the mux can't use splicing anymore", H1_EV_STRM_RECV, h1c->conn, h1s); - h1s->endp->flags &= ~CS_EP_MAY_SPLICE; + se_fl_clr(h1s->endp, CS_EP_MAY_SPLICE); if (!(h1c->wait_event.events & SUB_RETRY_RECV)) { TRACE_STATE("restart receiving data, subscribing", H1_EV_STRM_RECV, h1c->conn, h1s); h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event); @@ -3837,7 +3837,7 @@ static int h1_snd_pipe(struct conn_stream *cs, struct pipe *pipe) if (ret > h1m->curr_len) { h1s->flags |= H1S_F_PROCESSING_ERROR; h1c->flags |= H1C_F_ST_ERROR; - h1s->endp->flags |= CS_EP_ERROR; + se_fl_set(h1s->endp, CS_EP_ERROR); TRACE_ERROR("too much payload, more than announced", H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s); goto end; @@ -3904,12 +3904,13 @@ static int h1_show_fd(struct buffer *msg, struct connection *conn) method = "UNKNOWN"; chunk_appendf(msg, " h1s=%p h1s.flg=0x%x .endp.flg=0x%x .req.state=%s .res.state=%s" " .meth=%s status=%d", - h1s, h1s->flags, h1s->endp->flags, + h1s, h1s->flags, se_fl_get(h1s->endp), h1m_state_str(h1s->req.state), h1m_state_str(h1s->res.state), method, h1s->status); if (h1s->endp) { - chunk_appendf(msg, " .endp.flg=0x%08x", 
h1s->endp->flags); - if (!(h1s->endp->flags & CS_EP_ORPHAN)) + chunk_appendf(msg, " .endp.flg=0x%08x", + se_fl_get(h1s->endp)); + if (!se_fl_test(h1s->endp, CS_EP_ORPHAN)) chunk_appendf(msg, " .cs.flg=0x%08x .cs.app=%p", h1s->endp->cs->flags, h1s->endp->cs->app); } diff --git a/src/mux_h2.c b/src/mux_h2.c index cd57d98f65..25969c90c7 100644 --- a/src/mux_h2.c +++ b/src/mux_h2.c @@ -1485,7 +1485,7 @@ static inline void h2s_close(struct h2s *h2s) if (!h2s->id) h2s->h2c->nb_reserved--; if (h2s->endp->cs) { - if (!(h2s->endp->flags & CS_EP_EOS) && !b_data(&h2s->rxbuf)) + if (!se_fl_test(h2s->endp, CS_EP_EOS) && !b_data(&h2s->rxbuf)) h2s_notify_recv(h2s); } HA_ATOMIC_DEC(&h2s->h2c->px_counters->open_streams); @@ -1524,7 +1524,7 @@ static void h2s_destroy(struct h2s *h2s) /* ditto, calling tasklet_free() here should be ok */ tasklet_free(h2s->shut_tl); - BUG_ON(h2s->endp && !(h2s->endp->flags & CS_EP_ORPHAN)); + BUG_ON(h2s->endp && !se_fl_test(h2s->endp, CS_EP_ORPHAN)); cs_endpoint_free(h2s->endp); pool_free(pool_head_h2s, h2s); @@ -1614,13 +1614,13 @@ static struct h2s *h2c_frt_stream_new(struct h2c *h2c, int id, struct buffer *in goto out_close; h2s->endp->target = h2s; h2s->endp->conn = h2c->conn; - h2s->endp->flags |= (CS_EP_T_MUX|CS_EP_ORPHAN|CS_EP_NOT_FIRST); + se_fl_set(h2s->endp, CS_EP_T_MUX | CS_EP_ORPHAN | CS_EP_NOT_FIRST); /* FIXME wrong analogy between ext-connect and websocket, this need to * be refine. */ if (flags & H2_SF_EXT_CONNECT_RCVD) - h2s->endp->flags |= CS_EP_WEBSOCKET; + se_fl_set(h2s->endp, CS_EP_WEBSOCKET); /* The stream will record the request's accept date (which is either the * end of the connection's or the date immediately after the previous @@ -2212,9 +2212,9 @@ static void h2s_wake_one_stream(struct h2s *h2s) if ((h2s->h2c->st0 >= H2_CS_ERROR || h2s->h2c->conn->flags & CO_FL_ERROR) || (h2s->h2c->last_sid > 0 && (!h2s->id || h2s->id > h2s->h2c->last_sid))) { - h2s->endp->flags |= CS_EP_ERR_PENDING; - if (h2s->endp->flags & CS_EP_EOS) - h2s->endp->flags |= CS_EP_ERROR; + se_fl_set(h2s->endp, CS_EP_ERR_PENDING); + if (se_fl_test(h2s->endp, CS_EP_EOS)) + se_fl_set(h2s->endp, CS_EP_ERROR); if (h2s->st < H2_SS_ERROR) h2s->st = H2_SS_ERROR; @@ -2978,7 +2978,7 @@ static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s) if (h2c->dff & H2_F_HEADERS_END_STREAM) h2s->flags |= H2_SF_ES_RCVD; - if (h2s->endp->flags & CS_EP_ERROR && h2s->st < H2_SS_ERROR) + if (se_fl_test(h2s->endp, CS_EP_ERROR) && h2s->st < H2_SS_ERROR) h2s->st = H2_SS_ERROR; else if (h2s->flags & H2_SF_ES_RCVD) { if (h2s->st == H2_SS_OPEN) @@ -3479,10 +3479,10 @@ static void h2_process_demux(struct h2c *h2c) h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED || (h2s->flags & H2_SF_ES_RCVD) || - (h2s->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) { + se_fl_test(h2s->endp, CS_EP_ERROR | CS_EP_ERR_PENDING | CS_EP_EOS))) { /* we may have to signal the upper layers */ TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_STRM_WAKE, h2c->conn, h2s); - h2s->endp->flags |= CS_EP_RCV_MORE; + se_fl_set(h2s->endp, CS_EP_RCV_MORE); h2s_notify_recv(h2s); } h2s = tmp_h2s; @@ -3650,10 +3650,10 @@ static void h2_process_demux(struct h2c *h2c) h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED || (h2s->flags & H2_SF_ES_RCVD) || - (h2s->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) { + se_fl_test(h2s->endp, CS_EP_ERROR | CS_EP_ERR_PENDING | CS_EP_EOS))) { /* we may have to signal the upper layers */ TRACE_DEVEL("notifying stream before switching SID", 
H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn, h2s); - h2s->endp->flags |= CS_EP_RCV_MORE; + se_fl_set(h2s->endp, CS_EP_RCV_MORE); h2s_notify_recv(h2s); } @@ -4102,7 +4102,7 @@ static int h2_process(struct h2c *h2c) while (node) { h2s = container_of(node, struct h2s, by_id); - if (h2s->endp->flags & CS_EP_WAIT_FOR_HS) + if (se_fl_test(h2s->endp, CS_EP_WAIT_FOR_HS)) h2s_notify_recv(h2s); node = eb32_next(node); } @@ -4520,7 +4520,7 @@ static void h2_do_shutr(struct h2s *h2s) * normally used to limit abuse. In this case we schedule a goaway to * close the connection. */ - if ((h2s->endp->flags & CS_EP_KILL_CONN) && + if (se_fl_test(h2s->endp, CS_EP_KILL_CONN) && !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) { TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s); h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM); @@ -4598,7 +4598,7 @@ static void h2_do_shutw(struct h2s *h2s) * normally used to limit abuse. In this case we schedule a goaway to * close the connection. */ - if ((h2s->endp->flags & CS_EP_KILL_CONN) && + if (se_fl_test(h2s->endp, CS_EP_KILL_CONN) && !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) { TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s); h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM); @@ -5338,7 +5338,7 @@ static size_t h2s_frt_make_resp_headers(struct h2s *h2s, struct htx *htx) break; } - if (!h2s->endp->cs || h2s->endp->flags & CS_EP_SHW) { + if (!h2s->endp->cs || se_fl_test(h2s->endp, CS_EP_SHW)) { /* Response already closed: add END_STREAM */ es_now = 1; } @@ -5758,7 +5758,7 @@ static size_t h2s_bck_make_req_headers(struct h2s *h2s, struct htx *htx) break; } - if (!h2s->endp->cs || h2s->endp->flags & CS_EP_SHW) { + if (!h2s->endp->cs || se_fl_test(h2s->endp, CS_EP_SHW)) { /* Request already closed: add END_STREAM */ es_now = 1; } @@ -6482,7 +6482,7 @@ static size_t h2_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t coun if (h2s_htx->flags & HTX_FL_PARSING_ERROR) { buf_htx->flags |= HTX_FL_PARSING_ERROR; if (htx_is_empty(buf_htx)) - h2s->endp->flags |= CS_EP_EOI; + se_fl_set(h2s->endp, CS_EP_EOI); } else if (htx_is_empty(h2s_htx)) buf_htx->flags |= (h2s_htx->flags & HTX_FL_EOM); @@ -6494,19 +6494,19 @@ static size_t h2_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t coun end: if (b_data(&h2s->rxbuf)) - h2s->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM); + se_fl_set(h2s->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); else { - h2s->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM); + se_fl_clr(h2s->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); if (h2s->flags & H2_SF_ES_RCVD) { - h2s->endp->flags |= CS_EP_EOI; + se_fl_set(h2s->endp, CS_EP_EOI); /* Add EOS flag for tunnel */ if (h2s->flags & H2_SF_BODY_TUNNEL) - h2s->endp->flags |= CS_EP_EOS; + se_fl_set(h2s->endp, CS_EP_EOS); } if (h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED) - h2s->endp->flags |= CS_EP_EOS; - if (h2s->endp->flags & CS_EP_ERR_PENDING) - h2s->endp->flags |= CS_EP_ERROR; + se_fl_set(h2s->endp, CS_EP_EOS); + if (se_fl_test(h2s->endp, CS_EP_ERR_PENDING)) + se_fl_set(h2s->endp, CS_EP_ERROR); if (b_size(&h2s->rxbuf)) { b_free(&h2s->rxbuf); offer_buffers(NULL, 1); @@ -6558,7 +6558,7 @@ static size_t h2_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t coun } if (h2s->h2c->st0 >= H2_CS_ERROR) { - h2s->endp->flags |= CS_EP_ERROR; + se_fl_set(h2s->endp, CS_EP_ERROR); TRACE_DEVEL("connection is in error, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s); return 0; } @@ 
-6572,7 +6572,7 @@ static size_t h2_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t coun int32_t id = h2c_get_next_sid(h2s->h2c); if (id < 0) { - h2s->endp->flags |= CS_EP_ERROR; + se_fl_set(h2s->endp, CS_EP_ERROR); TRACE_DEVEL("couldn't get a stream ID, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s); return 0; } @@ -6684,10 +6684,10 @@ static size_t h2_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t coun !b_data(&h2s->h2c->dbuf) && (h2s->flags & (H2_SF_BLK_SFCTL | H2_SF_BLK_MFCTL))) { TRACE_DEVEL("fctl with shutr, reporting error to app-layer", H2_EV_H2S_SEND|H2_EV_STRM_SEND|H2_EV_STRM_ERR, h2s->h2c->conn, h2s); - if (h2s->endp->flags & CS_EP_EOS) - h2s->endp->flags |= CS_EP_ERROR; + if (se_fl_test(h2s->endp, CS_EP_EOS)) + se_fl_set(h2s->endp, CS_EP_ERROR); else - h2s->endp->flags |= CS_EP_ERR_PENDING; + se_fl_set(h2s->endp, CS_EP_ERR_PENDING); } if (total > 0 && !(h2s->flags & H2_SF_BLK_SFCTL) && @@ -6763,7 +6763,7 @@ static int h2_show_fd(struct buffer *msg, struct connection *conn) chunk_appendf(msg, "endp=%p", h2s->endp); if (h2s->endp) chunk_appendf(msg, "(.flg=0x%08x)", - h2s->endp->flags); + se_fl_get(h2s->endp)); chunk_appendf(&trash, " .subs=%p", h2s->subs); if (h2s->subs) { diff --git a/src/mux_pt.c b/src/mux_pt.c index 338e4fe343..d57e82e372 100644 --- a/src/mux_pt.c +++ b/src/mux_pt.c @@ -154,7 +154,7 @@ static void pt_trace(enum trace_level level, uint64_t mask, const struct trace_s /* Display conn and cs info, if defined (pointer + flags) */ chunk_appendf(&trace_buf, " - conn=%p(0x%08x)", conn, conn->flags); - chunk_appendf(&trace_buf, " endp=%p(0x%08x)", ctx->endp, ctx->endp->flags); + chunk_appendf(&trace_buf, " endp=%p(0x%08x)", ctx->endp, se_fl_get(ctx->endp)); if (cs) chunk_appendf(&trace_buf, " cs=%p(0x%08x)", cs, cs->flags); @@ -208,7 +208,7 @@ static void mux_pt_destroy(struct mux_pt_ctx *ctx) if (conn && ctx->wait_event.events != 0) conn->xprt->unsubscribe(conn, conn->xprt_ctx, ctx->wait_event.events, &ctx->wait_event); - BUG_ON(ctx->endp && !(ctx->endp->flags & CS_EP_ORPHAN)); + BUG_ON(ctx->endp && !se_fl_test(ctx->endp, CS_EP_ORPHAN)); cs_endpoint_free(ctx->endp); pool_free(pool_head_pt_ctx, ctx); @@ -233,7 +233,7 @@ struct task *mux_pt_io_cb(struct task *t, void *tctx, unsigned int status) struct mux_pt_ctx *ctx = tctx; TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn); - if (!(ctx->endp->flags & CS_EP_ORPHAN)) { + if (!se_fl_test(ctx->endp, CS_EP_ORPHAN)) { /* There's a small race condition. * mux_pt_io_cb() is only supposed to be called if we have no * stream attached. 
However, maybe the tasklet got woken up, @@ -300,7 +300,7 @@ static int mux_pt_init(struct connection *conn, struct proxy *prx, struct sessio } ctx->endp->target = ctx; ctx->endp->conn = conn; - ctx->endp->flags |= (CS_EP_T_MUX|CS_EP_ORPHAN); + se_fl_set(ctx->endp, CS_EP_T_MUX | CS_EP_ORPHAN); cs = cs_new_from_endp(ctx->endp, sess, input); if (!cs) { @@ -315,9 +315,9 @@ static int mux_pt_init(struct connection *conn, struct proxy *prx, struct sessio ctx->endp = cs->endp; } conn->ctx = ctx; - ctx->endp->flags |= CS_EP_RCV_MORE; + se_fl_set(ctx->endp, CS_EP_RCV_MORE); if (global.tune.options & GTUNE_USE_SPLICE) - ctx->endp->flags |= CS_EP_MAY_SPLICE; + se_fl_set(ctx->endp, CS_EP_MAY_SPLICE); TRACE_LEAVE(PT_EV_CONN_NEW, conn); return 0; @@ -342,7 +342,7 @@ static int mux_pt_wake(struct connection *conn) int ret = 0; TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn); - if (!(ctx->endp->flags & CS_EP_ORPHAN)) { + if (!se_fl_test(ctx->endp, CS_EP_ORPHAN)) { ret = ctx->endp->cs->data_cb->wake ? ctx->endp->cs->data_cb->wake(ctx->endp->cs) : 0; if (ret < 0) { @@ -383,7 +383,7 @@ static int mux_pt_attach(struct connection *conn, struct cs_endpoint *endp, stru if (cs_attach_mux(endp->cs, ctx, conn) < 0) return -1; ctx->endp = endp; - ctx->endp->flags |= CS_EP_RCV_MORE; + se_fl_set(ctx->endp, CS_EP_RCV_MORE); TRACE_LEAVE(PT_EV_STRM_NEW, conn, endp->cs); return 0; @@ -406,7 +406,7 @@ static void mux_pt_destroy_meth(void *ctx) struct mux_pt_ctx *pt = ctx; TRACE_POINT(PT_EV_CONN_END, pt->conn, pt->endp->cs); - if ((pt->endp->flags & CS_EP_ORPHAN) || pt->conn->ctx != pt) { + if (se_fl_test(pt->endp, CS_EP_ORPHAN) || pt->conn->ctx != pt) { if (pt->conn->ctx != pt) { pt->endp = NULL; } @@ -444,7 +444,7 @@ static int mux_pt_used_streams(struct connection *conn) { struct mux_pt_ctx *ctx = conn->ctx; - return (!(ctx->endp->flags & CS_EP_ORPHAN) ? 1 : 0); + return (!se_fl_test(ctx->endp, CS_EP_ORPHAN) ? 
1 : 0); } /* returns the number of streams still available on a connection */ @@ -460,15 +460,15 @@ static void mux_pt_shutr(struct conn_stream *cs, enum co_shr_mode mode) TRACE_ENTER(PT_EV_STRM_SHUT, conn, cs); - if (ctx->endp->flags & CS_EP_SHR) + if (se_fl_test(ctx->endp, CS_EP_SHR)) return; - ctx->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM); + se_fl_clr(ctx->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); if (conn_xprt_ready(conn) && conn->xprt->shutr) conn->xprt->shutr(conn, conn->xprt_ctx, (mode == CO_SHR_DRAIN)); else if (mode == CO_SHR_DRAIN) conn_ctrl_drain(conn); - if (ctx->endp->flags & CS_EP_SHW) + if (se_fl_test(ctx->endp, CS_EP_SHW)) conn_full_close(conn); TRACE_LEAVE(PT_EV_STRM_SHUT, conn, cs); @@ -481,12 +481,12 @@ static void mux_pt_shutw(struct conn_stream *cs, enum co_shw_mode mode) TRACE_ENTER(PT_EV_STRM_SHUT, conn, cs); - if (ctx->endp->flags & CS_EP_SHW) + if (se_fl_test(ctx->endp, CS_EP_SHW)) return; if (conn_xprt_ready(conn) && conn->xprt->shutw) conn->xprt->shutw(conn, conn->xprt_ctx, (mode == CO_SHW_NORMAL)); - if (!(ctx->endp->flags & CS_EP_SHR)) + if (!se_fl_test(ctx->endp, CS_EP_SHR)) conn_sock_shutw(conn, (mode == CO_SHW_NORMAL)); else conn_full_close(conn); @@ -516,19 +516,19 @@ static size_t mux_pt_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t TRACE_ENTER(PT_EV_RX_DATA, conn, cs, buf, (size_t[]){count}); if (!count) { - ctx->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM); + se_fl_set(ctx->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); goto end; } b_realign_if_empty(buf); ret = conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, count, flags); if (conn_xprt_read0_pending(conn)) { - ctx->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM); - ctx->endp->flags |= CS_EP_EOS; + se_fl_clr(ctx->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); + se_fl_set(ctx->endp, CS_EP_EOS); TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, cs); } if (conn->flags & CO_FL_ERROR) { - ctx->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM); - ctx->endp->flags |= CS_EP_ERROR; + se_fl_clr(ctx->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); + se_fl_set(ctx->endp, CS_EP_ERROR); TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, cs); } end: @@ -551,7 +551,7 @@ static size_t mux_pt_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t b_del(buf, ret); if (conn->flags & CO_FL_ERROR) { - ctx->endp->flags |= CS_EP_ERROR; + se_fl_set(ctx->endp, CS_EP_ERROR); TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, cs); } @@ -596,11 +596,11 @@ static int mux_pt_rcv_pipe(struct conn_stream *cs, struct pipe *pipe, unsigned i ret = conn->xprt->rcv_pipe(conn, conn->xprt_ctx, pipe, count); if (conn_xprt_read0_pending(conn)) { - ctx->endp->flags |= CS_EP_EOS; + se_fl_set(ctx->endp, CS_EP_EOS); TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, cs); } if (conn->flags & CO_FL_ERROR) { - ctx->endp->flags |= CS_EP_ERROR; + se_fl_set(ctx->endp, CS_EP_ERROR); TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, cs); } @@ -619,7 +619,7 @@ static int mux_pt_snd_pipe(struct conn_stream *cs, struct pipe *pipe) ret = conn->xprt->snd_pipe(conn, conn->xprt_ctx, pipe); if (conn->flags & CO_FL_ERROR) { - ctx->endp->flags |= CS_EP_ERROR; + se_fl_set(ctx->endp, CS_EP_ERROR); TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, cs); } diff --git a/src/mux_quic.c b/src/mux_quic.c index b7d1e17f43..bcb2a81c04 100644 --- a/src/mux_quic.c +++ b/src/mux_quic.c @@ -213,7 +213,7 @@ void qcs_free(struct qcs *qcs) qc_stream_desc_release(qcs->stream); - 
BUG_ON(qcs->endp && !(qcs->endp->flags & CS_EP_ORPHAN)); + BUG_ON(qcs->endp && !se_fl_test(qcs->endp, CS_EP_ORPHAN)); cs_endpoint_free(qcs->endp); eb64_delete(&qcs->by_id); @@ -1499,15 +1499,15 @@ static size_t qc_rcv_buf(struct conn_stream *cs, struct buffer *buf, end: if (b_data(&qcs->rx.app_buf)) { - qcs->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM); + se_fl_set(qcs->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); } else { - qcs->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM); - if (qcs->endp->flags & CS_EP_ERR_PENDING) - qcs->endp->flags |= CS_EP_ERROR; + se_fl_clr(qcs->endp, CS_EP_RCV_MORE | CS_EP_WANT_ROOM); + if (se_fl_test(qcs->endp, CS_EP_ERR_PENDING)) + se_fl_set(qcs->endp, CS_EP_ERROR); if (fin) - qcs->endp->flags |= CS_EP_EOI; + se_fl_set(qcs->endp, CS_EP_EOI); if (b_size(&qcs->rx.app_buf)) { b_free(&qcs->rx.app_buf); @@ -1586,9 +1586,9 @@ static int qc_wake_some_streams(struct qcc *qcc) continue; if (qcc->conn->flags & CO_FL_ERROR) { - qcs->endp->flags |= CS_EP_ERR_PENDING; - if (qcs->endp->flags & CS_EP_EOS) - qcs->endp->flags |= CS_EP_ERROR; + se_fl_set(qcs->endp, CS_EP_ERR_PENDING); + if (se_fl_test(qcs->endp, CS_EP_EOS)) + se_fl_set(qcs->endp, CS_EP_ERROR); if (qcs->subs) { qcs_notify_recv(qcs); diff --git a/src/stats.c b/src/stats.c index 49d6ef0ed1..012e84c6ac 100644 --- a/src/stats.c +++ b/src/stats.c @@ -4349,7 +4349,7 @@ static void http_stats_io_handler(struct appctx *appctx) } res_htx->flags |= HTX_FL_EOM; res->flags |= CF_EOI; - appctx->endp->flags |= CS_EP_EOI; + se_fl_set(appctx->endp, CS_EP_EOI); appctx->st0 = STAT_HTTP_END; }
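
The conversion above targets the se_fl_*() accessors that already exist in the
tree at this point (the patch itself calls them from include/haproxy/conn_stream.h,
for instance in cs_ep_set_error()). As a rough illustration of what these helpers
do, here is a minimal, self-contained sketch. It is illustrative only, not HAProxy
code: the structure layout and the flag values below are made up for the demo, and
the real definitions may differ in detail.

/* se_fl_sketch.c - illustrative only, not part of this patch */
#include <stdio.h>

/* Minimal stand-in for HAProxy's cs_endpoint; only the flags word matters here. */
struct cs_endpoint {
	unsigned int flags;
};

#define CS_EP_EOI    0x00000001u   /* hypothetical flag values for the demo */
#define CS_EP_ORPHAN 0x00000002u

/* Sketch of the accessor shapes used by endp_flags.cocci. */
static inline void se_fl_setall(struct cs_endpoint *endp, unsigned int all) { endp->flags = all; }
static inline void se_fl_set(struct cs_endpoint *endp, unsigned int on)     { endp->flags |= on; }
static inline void se_fl_clr(struct cs_endpoint *endp, unsigned int off)    { endp->flags &= ~off; }
static inline unsigned int se_fl_get(const struct cs_endpoint *endp)        { return endp->flags; }
static inline unsigned int se_fl_test(const struct cs_endpoint *endp, unsigned int test)
{
	return !!(endp->flags & test);
}

int main(void)
{
	struct cs_endpoint ep = { .flags = 0 };

	/* old style: ep.flags |= CS_EP_EOI;  new style: */
	se_fl_set(&ep, CS_EP_EOI);

	/* old style: !(ep.flags & CS_EP_ORPHAN);  new style: */
	printf("orphan=%u flags=0x%08x\n", se_fl_test(&ep, CS_EP_ORPHAN), se_fl_get(&ep));
	return 0;
}

With helpers of this shape, the mechanical mapping applied by the script is easy to
read back from the hunks: endp->flags |= X becomes se_fl_set(endp, X),
endp->flags &= ~X becomes se_fl_clr(endp, X), a test of endp->flags & X becomes
se_fl_test(endp, X), a plain read becomes se_fl_get(endp), and a full assignment
becomes se_fl_setall(endp, X). The script preserves existing semantics rather than
fixing them, which is why the pre-existing s->csb->endp->flags &= CS_EP_DETACHED in
src/cli.c comes out as se_fl_clr(s->csb->endp, ~CS_EP_DETACHED).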