From: Willy Tarreau Date: Tue, 16 Apr 2024 06:55:20 +0000 (+0200) Subject: MINOR: dynbuf: pass a criticality argument to b_alloc() X-Git-Tag: v3.0-dev11~24 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=72d0dcda8e80b9a7d76a0468fa0a751ded20a7cb;p=thirdparty%2Fhaproxy.git MINOR: dynbuf: pass a criticality argument to b_alloc() The goal is to indicate how critical the allocation is, between the least one (growing an existing buffer ring) and the topmost one (boot time allocation for the life of the process). The 3 tcp-based muxes (h1, h2, fcgi) use a common allocation function to try to allocate, otherwise subscribe. There's currently no distinction of direction nor part that tries to allocate, and this should be revisited to improve this situation, particularly when we consider that mux-h2 can reduce its Tx allocations if needed. For now, 4 main levels are planned, to translate how the data travels inside haproxy from a producer to a consumer: - MUX_RX: buffer used to receive data from the OS - SE_RX: buffer used to place a transformation of the RX data for a mux, or to produce a response for an applet - CHANNEL: the channel buffer for sync recv - MUX_TX: buffer used to transfer data from the channel to the outside, generally a mux but there can be a few specificities (e.g. http client's response buffer passed to the application, which also gets a transformation of the channel data). The other levels are a bit different in that they don't strictly need to allocate for the first two ones, or they're permanent for the last one (used by compression). --- diff --git a/doc/internals/api/buffer-api.txt b/doc/internals/api/buffer-api.txt index ac353004e5..1e09ff971d 100644 --- a/doc/internals/api/buffer-api.txt +++ b/doc/internals/api/buffer-api.txt @@ -548,11 +548,15 @@ buffer_almost_full | const buffer *buf| returns true if the buffer is not null | | are used. A waiting buffer will match. 
--------------------+------------------+--------------------------------------- b_alloc | buffer *buf | ensures that <buf> is allocated or - | ret: buffer * | allocates a buffer and assigns it to - | | *buf. If no memory is available, (1) - | | is assigned instead with a zero size. + | enum dynbuf_crit | allocates a buffer and assigns it to + | criticality | *buf. If no memory is available, (1) + | ret: buffer * | is assigned instead with a zero size. | | The allocated buffer is returned, or - | | NULL in case no memory is available + | | NULL in case no memory is available. + | | The criticality indicates how the + | | buffer might be used and how likely it + | | is that the allocated memory will be + | | quickly released. --------------------+------------------+--------------------------------------- __b_free | buffer *buf | releases <buf> which must be allocated | ret: void | and marks it empty diff --git a/include/haproxy/applet.h b/include/haproxy/applet.h index 705a8837e8..baa6402848 100644 --- a/include/haproxy/applet.h +++ b/include/haproxy/applet.h @@ -91,7 +91,7 @@ static inline struct buffer *appctx_get_buf(struct appctx *appctx, struct buffer struct buffer *buf = NULL; if (likely(!LIST_INLIST(&appctx->buffer_wait.list)) && - unlikely((buf = b_alloc(bptr)) == NULL)) { + unlikely((buf = b_alloc(bptr, DB_CHANNEL)) == NULL)) { appctx->buffer_wait.target = appctx; appctx->buffer_wait.wakeup_cb = appctx_buf_available; LIST_APPEND(&th_ctx->buffer_wq, &appctx->buffer_wait.list); diff --git a/include/haproxy/channel.h b/include/haproxy/channel.h index 1b5c8a06fa..54f1e297ce 100644 --- a/include/haproxy/channel.h +++ b/include/haproxy/channel.h @@ -915,7 +915,7 @@ static inline int ci_space_for_replace(const struct channel *chn) */ static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *wait) { - if (b_alloc(&chn->buf) != NULL) + if (b_alloc(&chn->buf, DB_CHANNEL) != NULL) return 1; if (!LIST_INLIST(&wait->list)) diff --git 
a/include/haproxy/dynbuf-t.h b/include/haproxy/dynbuf-t.h index 078ea41e7d..ed229bc36e 100644 --- a/include/haproxy/dynbuf-t.h +++ b/include/haproxy/dynbuf-t.h @@ -24,6 +24,41 @@ #include +/* Describe the levels of criticality of each allocation based on the expected + * use case. We distinguish multiple use cases, from the least important to the + * most important one: + * - allocate a buffer to grow a non-empty ring: this should be avoided when + * resources are becoming scarce. + * - allocate a buffer for very unlikely situations (e.g. L7 retries, early + * data). These may acceptably fail on low resources. + * - buffer used to receive data in the mux at the connection level. Please + * note that this level might later be resplit into two levels, one for + * initial data such as a new request, which may be rejected and postponed, + * and one for data continuation, which may be needed to complete a request + * or receive some control data allowing another buffer to be flushed. + * - buffer used to produce data at the endpoint for internal consumption, + * typically mux streams and applets. These buffers will be allocated until + * a channel picks them. Not processing them might sometimes lead to a mux + * being clogged and blocking other streams from progressing. + * - channel buffer: this one may be allocated to perform a synchronous recv, + * or just preparing for the possibility of an instant response. The + * response channel always allocates a buffer when entering process_stream, + * which is immediately released if unused when leaving. + * - buffer used by the mux sending side, often allocated by the mux's + * snd_buf() handler to encode the outgoing channel's data. + * - buffer permanently allocated at boot (e.g. temporary compression + * buffers). If these fail, we can't boot. + */ +enum dynbuf_crit { + DB_GROW_RING = 0, // used to grow an existing buffer ring + DB_UNLIKELY, // unlikely to be needed (e.g. 
L7 retries) + DB_MUX_RX, // buffer used to store incoming data from the system + DB_SE_RX, // buffer used to store incoming data for the channel + DB_CHANNEL, // buffer used by the channel for synchronous reads + DB_MUX_TX, // buffer used to store outgoing mux data + DB_PERMANENT, // buffers permanently allocated. +}; + /* an element of the list. It represents an object that need to * acquire a buffer to continue its process. */ struct buffer_wait { diff --git a/include/haproxy/dynbuf.h b/include/haproxy/dynbuf.h index a89800ca74..b8407951c9 100644 --- a/include/haproxy/dynbuf.h +++ b/include/haproxy/dynbuf.h @@ -63,10 +63,11 @@ static inline int buffer_almost_full(const struct buffer *buf) * performance. Due to the difficult buffer_wait management, they are not * subject to forced allocation failures either. */ -#define b_alloc(_buf) \ +#define b_alloc(_buf, _crit) \ ({ \ char *_area; \ struct buffer *_retbuf = _buf; \ + enum dynbuf_crit _criticality __maybe_unused = _crit; \ \ if (!_retbuf->size) { \ *_retbuf = BUF_WANTED; \ diff --git a/include/haproxy/quic_conn.h b/include/haproxy/quic_conn.h index 92caed4e55..60bc4071fe 100644 --- a/include/haproxy/quic_conn.h +++ b/include/haproxy/quic_conn.h @@ -138,7 +138,7 @@ static inline struct ncbuf *quic_get_ncbuf(struct ncbuf *ncbuf) if (!ncb_is_null(ncbuf)) return ncbuf; - b_alloc(&buf); + b_alloc(&buf, DB_MUX_RX); BUG_ON(b_is_null(&buf)); *ncbuf = ncb_make(buf.area, buf.size, 0); diff --git a/src/applet.c b/src/applet.c index b21979a525..dca53e7888 100644 --- a/src/applet.c +++ b/src/applet.c @@ -446,14 +446,14 @@ int appctx_buf_available(void *arg) struct appctx *appctx = arg; struct stconn *sc = appctx_sc(appctx); - if (applet_fl_test(appctx, APPCTX_FL_INBLK_ALLOC) && b_alloc(&appctx->inbuf)) { + if (applet_fl_test(appctx, APPCTX_FL_INBLK_ALLOC) && b_alloc(&appctx->inbuf, DB_CHANNEL)) { applet_fl_clr(appctx, APPCTX_FL_INBLK_ALLOC); TRACE_STATE("unblocking appctx, inbuf allocated", 
APPLET_EV_RECV|APPLET_EV_BLK|APPLET_EV_WAKE, appctx); task_wakeup(appctx->t, TASK_WOKEN_RES); return 1; } - if (applet_fl_test(appctx, APPCTX_FL_OUTBLK_ALLOC) && b_alloc(&appctx->outbuf)) { + if (applet_fl_test(appctx, APPCTX_FL_OUTBLK_ALLOC) && b_alloc(&appctx->outbuf, DB_CHANNEL)) { applet_fl_clr(appctx, APPCTX_FL_OUTBLK_ALLOC); TRACE_STATE("unblocking appctx, outbuf allocated", APPLET_EV_SEND|APPLET_EV_BLK|APPLET_EV_WAKE, appctx); task_wakeup(appctx->t, TASK_WOKEN_RES); @@ -471,7 +471,7 @@ int appctx_buf_available(void *arg) return 0; /* allocation possible now ? */ - if (!b_alloc(&sc_ic(sc)->buf)) { + if (!b_alloc(&sc_ic(sc)->buf, DB_CHANNEL)) { sc_need_buff(sc); return 0; } diff --git a/src/check.c b/src/check.c index 4e84fdba90..3a72b0c3dc 100644 --- a/src/check.c +++ b/src/check.c @@ -1505,13 +1505,13 @@ int check_buf_available(void *target) BUG_ON(!check->sc); - if ((check->state & CHK_ST_IN_ALLOC) && b_alloc(&check->bi)) { + if ((check->state & CHK_ST_IN_ALLOC) && b_alloc(&check->bi, DB_CHANNEL)) { TRACE_STATE("unblocking check, input buffer allocated", CHK_EV_TCPCHK_EXP|CHK_EV_RX_BLK, check); check->state &= ~CHK_ST_IN_ALLOC; tasklet_wakeup(check->sc->wait_event.tasklet); return 1; } - if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc(&check->bo)) { + if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc(&check->bo, DB_CHANNEL)) { TRACE_STATE("unblocking check, output buffer allocated", CHK_EV_TCPCHK_SND|CHK_EV_TX_BLK, check); check->state &= ~CHK_ST_OUT_ALLOC; tasklet_wakeup(check->sc->wait_event.tasklet); @@ -1529,7 +1529,7 @@ struct buffer *check_get_buf(struct check *check, struct buffer *bptr) struct buffer *buf = NULL; if (likely(!LIST_INLIST(&check->buf_wait.list)) && - unlikely((buf = b_alloc(bptr)) == NULL)) { + unlikely((buf = b_alloc(bptr, DB_CHANNEL)) == NULL)) { check->buf_wait.target = check; check->buf_wait.wakeup_cb = check_buf_available; LIST_APPEND(&th_ctx->buffer_wq, &check->buf_wait.list); diff --git a/src/compression.c b/src/compression.c 
index 7b75461d2d..a4464e09b6 100644 --- a/src/compression.c +++ b/src/compression.c @@ -300,7 +300,7 @@ static int rfc195x_add_data(struct comp_ctx *comp_ctx, const char *in_data, int * data and need a buffer now. We reuse the same buffer, as it's * not used out of the scope of a series of add_data()*, end(). */ - if (b_alloc(&tmpbuf) == NULL) + if (b_alloc(&tmpbuf, DB_PERMANENT) == NULL) return -1; /* no memory */ b_reset(&tmpbuf); memcpy(b_tail(&tmpbuf), comp_ctx->direct_ptr, comp_ctx->direct_len); diff --git a/src/flt_http_comp.c b/src/flt_http_comp.c index 30f9d2a628..e601ff6721 100644 --- a/src/flt_http_comp.c +++ b/src/flt_http_comp.c @@ -73,9 +73,9 @@ comp_flt_init(struct proxy *px, struct flt_conf *fconf) static int comp_flt_init_per_thread(struct proxy *px, struct flt_conf *fconf) { - if (b_alloc(&tmpbuf) == NULL) + if (b_alloc(&tmpbuf, DB_PERMANENT) == NULL) return -1; - if (b_alloc(&zbuf) == NULL) + if (b_alloc(&zbuf, DB_PERMANENT) == NULL) return -1; return 0; } diff --git a/src/flt_spoe.c b/src/flt_spoe.c index 16a6535df0..562e7c36fd 100644 --- a/src/flt_spoe.c +++ b/src/flt_spoe.c @@ -2855,7 +2855,7 @@ spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait) if (LIST_INLIST(&buffer_wait->list)) LIST_DEL_INIT(&buffer_wait->list); - if (b_alloc(buf)) + if (b_alloc(buf, DB_CHANNEL)) return 1; LIST_APPEND(&th_ctx->buffer_wq, &buffer_wait->list); diff --git a/src/h3.c b/src/h3.c index 01ef2502ee..7e681cec05 100644 --- a/src/h3.c +++ b/src/h3.c @@ -565,7 +565,7 @@ static ssize_t h3_headers_to_htx(struct qcs *qcs, const struct buffer *buf, goto out; } - if (!b_alloc(&htx_buf)) { + if (!b_alloc(&htx_buf, DB_SE_RX)) { TRACE_ERROR("HTX buffer alloc failure", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs); h3c->err = H3_INTERNAL_ERROR; len = -1; diff --git a/src/hq_interop.c b/src/hq_interop.c index 02ef12626a..c88f888be8 100644 --- a/src/hq_interop.c +++ b/src/hq_interop.c @@ -27,7 +27,7 @@ static ssize_t hq_interop_rcv_buf(struct qcs *qcs, 
struct buffer *b, int fin) if (!fin) return 0; - b_alloc(&htx_buf); + b_alloc(&htx_buf, DB_MUX_RX); htx = htx_from_buf(&htx_buf); /* skip method */ diff --git a/src/http_client.c b/src/http_client.c index d8327804ef..7a8d7fdf61 100644 --- a/src/http_client.c +++ b/src/http_client.c @@ -284,7 +284,7 @@ int httpclient_req_gen(struct httpclient *hc, const struct ist url, enum http_me if (!(hc->flags & HC_F_HTTPPROXY)) flags |= HTX_SL_F_NORMALIZED_URI; - if (!b_alloc(&hc->req.buf)) + if (!b_alloc(&hc->req.buf, DB_CHANNEL)) goto error; if (meth >= HTTP_METH_OTHER) @@ -402,7 +402,7 @@ int httpclient_req_xfer(struct httpclient *hc, struct ist src, int end) int ret = 0; struct htx *htx; - if (!b_alloc(&hc->req.buf)) + if (!b_alloc(&hc->req.buf, DB_CHANNEL)) goto error; htx = htx_from_buf(&hc->req.buf); @@ -917,7 +917,7 @@ void httpclient_applet_io_handler(struct appctx *appctx) if (htx_is_empty(htx)) goto out; - if (!b_alloc(&hc->res.buf)) + if (!b_alloc(&hc->res.buf, DB_MUX_TX)) goto out; if (b_full(&hc->res.buf)) diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c index fb8517982c..71609532f7 100644 --- a/src/mux_fcgi.c +++ b/src/mux_fcgi.c @@ -488,14 +488,14 @@ static int fcgi_buf_available(void *target) struct fcgi_conn *fconn = target; struct fcgi_strm *fstrm; - if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc(&fconn->dbuf)) { + if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc(&fconn->dbuf, DB_MUX_RX)) { TRACE_STATE("unblocking fconn, dbuf allocated", FCGI_EV_FCONN_RECV|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn); fconn->flags &= ~FCGI_CF_DEM_DALLOC; fcgi_conn_restart_reading(fconn, 1); return 1; } - if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc(br_tail(fconn->mbuf))) { + if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc(br_tail(fconn->mbuf), DB_MUX_TX)) { TRACE_STATE("unblocking fconn, mbuf allocated", FCGI_EV_FCONN_SEND|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn); fconn->flags &= ~FCGI_CF_MUX_MALLOC; if (fconn->flags & FCGI_CF_DEM_MROOM) { @@ -507,7 
+507,7 @@ static int fcgi_buf_available(void *target) if ((fconn->flags & FCGI_CF_DEM_SALLOC) && (fstrm = fcgi_conn_st_by_id(fconn, fconn->dsi)) && fcgi_strm_sc(fstrm) && - b_alloc(&fstrm->rxbuf)) { + b_alloc(&fstrm->rxbuf, DB_SE_RX)) { TRACE_STATE("unblocking fstrm, rxbuf allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK|FCGI_EV_STRM_WAKE, fconn->conn, fstrm); fconn->flags &= ~FCGI_CF_DEM_SALLOC; fcgi_conn_restart_reading(fconn, 1); @@ -523,7 +523,7 @@ static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer struct buffer *buf = NULL; if (likely(!LIST_INLIST(&fconn->buf_wait.list)) && - unlikely((buf = b_alloc(bptr)) == NULL)) { + unlikely((buf = b_alloc(bptr, DB_MUX_RX)) == NULL)) { fconn->buf_wait.target = fconn; fconn->buf_wait.wakeup_cb = fcgi_buf_available; LIST_APPEND(&th_ctx->buffer_wq, &fconn->buf_wait.list); diff --git a/src/mux_h1.c b/src/mux_h1.c index 85ba0c5fa8..a886c00c3c 100644 --- a/src/mux_h1.c +++ b/src/mux_h1.c @@ -501,7 +501,7 @@ static int h1_buf_available(void *target) { struct h1c *h1c = target; - if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc(&h1c->ibuf)) { + if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc(&h1c->ibuf, DB_MUX_RX)) { TRACE_STATE("unblocking h1c, ibuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn); h1c->flags &= ~H1C_F_IN_ALLOC; if (h1_recv_allowed(h1c)) @@ -509,7 +509,7 @@ static int h1_buf_available(void *target) return 1; } - if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc(&h1c->obuf)) { + if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc(&h1c->obuf, DB_MUX_TX)) { TRACE_STATE("unblocking h1s, obuf allocated", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1c->h1s); h1c->flags &= ~H1C_F_OUT_ALLOC; if (h1c->h1s) @@ -517,7 +517,7 @@ static int h1_buf_available(void *target) return 1; } - if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc(&h1c->h1s->rxbuf)) { + if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc(&h1c->h1s->rxbuf, DB_SE_RX)) { TRACE_STATE("unblocking h1c, 
stream rxbuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn); h1c->flags &= ~H1C_F_IN_SALLOC; tasklet_wakeup(h1c->wait_event.tasklet); @@ -535,7 +535,7 @@ static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr) struct buffer *buf = NULL; if (likely(!LIST_INLIST(&h1c->buf_wait.list)) && - unlikely((buf = b_alloc(bptr)) == NULL)) { + unlikely((buf = b_alloc(bptr, DB_MUX_RX)) == NULL)) { h1c->buf_wait.target = h1c; h1c->buf_wait.wakeup_cb = h1_buf_available; LIST_APPEND(&th_ctx->buffer_wq, &h1c->buf_wait.list); diff --git a/src/mux_h2.c b/src/mux_h2.c index c8eb1b0c48..28dd457df6 100644 --- a/src/mux_h2.c +++ b/src/mux_h2.c @@ -812,13 +812,13 @@ static int h2_buf_available(void *target) struct h2c *h2c = target; struct h2s *h2s; - if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) { + if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf, DB_MUX_RX)) { h2c->flags &= ~H2_CF_DEM_DALLOC; h2c_restart_reading(h2c, 1); return 1; } - if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) { + if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf), DB_MUX_TX)) { h2c->flags &= ~H2_CF_MUX_MALLOC; if (h2c->flags & H2_CF_DEM_MROOM) { @@ -830,7 +830,7 @@ static int h2_buf_available(void *target) if ((h2c->flags & H2_CF_DEM_SALLOC) && (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s_sc(h2s) && - b_alloc(&h2s->rxbuf)) { + b_alloc(&h2s->rxbuf, DB_SE_RX)) { h2c->flags &= ~H2_CF_DEM_SALLOC; h2c_restart_reading(h2c, 1); return 1; @@ -844,7 +844,7 @@ static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr) struct buffer *buf = NULL; if (likely(!LIST_INLIST(&h2c->buf_wait.list)) && - unlikely((buf = b_alloc(bptr)) == NULL)) { + unlikely((buf = b_alloc(bptr, DB_MUX_RX)) == NULL)) { h2c->buf_wait.target = h2c; h2c->buf_wait.wakeup_cb = h2_buf_available; LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list); diff --git a/src/mux_quic.c b/src/mux_quic.c index b90077b04e..23df3f14d5 100644 --- 
a/src/mux_quic.c +++ b/src/mux_quic.c @@ -432,7 +432,7 @@ static struct ncbuf *qcs_get_ncbuf(struct qcs *qcs, struct ncbuf *ncbuf) struct buffer buf = BUF_NULL; if (ncb_is_null(ncbuf)) { - if (!b_alloc(&buf)) + if (!b_alloc(&buf, DB_MUX_RX)) return NULL; *ncbuf = ncb_make(buf.area, buf.size, 0); @@ -956,7 +956,7 @@ static int qcc_decode_qcs(struct qcc *qcc, struct qcs *qcs) */ struct buffer *qcc_get_stream_rxbuf(struct qcs *qcs) { - return b_alloc(&qcs->rx.app_buf); + return b_alloc(&qcs->rx.app_buf, DB_MUX_RX); } /* Allocate if needed and retrieve stream buffer for data emission. @@ -1000,7 +1000,7 @@ struct buffer *qcc_get_stream_txbuf(struct qcs *qcs, int *err) goto out; } - if (!b_alloc(out)) { + if (!b_alloc(out, DB_MUX_TX)) { TRACE_ERROR("buffer alloc failure", QMUX_EV_QCS_SEND, qcc->conn, qcs); *err = 1; goto out; diff --git a/src/quic_sock.c b/src/quic_sock.c index 5bd65c7064..31ba2497d6 100644 --- a/src/quic_sock.c +++ b/src/quic_sock.c @@ -772,7 +772,7 @@ int qc_rcv_buf(struct quic_conn *qc) max_sz = params->max_udp_payload_size; do { - if (!b_alloc(&buf)) + if (!b_alloc(&buf, DB_MUX_RX)) break; /* TODO subscribe for memory again available. */ b_reset(&buf); diff --git a/src/quic_tx.c b/src/quic_tx.c index d50b52f707..396e8501b8 100644 --- a/src/quic_tx.c +++ b/src/quic_tx.c @@ -88,7 +88,7 @@ static inline void free_quic_tx_packet(struct quic_conn *qc, struct buffer *qc_txb_alloc(struct quic_conn *qc) { struct buffer *buf = &qc->tx.buf; - if (!b_alloc(buf)) + if (!b_alloc(buf, DB_MUX_TX)) return NULL; return buf; diff --git a/src/ssl_sock.c b/src/ssl_sock.c index 11fb908d53..f8c4291f3b 100644 --- a/src/ssl_sock.c +++ b/src/ssl_sock.c @@ -5717,7 +5717,7 @@ static int ssl_sock_init(struct connection *conn, void **xprt_ctx) #ifdef SSL_READ_EARLY_DATA_SUCCESS if (bc->ssl_conf.early_data) { - b_alloc(&ctx->early_buf); + b_alloc(&ctx->early_buf, DB_MUX_RX); SSL_set_max_early_data(ctx->ssl, /* Only allow early data if we managed to allocate * a buffer. 
diff --git a/src/stconn.c b/src/stconn.c index a00f8e1140..607740305a 100644 --- a/src/stconn.c +++ b/src/stconn.c @@ -1675,7 +1675,7 @@ int sc_conn_send(struct stconn *sc) if (s->txn->req.msg_state != HTTP_MSG_DONE) s->txn->flags &= ~TX_L7_RETRY; else { - if (b_alloc(&s->txn->l7_buffer) == NULL) + if (b_alloc(&s->txn->l7_buffer, DB_UNLIKELY) == NULL) s->txn->flags &= ~TX_L7_RETRY; else { memcpy(b_orig(&s->txn->l7_buffer), diff --git a/src/stream.c b/src/stream.c index f233d6ef4c..6699585303 100644 --- a/src/stream.c +++ b/src/stream.c @@ -321,10 +321,10 @@ int stream_buf_available(void *arg) struct stream *s = arg; if (!s->req.buf.size && !sc_ep_have_ff_data(s->scb) && s->scf->flags & SC_FL_NEED_BUFF && - b_alloc(&s->req.buf)) + b_alloc(&s->req.buf, DB_CHANNEL)) sc_have_buff(s->scf); else if (!s->res.buf.size && !sc_ep_have_ff_data(s->scf) && s->scb->flags & SC_FL_NEED_BUFF && - b_alloc(&s->res.buf)) + b_alloc(&s->res.buf, DB_CHANNEL)) sc_have_buff(s->scb); else return 0; @@ -752,7 +752,7 @@ void stream_free(struct stream *s) */ static int stream_alloc_work_buffer(struct stream *s) { - if (b_alloc(&s->res.buf)) + if (b_alloc(&s->res.buf, DB_CHANNEL)) return 1; return 0; }