                    |                  | are used. A waiting buffer will match.
--------------------+------------------+---------------------------------------
b_alloc             | buffer *buf      | ensures that <buf> is allocated or
-                   | ret: buffer *    | allocates a buffer and assigns it to
-                   |                  | *buf. If no memory is available, (1)
-                   |                  | is assigned instead with a zero size.
+                   | enum dynbuf_crit | allocates a buffer and assigns it to
+                   | criticality      | *buf. If no memory is available, (1)
+                   | ret: buffer *    | is assigned instead with a zero size.
                    |                  | The allocated buffer is returned, or
-                   |                  | NULL in case no memory is available
+                   |                  | NULL in case no memory is available.
+                   |                  | The criticality indicates how the
+                   |                  | buffer might be used and how likely it
+                   |                  | is that the allocated memory will be
+                   |                  | quickly released.
--------------------+------------------+---------------------------------------
__b_free            | buffer *buf      | releases <buf> which must be allocated
                    | ret: void        | and marks it empty
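As a quick illustration of the contract described above, a caller passes the buffer pointer together with the criticality of the use case and treats a NULL return as an out-of-memory condition. The sketch below is illustrative only and not part of the patch; the function name is invented:

/* Illustrative only: allocate a regular channel buffer and report failure.
 * If <buf> is already allocated, b_alloc() leaves it untouched and returns it.
 */
static inline int alloc_chn_buf_example(struct buffer *buf)
{
        if (b_alloc(buf, DB_CHANNEL) == NULL)
                return 0; /* no memory: *buf now holds (1) with a zero size */
        return 1;         /* buffer is usable */
}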
struct buffer *buf = NULL;
if (likely(!LIST_INLIST(&appctx->buffer_wait.list)) &&
- unlikely((buf = b_alloc(bptr)) == NULL)) {
+ unlikely((buf = b_alloc(bptr, DB_CHANNEL)) == NULL)) {
appctx->buffer_wait.target = appctx;
appctx->buffer_wait.wakeup_cb = appctx_buf_available;
LIST_APPEND(&th_ctx->buffer_wq, &appctx->buffer_wait.list);
*/
static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *wait)
{
- if (b_alloc(&chn->buf) != NULL)
+ if (b_alloc(&chn->buf, DB_CHANNEL) != NULL)
return 1;
if (!LIST_INLIST(&wait->list))
#include <haproxy/list-t.h>
+/* Describe the levels of criticality of each allocation based on the expected
+ * use case. We distinguish multiple use cases, from the least important to the
+ * most important one:
+ * - allocate a buffer to grow a non-empty ring: this should be avoided when
+ * resources are becoming scarce.
+ * - allocate a buffer for very unlikely situations (e.g. L7 retries, early
+ * data). These may acceptably fail on low resources.
+ * - buffer used to receive data in the mux at the connection level. Please
+ * note that this level might later be split into two levels, one for
+ * initial data such as a new request, which may be rejected and postponed,
+ * and one for data continuation, which may be needed to complete a request
+ * or receive some control data allowing another buffer to be flushed.
+ * - buffer used to produce data at the endpoint for internal consumption,
+ * typically mux streams and applets. These buffers will be allocated until
+ * a channel picks them. Not processing them might sometimes lead to a mux
+ * being clogged and blocking other streams from progressing.
+ * - channel buffer: this one may be allocated to perform a synchronous recv,
+ * or just to prepare for the possibility of an instant response. The
+ * response channel always allocates a buffer when entering process_stream,
+ * which is immediately released if unused when leaving.
+ * - buffer used by the mux sending side, often allocated by the mux's
+ * snd_buf() handler to encode the outgoing channel's data.
+ * - buffer permanently allocated at boot (e.g. temporary compression
+ * buffers). If these fail, we can't boot.
+ */
+enum dynbuf_crit {
+ DB_GROW_RING = 0, // used to grow an existing buffer ring
+ DB_UNLIKELY, // unlikely to be needed (e.g. L7 retries)
+ DB_MUX_RX, // buffer used to store incoming data from the system
+ DB_SE_RX, // buffer used to store incoming data for the channel
+ DB_CHANNEL, // buffer used by the channel for synchronous reads
+ DB_MUX_TX, // buffer used to store outgoing mux data
+ DB_PERMANENT, // buffers permanently allocated.
+};
+
/* an element of the <buffer_wq> list. It represents an object that needs to
* acquire a buffer to continue its processing. */
struct buffer_wait {
* performance. Due to the difficult buffer_wait management, they are not
* subject to forced allocation failures either.
*/
-#define b_alloc(_buf) \
+#define b_alloc(_buf, _crit) \
({ \
char *_area; \
struct buffer *_retbuf = _buf; \
+ enum dynbuf_crit _criticality __maybe_unused = _crit; \
\
if (!_retbuf->size) { \
*_retbuf = BUF_WANTED; \
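For now the macro only records the criticality in a __maybe_unused local, so the behaviour of b_alloc() is unchanged; the intent, per the comment in dynbuf-t.h above, is to let the allocator refuse the least critical requests first when buffers become scarce. A purely hypothetical sketch of such a policy follows (the function name and thresholds are invented, not part of this patch):

/* Hypothetical policy sketch: deny low-criticality allocations first when
 * few buffers remain. Everything here is illustrative; the patch itself
 * does not implement any policy yet.
 */
static inline int dynbuf_may_alloc(enum dynbuf_crit crit, unsigned int avail)
{
        if (crit == DB_PERMANENT)
                return 1;           /* boot-time allocations must not fail */
        if (crit <= DB_UNLIKELY)
                return avail > 100; /* opportunistic uses give up early */
        return avail > 10;          /* keep a small reserve for everyone else */
}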
if (!ncb_is_null(ncbuf))
return ncbuf;
- b_alloc(&buf);
+ b_alloc(&buf, DB_MUX_RX);
BUG_ON(b_is_null(&buf));
*ncbuf = ncb_make(buf.area, buf.size, 0);
struct appctx *appctx = arg;
struct stconn *sc = appctx_sc(appctx);
- if (applet_fl_test(appctx, APPCTX_FL_INBLK_ALLOC) && b_alloc(&appctx->inbuf)) {
+ if (applet_fl_test(appctx, APPCTX_FL_INBLK_ALLOC) && b_alloc(&appctx->inbuf, DB_CHANNEL)) {
applet_fl_clr(appctx, APPCTX_FL_INBLK_ALLOC);
TRACE_STATE("unblocking appctx, inbuf allocated", APPLET_EV_RECV|APPLET_EV_BLK|APPLET_EV_WAKE, appctx);
task_wakeup(appctx->t, TASK_WOKEN_RES);
return 1;
}
- if (applet_fl_test(appctx, APPCTX_FL_OUTBLK_ALLOC) && b_alloc(&appctx->outbuf)) {
+ if (applet_fl_test(appctx, APPCTX_FL_OUTBLK_ALLOC) && b_alloc(&appctx->outbuf, DB_CHANNEL)) {
applet_fl_clr(appctx, APPCTX_FL_OUTBLK_ALLOC);
TRACE_STATE("unblocking appctx, outbuf allocated", APPLET_EV_SEND|APPLET_EV_BLK|APPLET_EV_WAKE, appctx);
task_wakeup(appctx->t, TASK_WOKEN_RES);
return 0;
/* allocation possible now ? */
- if (!b_alloc(&sc_ic(sc)->buf)) {
+ if (!b_alloc(&sc_ic(sc)->buf, DB_CHANNEL)) {
sc_need_buff(sc);
return 0;
}
BUG_ON(!check->sc);
- if ((check->state & CHK_ST_IN_ALLOC) && b_alloc(&check->bi)) {
+ if ((check->state & CHK_ST_IN_ALLOC) && b_alloc(&check->bi, DB_CHANNEL)) {
TRACE_STATE("unblocking check, input buffer allocated", CHK_EV_TCPCHK_EXP|CHK_EV_RX_BLK, check);
check->state &= ~CHK_ST_IN_ALLOC;
tasklet_wakeup(check->sc->wait_event.tasklet);
return 1;
}
- if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc(&check->bo)) {
+ if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc(&check->bo, DB_CHANNEL)) {
TRACE_STATE("unblocking check, output buffer allocated", CHK_EV_TCPCHK_SND|CHK_EV_TX_BLK, check);
check->state &= ~CHK_ST_OUT_ALLOC;
tasklet_wakeup(check->sc->wait_event.tasklet);
struct buffer *buf = NULL;
if (likely(!LIST_INLIST(&check->buf_wait.list)) &&
- unlikely((buf = b_alloc(bptr)) == NULL)) {
+ unlikely((buf = b_alloc(bptr, DB_CHANNEL)) == NULL)) {
check->buf_wait.target = check;
check->buf_wait.wakeup_cb = check_buf_available;
LIST_APPEND(&th_ctx->buffer_wq, &check->buf_wait.list);
* data and need a buffer now. We reuse the same buffer, as it's
* not used out of the scope of a series of add_data()*, end().
*/
- if (b_alloc(&tmpbuf) == NULL)
+ if (b_alloc(&tmpbuf, DB_PERMANENT) == NULL)
return -1; /* no memory */
b_reset(&tmpbuf);
memcpy(b_tail(&tmpbuf), comp_ctx->direct_ptr, comp_ctx->direct_len);
static int
comp_flt_init_per_thread(struct proxy *px, struct flt_conf *fconf)
{
- if (b_alloc(&tmpbuf) == NULL)
+ if (b_alloc(&tmpbuf, DB_PERMANENT) == NULL)
return -1;
- if (b_alloc(&zbuf) == NULL)
+ if (b_alloc(&zbuf, DB_PERMANENT) == NULL)
return -1;
return 0;
}
if (LIST_INLIST(&buffer_wait->list))
LIST_DEL_INIT(&buffer_wait->list);
- if (b_alloc(buf))
+ if (b_alloc(buf, DB_CHANNEL))
return 1;
LIST_APPEND(&th_ctx->buffer_wq, &buffer_wait->list);
goto out;
}
- if (!b_alloc(&htx_buf)) {
+ if (!b_alloc(&htx_buf, DB_SE_RX)) {
TRACE_ERROR("HTX buffer alloc failure", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
h3c->err = H3_INTERNAL_ERROR;
len = -1;
if (!fin)
return 0;
- b_alloc(&htx_buf);
+ b_alloc(&htx_buf, DB_MUX_RX);
htx = htx_from_buf(&htx_buf);
/* skip method */
if (!(hc->flags & HC_F_HTTPPROXY))
flags |= HTX_SL_F_NORMALIZED_URI;
- if (!b_alloc(&hc->req.buf))
+ if (!b_alloc(&hc->req.buf, DB_CHANNEL))
goto error;
if (meth >= HTTP_METH_OTHER)
int ret = 0;
struct htx *htx;
- if (!b_alloc(&hc->req.buf))
+ if (!b_alloc(&hc->req.buf, DB_CHANNEL))
goto error;
htx = htx_from_buf(&hc->req.buf);
if (htx_is_empty(htx))
goto out;
- if (!b_alloc(&hc->res.buf))
+ if (!b_alloc(&hc->res.buf, DB_MUX_TX))
goto out;
if (b_full(&hc->res.buf))
struct fcgi_conn *fconn = target;
struct fcgi_strm *fstrm;
- if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc(&fconn->dbuf)) {
+ if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc(&fconn->dbuf, DB_MUX_RX)) {
TRACE_STATE("unblocking fconn, dbuf allocated", FCGI_EV_FCONN_RECV|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
fconn->flags &= ~FCGI_CF_DEM_DALLOC;
fcgi_conn_restart_reading(fconn, 1);
return 1;
}
- if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc(br_tail(fconn->mbuf))) {
+ if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc(br_tail(fconn->mbuf), DB_MUX_TX)) {
TRACE_STATE("unblocking fconn, mbuf allocated", FCGI_EV_FCONN_SEND|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
fconn->flags &= ~FCGI_CF_MUX_MALLOC;
if (fconn->flags & FCGI_CF_DEM_MROOM) {
if ((fconn->flags & FCGI_CF_DEM_SALLOC) &&
(fstrm = fcgi_conn_st_by_id(fconn, fconn->dsi)) && fcgi_strm_sc(fstrm) &&
- b_alloc(&fstrm->rxbuf)) {
+ b_alloc(&fstrm->rxbuf, DB_SE_RX)) {
TRACE_STATE("unblocking fstrm, rxbuf allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
fconn->flags &= ~FCGI_CF_DEM_SALLOC;
fcgi_conn_restart_reading(fconn, 1);
struct buffer *buf = NULL;
if (likely(!LIST_INLIST(&fconn->buf_wait.list)) &&
- unlikely((buf = b_alloc(bptr)) == NULL)) {
+ unlikely((buf = b_alloc(bptr, DB_MUX_RX)) == NULL)) {
fconn->buf_wait.target = fconn;
fconn->buf_wait.wakeup_cb = fcgi_buf_available;
LIST_APPEND(&th_ctx->buffer_wq, &fconn->buf_wait.list);
{
struct h1c *h1c = target;
- if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc(&h1c->ibuf)) {
+ if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc(&h1c->ibuf, DB_MUX_RX)) {
TRACE_STATE("unblocking h1c, ibuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
h1c->flags &= ~H1C_F_IN_ALLOC;
if (h1_recv_allowed(h1c))
return 1;
}
- if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc(&h1c->obuf)) {
+ if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc(&h1c->obuf, DB_MUX_TX)) {
TRACE_STATE("unblocking h1s, obuf allocated", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1c->h1s);
h1c->flags &= ~H1C_F_OUT_ALLOC;
if (h1c->h1s)
return 1;
}
- if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc(&h1c->h1s->rxbuf)) {
+ if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc(&h1c->h1s->rxbuf, DB_SE_RX)) {
TRACE_STATE("unblocking h1c, stream rxbuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
h1c->flags &= ~H1C_F_IN_SALLOC;
tasklet_wakeup(h1c->wait_event.tasklet);
struct buffer *buf = NULL;
if (likely(!LIST_INLIST(&h1c->buf_wait.list)) &&
- unlikely((buf = b_alloc(bptr)) == NULL)) {
+ unlikely((buf = b_alloc(bptr, DB_MUX_RX)) == NULL)) {
h1c->buf_wait.target = h1c;
h1c->buf_wait.wakeup_cb = h1_buf_available;
LIST_APPEND(&th_ctx->buffer_wq, &h1c->buf_wait.list);
struct h2c *h2c = target;
struct h2s *h2s;
- if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
+ if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf, DB_MUX_RX)) {
h2c->flags &= ~H2_CF_DEM_DALLOC;
h2c_restart_reading(h2c, 1);
return 1;
}
- if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
+ if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf), DB_MUX_TX)) {
h2c->flags &= ~H2_CF_MUX_MALLOC;
if (h2c->flags & H2_CF_DEM_MROOM) {
if ((h2c->flags & H2_CF_DEM_SALLOC) &&
(h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s_sc(h2s) &&
- b_alloc(&h2s->rxbuf)) {
+ b_alloc(&h2s->rxbuf, DB_SE_RX)) {
h2c->flags &= ~H2_CF_DEM_SALLOC;
h2c_restart_reading(h2c, 1);
return 1;
struct buffer *buf = NULL;
if (likely(!LIST_INLIST(&h2c->buf_wait.list)) &&
- unlikely((buf = b_alloc(bptr)) == NULL)) {
+ unlikely((buf = b_alloc(bptr, DB_MUX_RX)) == NULL)) {
h2c->buf_wait.target = h2c;
h2c->buf_wait.wakeup_cb = h2_buf_available;
LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list);
struct buffer buf = BUF_NULL;
if (ncb_is_null(ncbuf)) {
- if (!b_alloc(&buf))
+ if (!b_alloc(&buf, DB_MUX_RX))
return NULL;
*ncbuf = ncb_make(buf.area, buf.size, 0);
*/
struct buffer *qcc_get_stream_rxbuf(struct qcs *qcs)
{
- return b_alloc(&qcs->rx.app_buf);
+ return b_alloc(&qcs->rx.app_buf, DB_MUX_RX);
}
/* Allocate if needed and retrieve <qcs> stream buffer for data emission.
goto out;
}
- if (!b_alloc(out)) {
+ if (!b_alloc(out, DB_MUX_TX)) {
TRACE_ERROR("buffer alloc failure", QMUX_EV_QCS_SEND, qcc->conn, qcs);
*err = 1;
goto out;
max_sz = params->max_udp_payload_size;
do {
- if (!b_alloc(&buf))
+ if (!b_alloc(&buf, DB_MUX_RX))
break; /* TODO subscribe for memory again available. */
b_reset(&buf);
struct buffer *qc_txb_alloc(struct quic_conn *qc)
{
struct buffer *buf = &qc->tx.buf;
- if (!b_alloc(buf))
+ if (!b_alloc(buf, DB_MUX_TX))
return NULL;
return buf;
#ifdef SSL_READ_EARLY_DATA_SUCCESS
if (bc->ssl_conf.early_data) {
- b_alloc(&ctx->early_buf);
+ b_alloc(&ctx->early_buf, DB_MUX_RX);
SSL_set_max_early_data(ctx->ssl,
/* Only allow early data if we managed to allocate
* a buffer.
if (s->txn->req.msg_state != HTTP_MSG_DONE)
s->txn->flags &= ~TX_L7_RETRY;
else {
- if (b_alloc(&s->txn->l7_buffer) == NULL)
+ if (b_alloc(&s->txn->l7_buffer, DB_UNLIKELY) == NULL)
s->txn->flags &= ~TX_L7_RETRY;
else {
memcpy(b_orig(&s->txn->l7_buffer),
struct stream *s = arg;
if (!s->req.buf.size && !sc_ep_have_ff_data(s->scb) && s->scf->flags & SC_FL_NEED_BUFF &&
- b_alloc(&s->req.buf))
+ b_alloc(&s->req.buf, DB_CHANNEL))
sc_have_buff(s->scf);
else if (!s->res.buf.size && !sc_ep_have_ff_data(s->scf) && s->scb->flags & SC_FL_NEED_BUFF &&
- b_alloc(&s->res.buf))
+ b_alloc(&s->res.buf, DB_CHANNEL))
sc_have_buff(s->scb);
else
return 0;
*/
static int stream_alloc_work_buffer(struct stream *s)
{
- if (b_alloc(&s->res.buf))
+ if (b_alloc(&s->res.buf, DB_CHANNEL))
return 1;
return 0;
}