git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MEDIUM: dynbuf: remove last usages of b_alloc_margin()
author: Willy Tarreau <w@1wt.eu>
Mon, 22 Mar 2021 13:44:31 +0000 (14:44 +0100)
committer: Willy Tarreau <w@1wt.eu>
Mon, 22 Mar 2021 15:27:59 +0000 (16:27 +0100)
The function's purpose used to be to fail a buffer allocation if that
allocation wouldn't result in leaving some buffers available. Thus,
some allocations could succeed and others fail for the sole purpose of
trying to provide 2 buffers at once to process_stream(). But things
have changed a lot with 1.7 breaking the promise that process_stream()
would always succeed with only two buffers, and later came the
thread-local pool caches, which keep certain buffers available that are
not accounted for in the global pool, so that local allocators cannot
guess anything from the number of currently available pools.

Let's just replace all last uses of b_alloc_margin() with b_alloc() once
for all.

src/applet.c
src/check.c
src/flt_spoe.c
src/mux_fcgi.c
src/mux_h1.c
src/mux_h2.c
src/stream.c

index eafce3d1f09f7fa76ac7bc5888922b44981a076b..a6d7f43fdde2922756978e9a60cc0752f1298db8 100644 (file)
@@ -48,7 +48,7 @@ int appctx_buf_available(void *arg)
                return 0;
 
        /* allocation possible now ? */
-       if (!b_alloc_margin(&si_ic(si)->buf, global.tune.reserved_bufs)) {
+       if (!b_alloc(&si_ic(si)->buf)) {
                si_rx_buff_blk(si);
                return 0;
        }
index c32e940c3a4d4b5e6db17e535aa0ee06a4339cb2..96276c140f8d6317573abf190a4715b8bd18efaa 100644 (file)
@@ -994,12 +994,12 @@ int check_buf_available(void *target)
 {
        struct check *check = target;
 
-       if ((check->state & CHK_ST_IN_ALLOC) && b_alloc_margin(&check->bi, 0)) {
+       if ((check->state & CHK_ST_IN_ALLOC) && b_alloc(&check->bi)) {
                check->state &= ~CHK_ST_IN_ALLOC;
                tasklet_wakeup(check->wait_list.tasklet);
                return 1;
        }
-       if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc_margin(&check->bo, 0)) {
+       if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc(&check->bo)) {
                check->state &= ~CHK_ST_OUT_ALLOC;
                tasklet_wakeup(check->wait_list.tasklet);
                return 1;
@@ -1016,7 +1016,7 @@ struct buffer *check_get_buf(struct check *check, struct buffer *bptr)
        struct buffer *buf = NULL;
 
        if (likely(!LIST_ADDED(&check->buf_wait.list)) &&
-           unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+           unlikely((buf = b_alloc(bptr)) == NULL)) {
                check->buf_wait.target = check;
                check->buf_wait.wakeup_cb = check_buf_available;
                LIST_ADDQ(&ti->buffer_wq, &check->buf_wait.list);
index 0397470778df3bd3f17e30bc55febf9ce760c479..2d7b3b82805022401da282b8dbba4f61a6d2a7f8 100644 (file)
@@ -2837,7 +2837,7 @@ spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait)
        if (LIST_ADDED(&buffer_wait->list))
                LIST_DEL_INIT(&buffer_wait->list);
 
-       if (b_alloc_margin(buf, global.tune.reserved_bufs))
+       if (b_alloc(buf))
                return 1;
 
        LIST_ADDQ(&ti->buffer_wq, &buffer_wait->list);
index 48f11f27fc942e2e039a1404f224d0ce7e4348cd..780d72172d6c451a73e6ddd3c8fd683e848a0c44 100644 (file)
@@ -570,14 +570,14 @@ static int fcgi_buf_available(void *target)
        struct fcgi_conn *fconn = target;
        struct fcgi_strm *fstrm;
 
-       if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc_margin(&fconn->dbuf, 0)) {
+       if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc(&fconn->dbuf)) {
                TRACE_STATE("unblocking fconn, dbuf allocated", FCGI_EV_FCONN_RECV|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
                fconn->flags &= ~FCGI_CF_DEM_DALLOC;
                fcgi_conn_restart_reading(fconn, 1);
                return 1;
        }
 
-       if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc_margin(br_tail(fconn->mbuf), 0)) {
+       if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc(br_tail(fconn->mbuf))) {
                TRACE_STATE("unblocking fconn, mbuf allocated", FCGI_EV_FCONN_SEND|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
                fconn->flags &= ~FCGI_CF_MUX_MALLOC;
                if (fconn->flags & FCGI_CF_DEM_MROOM) {
@@ -589,7 +589,7 @@ static int fcgi_buf_available(void *target)
 
        if ((fconn->flags & FCGI_CF_DEM_SALLOC) &&
            (fstrm = fcgi_conn_st_by_id(fconn, fconn->dsi)) && fstrm->cs &&
-           b_alloc_margin(&fstrm->rxbuf, 0)) {
+           b_alloc(&fstrm->rxbuf)) {
                TRACE_STATE("unblocking fstrm, rxbuf allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
                fconn->flags &= ~FCGI_CF_DEM_SALLOC;
                fcgi_conn_restart_reading(fconn, 1);
@@ -605,7 +605,7 @@ static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer
        struct buffer *buf = NULL;
 
        if (likely(!LIST_ADDED(&fconn->buf_wait.list)) &&
-           unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+           unlikely((buf = b_alloc(bptr)) == NULL)) {
                fconn->buf_wait.target = fconn;
                fconn->buf_wait.wakeup_cb = fcgi_buf_available;
                LIST_ADDQ(&ti->buffer_wq, &fconn->buf_wait.list);
index 3c80e85f7f221d118c1d26ed7f8e6889b539eb02..ca2e8a85375b3c98ea623e85f3c85e7e723dbe02 100644 (file)
@@ -415,7 +415,7 @@ static int h1_buf_available(void *target)
 {
        struct h1c *h1c = target;
 
-       if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc_margin(&h1c->ibuf, 0)) {
+       if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc(&h1c->ibuf)) {
                TRACE_STATE("unblocking h1c, ibuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
                h1c->flags &= ~H1C_F_IN_ALLOC;
                if (h1_recv_allowed(h1c))
@@ -423,7 +423,7 @@ static int h1_buf_available(void *target)
                return 1;
        }
 
-       if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc_margin(&h1c->obuf, 0)) {
+       if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc(&h1c->obuf)) {
                TRACE_STATE("unblocking h1s, obuf allocated", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1c->h1s);
                h1c->flags &= ~H1C_F_OUT_ALLOC;
                if (h1c->h1s)
@@ -431,7 +431,7 @@ static int h1_buf_available(void *target)
                return 1;
        }
 
-       if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc_margin(&h1c->h1s->rxbuf, 0)) {
+       if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc(&h1c->h1s->rxbuf)) {
                TRACE_STATE("unblocking h1c, stream rxbuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
                h1c->flags &= ~H1C_F_IN_SALLOC;
                tasklet_wakeup(h1c->wait_event.tasklet);
@@ -449,7 +449,7 @@ static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
        struct buffer *buf = NULL;
 
        if (likely(!LIST_ADDED(&h1c->buf_wait.list)) &&
-           unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+           unlikely((buf = b_alloc(bptr)) == NULL)) {
                h1c->buf_wait.target = h1c;
                h1c->buf_wait.wakeup_cb = h1_buf_available;
                LIST_ADDQ(&ti->buffer_wq, &h1c->buf_wait.list);
index 68b34621c306899fa4a6ea89bb9b3a1c6c39a486..06abbc123cdc831769d837c1383fd6bf8cc015f9 100644 (file)
@@ -775,13 +775,13 @@ static int h2_buf_available(void *target)
        struct h2c *h2c = target;
        struct h2s *h2s;
 
-       if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc_margin(&h2c->dbuf, 0)) {
+       if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
                h2c->flags &= ~H2_CF_DEM_DALLOC;
                h2c_restart_reading(h2c, 1);
                return 1;
        }
 
-       if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc_margin(br_tail(h2c->mbuf), 0)) {
+       if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
                h2c->flags &= ~H2_CF_MUX_MALLOC;
 
                if (h2c->flags & H2_CF_DEM_MROOM) {
@@ -793,7 +793,7 @@ static int h2_buf_available(void *target)
 
        if ((h2c->flags & H2_CF_DEM_SALLOC) &&
            (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s->cs &&
-           b_alloc_margin(&h2s->rxbuf, 0)) {
+           b_alloc(&h2s->rxbuf)) {
                h2c->flags &= ~H2_CF_DEM_SALLOC;
                h2c_restart_reading(h2c, 1);
                return 1;
@@ -807,7 +807,7 @@ static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
        struct buffer *buf = NULL;
 
        if (likely(!LIST_ADDED(&h2c->buf_wait.list)) &&
-           unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+           unlikely((buf = b_alloc(bptr)) == NULL)) {
                h2c->buf_wait.target = h2c;
                h2c->buf_wait.wakeup_cb = h2_buf_available;
                LIST_ADDQ(&ti->buffer_wq, &h2c->buf_wait.list);
index 54c6a77bf59f7032cd4ea67b380fa90ad19f2b55..b0c2bab372aa911382b1943699f006fff9ef63b4 100644 (file)
@@ -329,10 +329,10 @@ int stream_buf_available(void *arg)
        struct stream *s = arg;
 
        if (!s->req.buf.size && !s->req.pipe && (s->si[0].flags & SI_FL_RXBLK_BUFF) &&
-           b_alloc_margin(&s->req.buf, global.tune.reserved_bufs))
+           b_alloc(&s->req.buf))
                si_rx_buff_rdy(&s->si[0]);
        else if (!s->res.buf.size && !s->res.pipe && (s->si[1].flags & SI_FL_RXBLK_BUFF) &&
-                b_alloc_margin(&s->res.buf, 0))
+                b_alloc(&s->res.buf))
                si_rx_buff_rdy(&s->si[1]);
        else
                return 0;
@@ -772,7 +772,7 @@ static int stream_alloc_work_buffer(struct stream *s)
        if (LIST_ADDED(&s->buffer_wait.list))
                LIST_DEL_INIT(&s->buffer_wait.list);
 
-       if (b_alloc_margin(&s->res.buf, 0))
+       if (b_alloc(&s->res.buf))
                return 1;
 
        LIST_ADDQ(&ti->buffer_wq, &s->buffer_wait.list);