                return 0;
        /* allocation possible now ? */
-       if (!b_alloc_margin(&si_ic(si)->buf, global.tune.reserved_bufs)) {
+       if (!b_alloc(&si_ic(si)->buf)) {
                si_rx_buff_blk(si);
                return 0;
        }
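
Note: the only change at each of these call sites is the allocator. As a reminder of the two prototypes, quoted from memory from the dynbuf API rather than from this patch: the old b_alloc_margin() refused to allocate unless at least <margin> buffers remained in the pool, while the new b_alloc() takes only the buffer and returns it with an area attached on success, or NULL when the pool is exhausted, in which case the caller queues itself on ti->buffer_wq and retries from its wakeup callback.

    /* assumed prototypes, for illustration only */
    struct buffer *b_alloc_margin(struct buffer *buf, int margin); /* old: NULL unless <margin> bufs remain */
    struct buffer *b_alloc(struct buffer *buf);                    /* new: buf on success, NULL when pool empty */
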
{
        struct check *check = target;
-       if ((check->state & CHK_ST_IN_ALLOC) && b_alloc_margin(&check->bi, 0)) {
+       if ((check->state & CHK_ST_IN_ALLOC) && b_alloc(&check->bi)) {
                check->state &= ~CHK_ST_IN_ALLOC;
                tasklet_wakeup(check->wait_list.tasklet);
                return 1;
        }
-       if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc_margin(&check->bo, 0)) {
+       if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc(&check->bo)) {
                check->state &= ~CHK_ST_OUT_ALLOC;
                tasklet_wakeup(check->wait_list.tasklet);
                return 1;
        struct buffer *buf = NULL;
        if (likely(!LIST_ADDED(&check->buf_wait.list)) &&
-           unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+           unlikely((buf = b_alloc(bptr)) == NULL)) {
                check->buf_wait.target = check;
                check->buf_wait.wakeup_cb = check_buf_available;
                LIST_ADDQ(&ti->buffer_wq, &check->buf_wait.list);
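
Note: the hunk above is truncated right after the LIST_ADDQ() call. The same allocate-or-subscribe helper shape recurs in the fcgi, h1 and h2 muxes below; a hedged reconstruction of that shape follows, with the function name and anything not visible in the hunks being an assumption rather than the actual haproxy code:

    /* sketch only: relies on haproxy's dynbuf and list APIs */
    static struct buffer *check_get_buf_sketch(struct check *check, struct buffer *bptr)
    {
            struct buffer *buf = NULL;

            if (likely(!LIST_ADDED(&check->buf_wait.list)) &&
                unlikely((buf = b_alloc(bptr)) == NULL)) {
                    /* no buffer available: register on the per-thread wait
                     * queue; check_buf_available() will be called back once
                     * another entity releases a buffer.
                     */
                    check->buf_wait.target = check;
                    check->buf_wait.wakeup_cb = check_buf_available;
                    LIST_ADDQ(&ti->buffer_wq, &check->buf_wait.list);
            }
            return buf; /* NULL means "subscribed, retry when woken up" */
    }
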
        if (LIST_ADDED(&buffer_wait->list))
                LIST_DEL_INIT(&buffer_wait->list);
-       if (b_alloc_margin(buf, global.tune.reserved_bufs))
+       if (b_alloc(buf))
                return 1;
        LIST_ADDQ(&ti->buffer_wq, &buffer_wait->list);
        struct fcgi_conn *fconn = target;
        struct fcgi_strm *fstrm;
-       if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc_margin(&fconn->dbuf, 0)) {
+       if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc(&fconn->dbuf)) {
                TRACE_STATE("unblocking fconn, dbuf allocated", FCGI_EV_FCONN_RECV|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
                fconn->flags &= ~FCGI_CF_DEM_DALLOC;
                fcgi_conn_restart_reading(fconn, 1);
                return 1;
        }
-       if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc_margin(br_tail(fconn->mbuf), 0)) {
+       if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc(br_tail(fconn->mbuf))) {
                TRACE_STATE("unblocking fconn, mbuf allocated", FCGI_EV_FCONN_SEND|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
                fconn->flags &= ~FCGI_CF_MUX_MALLOC;
                if (fconn->flags & FCGI_CF_DEM_MROOM) {
        if ((fconn->flags & FCGI_CF_DEM_SALLOC) &&
            (fstrm = fcgi_conn_st_by_id(fconn, fconn->dsi)) && fstrm->cs &&
-           b_alloc_margin(&fstrm->rxbuf, 0)) {
+           b_alloc(&fstrm->rxbuf)) {
                TRACE_STATE("unblocking fstrm, rxbuf allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
                fconn->flags &= ~FCGI_CF_DEM_SALLOC;
                fcgi_conn_restart_reading(fconn, 1);
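
Note on the two mux hunks using br_tail(): fconn->mbuf here (and h2c->mbuf further down) is a ring of struct buffer slots, and br_tail() returns the slot output is currently appended to, so the one-argument b_alloc() is applied to a ring slot exactly as it is to a standalone buffer.
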
        struct buffer *buf = NULL;
        if (likely(!LIST_ADDED(&fconn->buf_wait.list)) &&
-           unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+           unlikely((buf = b_alloc(bptr)) == NULL)) {
                fconn->buf_wait.target = fconn;
                fconn->buf_wait.wakeup_cb = fcgi_buf_available;
                LIST_ADDQ(&ti->buffer_wq, &fconn->buf_wait.list);
{
        struct h1c *h1c = target;
-       if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc_margin(&h1c->ibuf, 0)) {
+       if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc(&h1c->ibuf)) {
                TRACE_STATE("unblocking h1c, ibuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
                h1c->flags &= ~H1C_F_IN_ALLOC;
                if (h1_recv_allowed(h1c))
                return 1;
        }
-       if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc_margin(&h1c->obuf, 0)) {
+       if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc(&h1c->obuf)) {
                TRACE_STATE("unblocking h1s, obuf allocated", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1c->h1s);
                h1c->flags &= ~H1C_F_OUT_ALLOC;
                if (h1c->h1s)
                return 1;
        }
-       if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc_margin(&h1c->h1s->rxbuf, 0)) {
+       if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc(&h1c->h1s->rxbuf)) {
                TRACE_STATE("unblocking h1c, stream rxbuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
                h1c->flags &= ~H1C_F_IN_SALLOC;
                tasklet_wakeup(h1c->wait_event.tasklet);
        struct buffer *buf = NULL;
        if (likely(!LIST_ADDED(&h1c->buf_wait.list)) &&
-           unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+           unlikely((buf = b_alloc(bptr)) == NULL)) {
                h1c->buf_wait.target = h1c;
                h1c->buf_wait.wakeup_cb = h1_buf_available;
                LIST_ADDQ(&ti->buffer_wq, &h1c->buf_wait.list);
        struct h2c *h2c = target;
        struct h2s *h2s;
-       if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc_margin(&h2c->dbuf, 0)) {
+       if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
                h2c->flags &= ~H2_CF_DEM_DALLOC;
                h2c_restart_reading(h2c, 1);
                return 1;
        }
-       if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc_margin(br_tail(h2c->mbuf), 0)) {
+       if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
                h2c->flags &= ~H2_CF_MUX_MALLOC;
                if (h2c->flags & H2_CF_DEM_MROOM) {
        if ((h2c->flags & H2_CF_DEM_SALLOC) &&
            (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s->cs &&
-           b_alloc_margin(&h2s->rxbuf, 0)) {
+           b_alloc(&h2s->rxbuf)) {
                h2c->flags &= ~H2_CF_DEM_SALLOC;
                h2c_restart_reading(h2c, 1);
                return 1;
        struct buffer *buf = NULL;
        if (likely(!LIST_ADDED(&h2c->buf_wait.list)) &&
-           unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+           unlikely((buf = b_alloc(bptr)) == NULL)) {
                h2c->buf_wait.target = h2c;
                h2c->buf_wait.wakeup_cb = h2_buf_available;
                LIST_ADDQ(&ti->buffer_wq, &h2c->buf_wait.list);
        struct stream *s = arg;
        if (!s->req.buf.size && !s->req.pipe && (s->si[0].flags & SI_FL_RXBLK_BUFF) &&
-           b_alloc_margin(&s->req.buf, global.tune.reserved_bufs))
+           b_alloc(&s->req.buf))
                si_rx_buff_rdy(&s->si[0]);
        else if (!s->res.buf.size && !s->res.pipe && (s->si[1].flags & SI_FL_RXBLK_BUFF) &&
-           b_alloc_margin(&s->res.buf, 0))
+           b_alloc(&s->res.buf))
                si_rx_buff_rdy(&s->si[1]);
        else
                return 0;
        if (LIST_ADDED(&s->buffer_wait.list))
                LIST_DEL_INIT(&s->buffer_wait.list);
-       if (b_alloc_margin(&s->res.buf, 0))
+       if (b_alloc(&s->res.buf))
                return 1;
        LIST_ADDQ(&ti->buffer_wq, &s->buffer_wait.list);
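
Note: every *_buf_available() function touched above is installed as a wakeup_cb on ti->buffer_wq and returns 1 when it actually put a freed buffer to use. A rough sketch of the offering side that drives these callbacks when buffers are released; the name and details are assumptions, not the exact haproxy implementation:

    /* sketch only: wakes waiters queued on the per-thread buffer_wq */
    static void offer_buffers_sketch(void *from, unsigned int count)
    {
            struct buffer_wait *wait, *back;

            list_for_each_entry_safe(wait, back, &ti->buffer_wq, list) {
                    if (!count)
                            break;
                    /* skip the releaser itself and waiters that could not
                     * make use of a buffer right now
                     */
                    if (wait->target == from || !wait->wakeup_cb(wait->target))
                            continue;
                    LIST_DEL_INIT(&wait->list);
                    count--;
            }
    }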