return 1;
full:
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
return 0;
error:
/* unrecoverable error */
return 1;
full:
htx_reset(htx);
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
return 0;
}
/* Check if the input buffer is available. */
if (!b_size(&res->buf)) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
goto out;
}
*/
if (htx_is_empty(res_htx)) {
if (!htx_add_endof(res_htx, HTX_BLK_EOT)) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
goto out;
}
channel_add_input(res, 1);
return !!sc_ep_test(cs, SE_FL_RXBLK_ANY);
}
-
-/* Returns non-zero if the stream connector's Rx path is blocked because of lack
- * of room in the input buffer.
+/* Returns non-zero if the stream connector's Rx path is blocked because of
+ * lack of room in the input buffer. This usually happens after applets failed
+ * to deliver data into the channel's buffer and reported it via sc_need_room().
*/
-static inline int cs_rx_blocked_room(const struct stconn *cs)
+__attribute__((warn_unused_result))
+static inline int sc_waiting_room(const struct stconn *cs)
{
return !!sc_ep_test(cs, SE_FL_RXBLK_ROOM);
}
sc_ep_set(cs, SE_FL_RXBLK_BUFF);
}
-/* Tell a stream connector some room was made in the input buffer */
-static inline void cs_rx_room_rdy(struct stconn *cs)
+/* Tell a stream connector that some room was made in the input buffer and
+ * that any failed attempt to inject data into it may be retried. This is
+ * usually called after a successful transfer of buffer contents to the
+ * other side.
+ */
+static inline void sc_have_room(struct stconn *cs)
{
sc_ep_clr(cs, SE_FL_RXBLK_ROOM);
}
/* The stream connector announces it failed to put data into the input buffer
* by lack of room. Since it indicates a willingness to deliver data to the
- * buffer that will have to be retried, we automatically clear RXBLK_ENDP to
- * be called again as soon as RXBLK_ROOM is cleared.
+ * buffer, a retry will be needed. Usually the caller will also clear
+ * RXBLK_ENDP to be called again as soon as RXBLK_ROOM is cleared.
*/
-static inline void cs_rx_room_blk(struct stconn *cs)
+static inline void sc_need_room(struct stconn *cs)
{
sc_ep_set(cs, SE_FL_RXBLK_ROOM);
}
*/
if (count != co_data(sc_oc(cs))) {
sc_oc(cs)->flags |= CF_WRITE_PARTIAL | CF_WROTE_DATA;
- cs_rx_room_rdy(cs_opposite(cs));
+ sc_have_room(cs_opposite(cs));
}
/* measure the call rate and check for anomalies when too high */
/* Check if the input buffer is available. */
if (!b_size(&res->buf)) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
goto out;
}
if (len) {
ret = htx_cache_dump_msg(appctx, res_htx, len, HTX_BLK_UNUSED);
if (ret < len) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
goto out;
}
}
* would want to return some info right after parsing.
*/
if (buffer_almost_full(sc_ib(cs))) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
break;
}
appctx->st0 = CLI_ST_PROMPT;
}
else
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
break;
case CLI_ST_CALLBACK: /* use custom pointer */
if (ic->pipe) {
/* stop reading */
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
}
else {
/* (re)start reading */
if (!channel_is_empty(ic) || !channel_may_recv(ic)) {
/* stop reading, imposed by channel's policy or contents */
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
}
else {
/* (re)start reading and update timeout. Note: we don't recompute the timeout
* update it if is was not yet set. The stream socket handler will already
* have updated it if there has been a completed I/O.
*/
- cs_rx_room_rdy(cs);
+ sc_have_room(cs);
}
if (sc_ep_test(cs, SE_FL_RXBLK_ANY))
ic->rex = TICK_ETERNITY;
* buffer or in the pipe.
*/
if (new_len < last_len)
- cs_rx_room_rdy(cs);
+ sc_have_room(cs);
}
if (!(ic->flags & CF_DONT_READ))
/* the pipe is full or we have read enough data that it
* could soon be full. Let's stop before needing to poll.
*/
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
goto done_recv;
}
*/
BUG_ON(c_empty(ic));
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
/* Add READ_PARTIAL because some data are pending but
* cannot be xferred to the channel
*/
* here to proceed.
*/
if (flags & CO_RFL_BUF_FLUSH)
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
break;
}
if (cs->state == SC_ST_CON)
cs->state = SC_ST_RDY;
- cs_rx_room_rdy(cs_opposite(cs));
+ sc_have_room(cs_opposite(cs));
}
if (sc_ep_test(cs, SE_FL_ERROR | SE_FL_ERR_PENDING)) {
/* check if there is enough room to put message len and query id */
if (available_room < sizeof(slen) + sizeof(new_qid)) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
ret = 0;
break;
}
/* check if it remains available room on output chan */
if (unlikely(!available_room)) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
ret = 0;
break;
}
if (ds->tx_msg_offset) {
/* msg was not fully processed, we must be awake to drain pending data */
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
ret = 0;
break;
}
if (ret <= 0) {
if ((ret == -3 && b_is_null(&sc_ic(cs)->buf)) || ret == -1) {
/* WT: is this still needed for the case ret==-3 ? */
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
return 1; /* retry */
}
SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_IO;
* applet, and returns a yield.
*/
if (l < len) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_tcp_send_yield, TICK_ETERNITY, 0));
}
if (l < len) {
snd_yield:
htx_to_buf(htx, &res->buf);
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_http_send_yield, TICK_ETERNITY, 0));
}
struct channel *res = sc_ic(cs);
if (co_data(res)) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_http_start_response_yield, TICK_ETERNITY, 0));
}
return MAY_LJMP(hlua_applet_http_send_response(L));
/* Check if the input buffer is available. */
if (!b_size(&res->buf)) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
goto out;
}
/* check that the output is not closed */
*/
if (htx_is_empty(res_htx) && (strm->txn->rsp.flags & (HTTP_MSGF_XFER_LEN|HTTP_MSGF_CNT_LEN)) == HTTP_MSGF_XFER_LEN) {
if (!htx_add_endof(res_htx, HTX_BLK_EOT)) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
goto out;
}
channel_add_input(res, 1);
case HLUA_E_AGAIN:
/* We want write. */
if (HLUA_IS_WAKERESWR(hlua))
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
/* Set the timeout. */
if (hlua->wake_time != TICK_ETERNITY)
task_schedule(hlua->task, hlua->wake_time);
if ((htx->flags & HTX_FL_EOM) ||
htx_get_tail_type(htx) > HTX_BLK_DATA ||
channel_htx_full(chn, htx, global.tune.maxrewrite) ||
- cs_rx_blocked_room(chn_prod(chn)))
+ sc_waiting_room(chn_prod(chn)))
goto end;
if (bytes) {
out:
/* we didn't clear every flags, we should come back to finish things */
if (ctx->flags)
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
free_trash_chunk(trash);
return 0;
more:
/* There was not enough data in the response channel */
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
if (appctx->st0 == HTTPCLIENT_S_RES_END)
goto end;
if (ret <= 0) {
if (ret == -1) {
/* No more write possible */
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
return -1;
}
appctx->st0 = PEER_SESS_ST_END;
/* Check if the input buffer is available. */
if (sc_ib(cs)->size == 0) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
goto out;
}
cant_send_unlock:
HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &ctx->px->lock);
cant_send:
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
return 0;
}
return 1;
full:
- cs_rx_room_rdy(cs);
+ sc_have_room(cs);
return 0;
}
return 1;
full:
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
return 0;
}
}
full:
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
return 0;
}
return 1;
full:
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
return 0;
}
}
full:
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
return 0;
}
full:
htx_reset(htx);
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
return 0;
}
full:
htx_reset(htx);
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
return 0;
}
/* Check if the input buffer is available. */
if (!b_size(&res->buf)) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
goto out;
}
*/
if (htx_is_empty(res_htx)) {
if (!htx_add_endof(res_htx, HTX_BLK_EOT)) {
- cs_rx_room_blk(cs);
+ sc_need_room(cs);
goto out;
}
channel_add_input(res, 1);
*/
if ((req->flags & (CF_EOI|CF_SHUTR|CF_READ_ERROR)) || channel_full(req, global.tune.maxrewrite) ||
- cs_rx_blocked_room(chn_prod(req)) ||
+ sc_waiting_room(chn_prod(req)) ||
!s->be->tcp_req.inspect_delay || tick_is_expired(s->rules_exp, now_ms))
partial = SMP_OPT_FINAL;
else
* - if one rule returns KO, then return KO
*/
if ((rep->flags & (CF_EOI|CF_SHUTR|CF_READ_ERROR)) || channel_full(rep, global.tune.maxrewrite) ||
- cs_rx_blocked_room(chn_prod(rep)) ||
+ sc_waiting_room(chn_prod(rep)) ||
!s->be->tcp_rep.inspect_delay || tick_is_expired(s->rules_exp, now_ms))
partial = SMP_OPT_FINAL;
else