*/
static inline void cs_chk_rcv(struct stconn *cs)
{
+ struct channel *ic = sc_ic(cs);
+
if (sc_ep_test(cs, SE_FL_RXBLK_CONN) && cs_state_in(cs_opposite(cs)->state, SC_SB_RDY|SC_SB_EST|SC_SB_DIS|SC_SB_CLO))
cs_rx_conn_rdy(cs);
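+	/* the channel's read side is already shut: no more data will come
+	 * in, so give up on receiving */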
+ if (ic->flags & CF_SHUTR)
+ return;
+
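+	/* rx still blocked or the endpoint not ready: no reason to wake
+	 * anything up */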
if (cs_rx_blocked(cs) || !cs_rx_endp_ready(cs))
return;
cs_chk_rcv(cs);
cs_chk_rcv(cso);
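+	/* reads are blocked or the read side is shut: disable the read
+	 * timeout */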
- if (cs_rx_blocked(cs)) {
+ if ((ic->flags & CF_SHUTR) || cs_rx_blocked(cs)) {
ic->rex = TICK_ETERNITY;
}
else if ((ic->flags & (CF_SHUTR|CF_READ_PARTIAL)) == CF_READ_PARTIAL) {
sc_conn_read0(cs);
ret = 1;
}
- else if (!cs_rx_blocked(cs)) {
+ else if (!cs_rx_blocked(cs) && !(ic->flags & CF_SHUTR)) {
/* Subscribe to receive events if we're blocking on I/O */
conn->mux->subscribe(cs, SUB_RETRY_RECV, &cs->wait_event);
cs_rx_endp_done(cs);
* appctx, but in case the task is not in the run queue we may have to
* wake up the appctx immediately.
*/
- if ((cs_rx_endp_ready(cs) && !cs_rx_blocked(cs)) || sc_is_send_allowed(cs))
+ if ((cs_rx_endp_ready(cs) && !cs_rx_blocked(cs) && !(ic->flags & CF_SHUTR)) ||
+ sc_is_send_allowed(cs))
appctx_wakeup(__sc_appctx(cs));
return 0;
}
* handled at the latest moment.
*/
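+	/* only wake a side's applet up if it may receive (endpoint ready,
+	 * not rx-blocked, read side not shut) or is allowed to send */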
if (sc_appctx(scf)) {
- if ((cs_rx_endp_ready(scf) && !cs_rx_blocked(scf)) || sc_is_send_allowed(scf))
+ if ((cs_rx_endp_ready(scf) && !cs_rx_blocked(scf) && !(req->flags & CF_SHUTR)) || sc_is_send_allowed(scf))
appctx_wakeup(__sc_appctx(scf));
}
if (sc_appctx(scb)) {
- if ((cs_rx_endp_ready(scb) && !cs_rx_blocked(scb)) || sc_is_send_allowed(scb))
+ if ((cs_rx_endp_ready(scb) && !cs_rx_blocked(scb) && !(res->flags & CF_SHUTR)) || sc_is_send_allowed(scb))
appctx_wakeup(__sc_appctx(scb));
}
}