* - read-only indicators reported by lower data levels :
* CF_STREAMER, CF_STREAMER_FAST
*
- * - write-once status flags reported by the stream connector layer :
- * CF_SHUTR, CF_SHUTW
- *
- * - persistent control flags managed only by application level :
- * CF_SHUT*_NOW, CF_*_ENA
- *
* The flags have been arranged for readability, so that the read and write
* bits have the same position in a byte (read being the lower byte and write
* the second one). All flag names are relative to the channel. For instance,
#define CF_READ_TIMEOUT 0x00000004 /* timeout while waiting for producer */
/* unused 0x00000008 */
-/* unused: 0x00000010 */
-#define CF_SHUTR 0x00000020 /* producer has already shut down */
-#define CF_SHUTR_NOW 0x00000040 /* the producer must shut down for reads ASAP */
-/* 0x00000080 unused */
+/* unused: 0x00000010 - 0x00000080 */
#define CF_WRITE_EVENT 0x00000100 /* a write event detected on consumer side */
/* unused: 0x00000200 */
/* unused 0x00000800 */
#define CF_WAKE_WRITE 0x00001000 /* wake the task up when there's write activity */
-#define CF_SHUTW 0x00002000 /* consumer has already shut down */
-#define CF_SHUTW_NOW 0x00004000 /* the consumer must shut down for writes ASAP */
+/* unused: 0x00002000 - 0x00004000 */
#define CF_AUTO_CLOSE 0x00008000 /* producer can forward shutdown to other side */
-/* When CF_SHUTR_NOW is set, it is strictly forbidden for the producer to alter
- * the buffer contents. When CF_SHUTW_NOW is set, the consumer is free to perform
- * a shutw() when it has consumed the last contents, otherwise the session processor
- * will do it anyway.
- *
- * The SHUT* flags work like this :
- *
- * SHUTR SHUTR_NOW meaning
- * 0 0 normal case, connection still open and data is being read
- * 0 1 closing : the producer cannot feed data anymore but can close
- * 1 0 closed: the producer has closed its input channel.
- * 1 1 impossible
- *
- * SHUTW SHUTW_NOW meaning
- * 0 0 normal case, connection still open and data is being written
- * 0 1 closing: the consumer can send last data and may then close
- * 1 0 closed: the consumer has closed its output channel.
- * 1 1 impossible
- *
- * The SHUTW_NOW flag should be set by the session processor when SHUTR and AUTO_CLOSE
- * are both set. And it may also be set by the producer when it detects SHUTR while
- * directly forwarding data to the consumer.
- *
- * The SHUTR_NOW flag is mostly used to force the producer to abort when an error is
- * detected on the consumer side.
- */
-
#define CF_STREAMER 0x00010000 /* the producer is identified as streaming data */
#define CF_STREAMER_FAST 0x00020000 /* the consumer seems to eat the stream very fast */
/* Masks which define input events for stream analysers */
#define CF_MASK_ANALYSER (CF_READ_EVENT|CF_READ_TIMEOUT|CF_WRITE_EVENT|CF_WAKE_ONCE)
-/* Mask for static flags which cause analysers to be woken up when they change */
-#define CF_MASK_STATIC (CF_SHUTR|CF_SHUTW|CF_SHUTR_NOW|CF_SHUTW_NOW)
-
/* This function is used to report flags in debugging tools. Please reflect
* below any single-bit flag addition above in the same order via the
* __APPEND_FLAG macro. The new end of the buffer is returned.
_(0);
/* flags */
_(CF_READ_EVENT, _(CF_READ_TIMEOUT,
- _(CF_SHUTR, _(CF_SHUTR_NOW, _(CF_WRITE_EVENT,
+ _(CF_WRITE_EVENT,
_(CF_WRITE_TIMEOUT,
- _(CF_WAKE_WRITE, _(CF_SHUTW, _(CF_SHUTW_NOW, _(CF_AUTO_CLOSE,
+ _(CF_WAKE_WRITE, _(CF_AUTO_CLOSE,
_(CF_STREAMER, _(CF_STREAMER_FAST, _(CF_WROTE_DATA,
_(CF_KERN_SPLICING,
_(CF_AUTO_CONNECT, _(CF_DONT_READ,
_(CF_WAKE_ONCE, _(CF_FLT_ANALYZE,
- _(CF_ISRESP)))))))))))))))))));
+ _(CF_ISRESP)))))))))))))));
/* epilogue */
_(~0U);
return buf;
/* Returns true if the channel's input is already closed */
static inline int channel_input_closed(struct channel *chn)
{
- return ((chn->flags & CF_SHUTR) != 0);
+ return ((chn_prod(chn)->flags & SC_FL_SHUTR) != 0);
}
/* Returns true if the channel's output is already closed */
static inline int channel_output_closed(struct channel *chn)
{
- return ((chn->flags & CF_SHUTW) != 0);
+ return ((chn_cons(chn)->flags & SC_FL_SHUTW) != 0);
}
/* Check channel timeouts, and set the corresponding flags. */
/* marks the channel as "shutdown" ASAP for reads */
static inline void channel_shutr_now(struct channel *chn)
{
- chn->flags |= CF_SHUTR_NOW;
+ chn_prod(chn)->flags |= SC_FL_SHUTR_NOW;
}
/* marks the channel as "shutdown" ASAP for writes */
static inline void channel_shutw_now(struct channel *chn)
{
- chn->flags |= CF_SHUTW_NOW;
+ chn_cons(chn)->flags |= SC_FL_SHUTW_NOW;
}
/* marks the channel as "shutdown" ASAP in both directions */
static inline void channel_abort(struct channel *chn)
{
- chn->flags |= CF_SHUTR_NOW | CF_SHUTW_NOW;
+ chn_prod(chn)->flags |= SC_FL_SHUTR_NOW;
+ chn_cons(chn)->flags |= SC_FL_SHUTW_NOW;
chn->flags &= ~CF_AUTO_CONNECT;
}
static inline int co_getchr(struct channel *chn)
{
/* closed or empty + imminent close = -2; empty = -1 */
- if (unlikely((chn->flags & CF_SHUTW) || channel_is_empty(chn))) {
- if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
+ if (unlikely((chn_cons(chn)->flags & SC_FL_SHUTW) || channel_is_empty(chn))) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))
return -2;
return -1;
}
{
struct channel *ic = sc_ic(sc);
- if (ic->flags & CF_SHUTR)
+ if (chn_prod(ic)->flags & SC_FL_SHUTR)
return 0;
if (sc_ep_test(sc, SE_FL_APPLET_NEED_CONN))
{
struct channel *oc = sc_oc(sc);
- if (oc->flags & CF_SHUTW)
+ if (chn_cons(oc)->flags & SC_FL_SHUTW)
return 0;
return !sc_ep_test(sc, SE_FL_WAIT_DATA | SE_FL_WONT_CONSUME);
static inline int sc_rcv_may_expire(const struct stconn *sc)
{
- if (sc_ic(sc)->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_EVENT))
+ if ((chn_prod(sc_ic(sc))->flags & SC_FL_SHUTR) ||
+ (sc_ic(sc)->flags & (CF_READ_TIMEOUT|CF_READ_EVENT)))
return 0;
if (sc->flags & (SC_FL_EOI|SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM))
return 0;
static inline int sc_snd_may_expire(const struct stconn *sc)
{
- if (sc_oc(sc)->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_EVENT))
+ if ((chn_cons(sc_oc(sc))->flags & SC_FL_SHUTW) ||
+ (sc_oc(sc)->flags & (CF_WRITE_TIMEOUT|CF_WRITE_EVENT)))
return 0;
if (sc_ep_test(sc, SE_FL_WONT_CONSUME))
return 0;
/* stconn flags.
* Please also update the sc_show_flags() function below in case of changes.
+ *
+ * When SC_FL_SHUTR_NOW is set, it is strictly forbidden for the producer to alter
+ * the buffer contents. When SC_FL_SHUTW_NOW is set, the consumer is free to perform
+ * a shutw() when it has consumed the last contents, otherwise the session processor
+ * will do it anyway.
+ *
+ * The SHUT* flags work like this :
+ *
+ * SHUTR SHUTR_NOW meaning
+ * 0 0 normal case, connection still open and data is being read
+ * 0 1 closing : the producer cannot feed data anymore but can close
+ * 1 0 closed: the producer has closed its input channel.
+ * 1 1 impossible
+ *
+ * SHUTW SHUTW_NOW meaning
+ * 0 0 normal case, connection still open and data is being written
+ * 0 1 closing: the consumer can send last data and may then close
+ * 1 0 closed: the consumer has closed its output channel.
+ * 1 1 impossible
+ *
+ * The SHUTW_NOW flag should be set by the session processor when SHUTR and AUTO_CLOSE
+ * are both set. And it may also be set by the producer when it detects SHUTR while
+ * directly forwarding data to the consumer.
+ *
+ * The SHUTR_NOW flag is mostly used to force the producer to abort when an error is
+ * detected on the consumer side.
+ *
*/
enum sc_flags {
SC_FL_NONE = 0x00000000, /* Just for initialization purposes */
SC_FL_SND_ASAP = 0x00000800, /* Don't wait for sending. cleared when all data were sent */
SC_FL_SND_NEVERWAIT = 0x00001000, /* Never wait for sending (permanent) */
SC_FL_SND_EXP_MORE = 0x00001000, /* More data expected to be sent very soon. cleared when all data were sent */
+
+ SC_FL_SHUTR_NOW = 0x00002000, /* the SC must shut down for reads ASAP */
+ SC_FL_SHUTW_NOW = 0x00004000, /* the SC must shut down for writes ASAP */
+ SC_FL_SHUTR = 0x00008000, /* the SC is shut down for reads */
+ SC_FL_SHUTW = 0x00010000, /* the SC is shut down for writes */
};
/* This function is used to report flags in debugging tools. Please reflect
_(SC_FL_ISBACK, _(SC_FL_EOI, _(SC_FL_NOLINGER, _(SC_FL_NOHALF,
_(SC_FL_DONT_WAKE, _(SC_FL_INDEP_STR, _(SC_FL_WONT_READ,
_(SC_FL_NEED_BUFF, _(SC_FL_NEED_ROOM,
- _(SC_FL_RCV_ONCE, _(SC_FL_SND_ASAP, _(SC_FL_SND_NEVERWAIT, _(SC_FL_SND_EXP_MORE)))))))))))));
+ _(SC_FL_RCV_ONCE, _(SC_FL_SND_ASAP, _(SC_FL_SND_NEVERWAIT, _(SC_FL_SND_EXP_MORE,
+ _(SC_FL_SHUTR_NOW, _(SC_FL_SHUTW_NOW, _(SC_FL_SHUTR, _(SC_FL_SHUTW)))))))))))))))));
/* epilogue */
_(~0U);
return buf;
int max_lines;
int i, j, max;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side ! */
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
return 1;
chunk_reset(&trash);
int thr, queue;
int i, max;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side ! */
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
return 1;
/* It's not possible to scan queues in small chunks and yield in the
struct timeval up;
int thr;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side ! */
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
return 1;
chunk_reset(&trash);
(b_size(sc_ib(sc)) && !b_data(sc_ib(sc)) && sc->flags & SC_FL_NEED_ROOM) || // asks for room in an empty buffer
(b_data(sc_ob(sc)) && sc_is_send_allowed(sc)) || // asks for data already present
(!b_data(sc_ib(sc)) && b_data(sc_ob(sc)) && // didn't return anything ...
- (sc_oc(sc)->flags & (CF_WRITE_EVENT|CF_SHUTW_NOW)) == CF_SHUTW_NOW))) { // ... and left data pending after a shut
+ (!(sc_oc(sc)->flags & CF_WRITE_EVENT) && (chn_cons(sc_oc(sc))->flags & SC_FL_SHUTW_NOW))))) { // ... and left data pending after a shut
rate = update_freq_ctr(&app->call_rate, 1);
if (rate >= 100000 && app->call_rate.prev_ctr) // looped like this more than 100k times over last second
stream_dump_and_crash(&app->obj_type, read_freq_ctr(&app->call_rate));
static int back_may_abort_req(struct channel *req, struct stream *s)
{
return (sc_ep_test(s->scf, SE_FL_ERROR) ||
- ((req->flags & (CF_SHUTW_NOW|CF_SHUTW)) && /* empty and client aborted */
+ ((chn_cons(req)->flags & (SC_FL_SHUTW_NOW|SC_FL_SHUTW)) && /* empty and client aborted */
(channel_is_empty(req) || (s->be->options & PR_O_ABRT_CLOSE))));
}
DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
/* the client might want to abort */
- if ((rep->flags & CF_SHUTW) ||
- ((req->flags & CF_SHUTW_NOW) &&
+ if ((chn_cons(rep)->flags & SC_FL_SHUTW) ||
+ ((chn_cons(req)->flags & SC_FL_SHUTW_NOW) &&
(channel_is_empty(req) || (s->be->options & PR_O_ABRT_CLOSE)))) {
sc->flags |= SC_FL_NOLINGER;
sc_shutw(sc);
*/
if (!(req->flags & CF_WROTE_DATA)) {
/* client abort ? */
- if ((rep->flags & CF_SHUTW) ||
- ((req->flags & CF_SHUTW_NOW) &&
+ if ((chn_cons(rep)->flags & SC_FL_SHUTW) ||
+ ((chn_cons(req)->flags & SC_FL_SHUTW_NOW) &&
(channel_is_empty(req) || (s->be->options & PR_O_ABRT_CLOSE)))) {
/* give up */
sc->flags |= SC_FL_NOLINGER;
max = len;
/* closed or empty + imminent close = -1; empty = 0 */
- if (unlikely((chn->flags & CF_SHUTW) || channel_is_empty(chn))) {
- if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
+ if (unlikely((chn_cons(chn)->flags & SC_FL_SHUTW) || channel_is_empty(chn))) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))
ret = -1;
goto out;
}
if (ret > 0 && ret < len &&
(ret < co_data(chn) || channel_may_recv(chn)) &&
!found &&
- !(chn->flags & (CF_SHUTW|CF_SHUTW_NOW)))
+ !(chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)))
ret = 0;
out:
if (max)
max = len;
/* closed or empty + imminent close = -1; empty = 0 */
- if (unlikely((chn->flags & CF_SHUTW) || channel_is_empty(chn))) {
- if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
+ if (unlikely((chn_cons(chn)->flags & SC_FL_SHUTW) || channel_is_empty(chn))) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))
ret = -1;
goto out;
}
if (ret > 0 && ret < len &&
(ret < co_data(chn) || channel_may_recv(chn)) &&
*(str-1) != sep &&
- !(chn->flags & (CF_SHUTW|CF_SHUTW_NOW)))
+ !(chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)))
ret = 0;
out:
if (max)
max = len;
/* closed or empty + imminent close = -1; empty = 0 */
- if (unlikely((chn->flags & CF_SHUTW) || channel_is_empty(chn))) {
- if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
+ if (unlikely((chn_cons(chn)->flags & SC_FL_SHUTW) || channel_is_empty(chn))) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))
ret = -1;
goto out;
}
if (ret > 0 && ret < len &&
(ret < co_data(chn) || channel_may_recv(chn)) &&
*(str-1) != '\n' &&
- !(chn->flags & (CF_SHUTW|CF_SHUTW_NOW)))
+ !(chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)))
ret = 0;
out:
if (max)
*/
int co_getchar(const struct channel *chn, char *c)
{
- if (chn->flags & CF_SHUTW)
+ if (chn_cons(chn)->flags & SC_FL_SHUTW)
return -1;
if (unlikely(co_data(chn) == 0)) {
- if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
+ if (chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))
return -1;
return 0;
}
*/
int co_getblk(const struct channel *chn, char *blk, int len, int offset)
{
- if (chn->flags & CF_SHUTW)
+ if (chn_cons(chn)->flags & SC_FL_SHUTW)
return -1;
if (len + offset > co_data(chn) || co_data(chn) == 0) {
- if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
+ if (chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))
return -1;
return 0;
}
int co_getblk_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
{
if (unlikely(co_data(chn) == 0)) {
- if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
+ if (chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))
return -1;
return 0;
}
}
}
- if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW)) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) {
/* If we have found no LF and the buffer is shut, then
* the resulting string is made of the concatenation of
* the pending blocks (1 or 2).
char **blk2, size_t *len2)
{
if (unlikely(ci_data(chn) == 0)) {
- if (chn->flags & CF_SHUTR)
+ if (chn_prod(chn)->flags & SC_FL_SHUTR)
return -1;
return 0;
}
}
}
- if (chn->flags & CF_SHUTW) {
+ if (chn_cons(chn)->flags & SC_FL_SHUTW) {
/* If we have found no LF and the buffer is shut, then
* the resulting string is made of the concatenation of
* the pending blocks (1 or 2).
struct stconn *sc = appctx_sc(appctx);
char **var = ctx->var;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
return 1;
chunk_reset(&trash);
int fd = fdctx->fd;
int ret = 1;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
goto end;
chunk_reset(&trash);
goto read_again;
missing_data:
- if (req->flags & CF_SHUTR) {
+ if (chn_prod(req)->flags & SC_FL_SHUTR) {
/* There is no more request or a only a partial one and we
* receive a close from the client, we can leave */
channel_shutw_now(&s->res);
struct proxy *be = s->be;
if (sc_ep_test(s->scb, SE_FL_ERR_PENDING|SE_FL_ERROR) || (rep->flags & (CF_READ_TIMEOUT|CF_WRITE_TIMEOUT)) ||
- ((rep->flags & CF_SHUTW) && (rep->to_forward || co_data(rep)))) {
+ ((chn_cons(rep)->flags & SC_FL_SHUTW) && (rep->to_forward || co_data(rep)))) {
pcli_reply_and_close(s, "Can't connect to the target CLI!\n");
s->req.analysers &= ~AN_REQ_WAIT_CLI;
s->res.analysers &= ~AN_RES_WAIT_CLI;
return 0;
}
- if (rep->flags & CF_SHUTR) {
+ if (chn_prod(rep)->flags & SC_FL_SHUTR) {
/* stream cleanup */
pcli_write_prompt(s);
sockaddr_free(&s->scb->dst);
sc_set_state(s->scb, SC_ST_INI);
+ s->scb->flags &= ~(SC_FL_SHUTW|SC_FL_SHUTW_NOW);
s->scb->flags &= SC_FL_ISBACK | SC_FL_DONT_WAKE; /* we're in the context of process_stream */
- s->req.flags &= ~(CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CONNECT|CF_STREAMER|CF_STREAMER_FAST|CF_WROTE_DATA);
- s->res.flags &= ~(CF_SHUTR|CF_SHUTR_NOW|CF_STREAMER|CF_STREAMER_FAST|CF_WRITE_EVENT|CF_WROTE_DATA|CF_READ_EVENT);
+
+ s->req.flags &= ~(CF_AUTO_CONNECT|CF_STREAMER|CF_STREAMER_FAST|CF_WROTE_DATA);
+ s->res.flags &= ~(CF_STREAMER|CF_STREAMER_FAST|CF_WRITE_EVENT|CF_WROTE_DATA|CF_READ_EVENT);
s->flags &= ~(SF_DIRECT|SF_ASSIGNED|SF_BE_ASSIGNED|SF_FORCE_PRST|SF_IGNORE_PRST);
s->flags &= ~(SF_CURR_SESS|SF_REDIRECTABLE|SF_SRV_REUSED);
s->flags &= ~(SF_ERR_MASK|SF_FINST_MASK|SF_REDISP);
s->store_count = 0;
s->uniq_id = global.req_count++;
+ s->scf->flags &= ~(SC_FL_SHUTR|SC_FL_SHUTR_NOW);
s->scf->flags &= ~SC_FL_SND_NEVERWAIT;
s->scf->flags |= SC_FL_RCV_ONCE; /* one read is usually enough */
struct stconn *sc = appctx_sc(appctx);
int thr;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
return 1;
if (appctx->st0)
int ret = 1;
int i, fd;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
goto end;
chunk_reset(&trash);
const char *pfx = ctx->match;
int ret = 1;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
goto end;
if (!ctx->width) {
* - the input in closed and no data is pending
* - There is a READ/WRITE timeout
*/
- if (chn->flags & CF_SHUTW) {
+ if (chn_cons(chn)->flags & SC_FL_SHUTW) {
ret = 1;
goto end;
}
- if (chn->flags & CF_SHUTR) {
+ if (chn_prod(chn)->flags & SC_FL_SHUTR) {
if (((s->flags & SF_HTX) && htx_is_empty(htxbuf(&chn->buf))) || c_empty(chn)) {
ret = 1;
goto end;
*/
ret = tokens;
if (tokens < conf->min_size) {
- ret = ((chn_prod(chn)->flags & SC_FL_EOI) || (chn->flags & CF_SHUTR))
+ ret = (chn_prod(chn)->flags & (SC_FL_EOI|SC_FL_SHUTR))
? MIN(len, conf->min_size)
: conf->min_size;
/* A SHUTR at this stage means we are performing a "destructive"
* HTTP upgrade (TCP>H2). In this case, we can leave.
*/
- if (req->flags & CF_SHUTR) {
+ if (chn_prod(req)->flags & SC_FL_SHUTR) {
s->logs.logwait = 0;
s->logs.level = 0;
channel_abort(&s->req);
* there and that the timeout has not expired.
*/
channel_dont_connect(req);
- if (!(req->flags & CF_SHUTR) &&
+ if (!(chn_prod(req)->flags & SC_FL_SHUTR) &&
!tick_is_expired(req->analyse_exp, now_ms)) {
/* Be sure to drain all data from the request channel */
channel_htx_erase(req, htxbuf(&req->buf));
if (!(txn->flags & TX_CON_WANT_TUN))
channel_dont_close(req);
- if ((req->flags & CF_SHUTW) && co_data(req)) {
+ if ((chn_cons(req)->flags & SC_FL_SHUTW) && co_data(req)) {
/* request errors are most likely due to the server aborting the
* transfer. */
goto return_srv_abort;
* it can be abused to exhaust source ports. */
if (s->be->options & PR_O_ABRT_CLOSE) {
channel_auto_read(req);
- if ((req->flags & CF_SHUTR) && !(txn->flags & TX_CON_WANT_TUN))
+ if ((chn_prod(req)->flags & SC_FL_SHUTR) && !(txn->flags & TX_CON_WANT_TUN))
s->scb->flags |= SC_FL_NOLINGER;
channel_auto_close(req);
}
missing_data_or_waiting:
/* stop waiting for data if the input is closed before the end */
- if (msg->msg_state < HTTP_MSG_ENDING && req->flags & CF_SHUTR)
+ if (msg->msg_state < HTTP_MSG_ENDING && chn_prod(req)->flags & SC_FL_SHUTR)
goto return_cli_abort;
waiting:
/* waiting for the last bits to leave the buffer */
- if (req->flags & CF_SHUTW)
+ if (chn_cons(req)->flags & SC_FL_SHUTW)
goto return_srv_abort;
/* When TE: chunked is used, we need to get there again to parse remaining
req = &s->req;
res = &s->res;
+
/* Remove any write error from the request, and read error from the response */
- req->flags &= ~(CF_WRITE_TIMEOUT | CF_SHUTW | CF_SHUTW_NOW);
- res->flags &= ~(CF_READ_TIMEOUT | CF_SHUTR | CF_READ_EVENT | CF_SHUTR_NOW);
+ s->scf->flags &= ~(SC_FL_SHUTR|SC_FL_SHUTR_NOW);
+ req->flags &= ~CF_WRITE_TIMEOUT;
+ res->flags &= ~(CF_READ_TIMEOUT | CF_READ_EVENT);
res->analysers &= AN_RES_FLT_END;
s->conn_err_type = STRM_ET_NONE;
s->flags &= ~(SF_CONN_EXP | SF_ERR_MASK | SF_FINST_MASK);
res->analyse_exp = TICK_ETERNITY;
res->total = 0;
+ s->scb->flags &= ~(SC_FL_SHUTW|SC_FL_SHUTW_NOW);
if (sc_reset_endp(s->scb) < 0) {
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
}
/* 3: client abort with an abortonclose */
- else if ((rep->flags & CF_SHUTR) && ((s->req.flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))) {
+ else if ((chn_prod(rep)->flags & SC_FL_SHUTR) &&
+ (chn_prod(&s->req)->flags & SC_FL_SHUTR) &&
+ (chn_cons(&s->req)->flags & SC_FL_SHUTW)) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
if (sess->listener && sess->listener->counters)
}
/* 4: close from server, capture the response if the server has started to respond */
- else if (rep->flags & CF_SHUTR) {
+ else if (chn_prod(rep)->flags & SC_FL_SHUTR) {
if ((txn->flags & TX_L7_RETRY) &&
(s->be->retry_type & PR_RE_DISCONNECTED)) {
if (co_data(rep) || do_l7_retry(s, s->scb) == 0) {
if (htx->data != co_data(res))
goto missing_data_or_waiting;
- if (!(msg->flags & HTTP_MSGF_XFER_LEN) && res->flags & CF_SHUTR) {
+ if (!(msg->flags & HTTP_MSGF_XFER_LEN) && (chn_prod(res)->flags & SC_FL_SHUTR)) {
msg->msg_state = HTTP_MSG_ENDING;
goto ending;
}
channel_dont_close(res);
- if ((res->flags & CF_SHUTW) && co_data(res)) {
+ if ((chn_cons(res)->flags & SC_FL_SHUTW) && co_data(res)) {
/* response errors are most likely due to the client aborting
* the transfer. */
goto return_cli_abort;
return 0;
missing_data_or_waiting:
- if (res->flags & CF_SHUTW)
+ if (chn_cons(res)->flags & SC_FL_SHUTW)
goto return_cli_abort;
/* stop waiting for data if the input is closed before the end. If the
* so we don't want to count this as a server abort. Otherwise it's a
* server abort.
*/
- if (msg->msg_state < HTTP_MSG_ENDING && res->flags & CF_SHUTR) {
- if ((s->req.flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))
+ if (msg->msg_state < HTTP_MSG_ENDING && (chn_prod(res)->flags & SC_FL_SHUTR)) {
+ if ((chn_prod(&s->req)->flags & SC_FL_SHUTR) &&
+ (chn_cons(&s->req)->flags & SC_FL_SHUTW))
goto return_cli_abort;
/* If we have some pending data, we continue the processing */
if (htx_is_empty(htx))
/* Always call the action function if defined */
if (rule->action_ptr) {
if (sc_ep_test(s->scf, SE_FL_ERROR) ||
- ((s->req.flags & CF_SHUTR) &&
+ ((chn_prod(&s->req)->flags & SC_FL_SHUTR) &&
(px->options & PR_O_ABRT_CLOSE)))
act_opts |= ACT_OPT_FINAL;
/* Always call the action function if defined */
if (rule->action_ptr) {
if (sc_ep_test(s->scf, SE_FL_ERROR) ||
- ((s->req.flags & CF_SHUTR) &&
+ ((chn_prod(&s->req)->flags & SC_FL_SHUTR) &&
(px->options & PR_O_ABRT_CLOSE)))
act_opts |= ACT_OPT_FINAL;
}
/* we get here if we need to wait for more data */
- if (!(chn->flags & CF_SHUTR)) {
+ if (!(chn_prod(chn)->flags & SC_FL_SHUTR)) {
if (!tick_isset(chn->analyse_exp))
chn->analyse_exp = tick_add_ifset(now_ms, time);
ret = HTTP_RULE_RES_YIELD;
txn->rsp.msg_state != HTTP_MSG_CLOSED)
goto check_channel_flags;
- if (!(chn->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
+ if (!(chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))) {
channel_shutr_now(chn);
channel_shutw_now(chn);
}
check_channel_flags:
/* Here, we are in HTTP_MSG_DONE or HTTP_MSG_TUNNEL */
- if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW)) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) {
/* if we've just closed an output, let's switch */
txn->req.msg_state = HTTP_MSG_CLOSING;
goto http_msg_closing;
/* we're not expecting any new data to come for this
* transaction, so we can close it.
*/
- if (!(chn->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
+ if (!(chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))) {
channel_shutr_now(chn);
channel_shutw_now(chn);
}
check_channel_flags:
/* Here, we are in HTTP_MSG_DONE or HTTP_MSG_TUNNEL */
- if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW)) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) {
/* if we've just closed an output, let's switch */
txn->rsp.msg_state = HTTP_MSG_CLOSING;
goto http_msg_closing;
smp->flags = SMP_F_VOL_TEST;
if (!finished && (check || (chn && !channel_full(chn, global.tune.maxrewrite) &&
- !(chn_prod(chn)->flags & SC_FL_EOI) && !(chn->flags & CF_SHUTR))))
+ !(chn_prod(chn)->flags & (SC_FL_EOI|SC_FL_SHUTR)))))
smp->flags |= SMP_F_MAY_CHANGE;
return 1;
struct stconn *sc = appctx_sc(appctx);
struct pat_ref_elt *elt;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW)) {
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW)) {
/* If we're forced to shut down, we might have to remove our
* reference to the last ref_elt being dumped.
*/
char *uptime = NULL;
char *reloadtxt = NULL;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
return 1;
if (up < 0) /* must never be negative because of clock drift */
if (!cli_has_level(appctx, ACCESS_LVL_OPER))
return 1;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
return 1;
-
env = getenv("HAPROXY_LOAD_SUCCESS");
if (!env)
return 1;
struct stconn *sc = appctx_sc(appctx);
extern const char *monthname[12];
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
return 1;
chunk_reset(&trash);
if (ctx->thr >= global.nbthread)
goto done;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW)) {
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW)) {
/* If we're forced to shut down, we might have to remove our
* reference to the last stream being dumped.
*/
size_t len, cnt;
int ret;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
return 1;
HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
/* we've drained everything and are configured to wait for more
* data or an event (keypress, close)
*/
- if (!sc_oc(sc)->output && !(sc_oc(sc)->flags & CF_SHUTW)) {
+ if (!sc_oc(sc)->output && !(chn_cons(sc_oc(sc))->flags & SC_FL_SHUTW)) {
/* let's be woken up once new data arrive */
HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
LIST_APPEND(&ring->waiters, &appctx->wait_entry);
struct ckch_store *old_ckchs, *new_ckchs = NULL;
struct ckch_inst *ckchi;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
goto end;
while (1) {
struct ckch_inst_link *ckchi_link;
char *path;
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
goto end;
/* The ctx was already validated by the ca-file/crl-file parsing
/* for each bind_conf which use the crt-list, a new ckch_inst must be
* created.
*/
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW))
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW))
goto end;
switch (ctx->state) {
if (appctx->st0 == STAT_HTTP_POST) {
if (stats_process_http_post(sc))
appctx->st0 = STAT_HTTP_LAST;
- else if (req->flags & CF_SHUTR)
+ else if (chn_prod(req)->flags & SC_FL_SHUTR)
appctx->st0 = STAT_HTTP_DONE;
}
/* Conditionally forward the close to the write side. It return 1 if it can be
* forwarded. It is the caller responsibility to forward the close to the write
- * side. Otherwise, 0 is returned. In this case, CF_SHUTW_NOW flag may be set on
- * the channel if we are only waiting for the outgoing data to be flushed.
+ * side. Otherwise, 0 is returned. In this case, SC_FL_SHUTW_NOW flag may be set on
+ * the consumer SC if we are only waiting for the outgoing data to be flushed.
*/
static inline int sc_cond_forward_shutw(struct stconn *sc)
{
/* The close must not be forwarded */
- if (!(sc_ic(sc)->flags & CF_SHUTR) || !(sc->flags & SC_FL_NOHALF))
+ if (!(chn_prod(sc_ic(sc))->flags & SC_FL_SHUTR) || !(sc->flags & SC_FL_NOHALF))
return 0;
if (!channel_is_empty(sc_ic(sc))) {
{
struct channel *ic = sc_ic(sc);
- if (ic->flags & CF_SHUTR)
+ if (chn_prod(ic)->flags & SC_FL_SHUTR)
return;
- ic->flags |= CF_SHUTR|CF_READ_EVENT;
+
+ chn_prod(ic)->flags |= SC_FL_SHUTR;
+ ic->flags |= CF_READ_EVENT;
sc_ep_report_read_activity(sc);
if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
return;
- if (sc_oc(sc)->flags & CF_SHUTW) {
+ if (chn_cons(sc_oc(sc))->flags & SC_FL_SHUTW) {
sc->state = SC_ST_DIS;
if (sc->flags & SC_FL_ISBACK)
__sc_strm(sc)->conn_exp = TICK_ETERNITY;
struct channel *ic = sc_ic(sc);
struct channel *oc = sc_oc(sc);
- oc->flags &= ~CF_SHUTW_NOW;
- if (oc->flags & CF_SHUTW)
+ chn_cons(oc)->flags &= ~SC_FL_SHUTW_NOW;
+ if (chn_cons(oc)->flags & SC_FL_SHUTW)
return;
- oc->flags |= CF_SHUTW|CF_WRITE_EVENT;
+ chn_cons(oc)->flags |= SC_FL_SHUTW;
+ oc->flags |= CF_WRITE_EVENT;
sc_set_hcto(sc);
switch (sc->state) {
* no risk so we close both sides immediately.
*/
if (!sc_ep_test(sc, SE_FL_ERROR) && !(sc->flags & SC_FL_NOLINGER) &&
- !(ic->flags & (CF_SHUTR|CF_DONT_READ)))
+ !(chn_prod(ic)->flags & SC_FL_SHUTR) && !(ic->flags & CF_DONT_READ))
return;
__fallthrough;
__fallthrough;
default:
sc->flags &= ~SC_FL_NOLINGER;
- ic->flags |= CF_SHUTR;
+ chn_prod(ic)->flags |= SC_FL_SHUTR;
if (sc->flags & SC_FL_ISBACK)
__sc_strm(sc)->conn_exp = TICK_ETERNITY;
}
{
struct channel *oc = sc_oc(sc);
- if (unlikely(sc->state != SC_ST_EST || (oc->flags & CF_SHUTW)))
+ if (unlikely(sc->state != SC_ST_EST || (chn_cons(oc)->flags & SC_FL_SHUTW)))
return;
if (!sc_ep_test(sc, SE_FL_WAIT_DATA) || /* not waiting for data */
BUG_ON(!sc_conn(sc));
- if (ic->flags & CF_SHUTR)
+ if (chn_prod(ic)->flags & SC_FL_SHUTR)
return;
- ic->flags |= CF_SHUTR|CF_READ_EVENT;
+ chn_prod(ic)->flags |= SC_FL_SHUTR;
+ ic->flags |= CF_READ_EVENT;
if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
return;
- if (sc_oc(sc)->flags & CF_SHUTW) {
+ if (chn_cons(sc_oc(sc))->flags & SC_FL_SHUTW) {
sc_conn_shut(sc);
sc->state = SC_ST_DIS;
if (sc->flags & SC_FL_ISBACK)
BUG_ON(!sc_conn(sc));
- oc->flags &= ~CF_SHUTW_NOW;
- if (oc->flags & CF_SHUTW)
+ chn_cons(oc)->flags &= ~SC_FL_SHUTW_NOW;
+ if (chn_cons(oc)->flags & SC_FL_SHUTW)
return;
- oc->flags |= CF_SHUTW|CF_WRITE_EVENT;
+ chn_cons(oc)->flags |= SC_FL_SHUTW;
+ oc->flags |= CF_WRITE_EVENT;
sc_set_hcto(sc);
switch (sc->state) {
*/
sc_conn_shutw(sc, CO_SHW_NORMAL);
- if (!(ic->flags & (CF_SHUTR|CF_DONT_READ)))
+ if (!(chn_prod(ic)->flags & SC_FL_SHUTR) && !(ic->flags & CF_DONT_READ))
return;
}
__fallthrough;
default:
sc->flags &= ~SC_FL_NOLINGER;
- ic->flags |= CF_SHUTR;
+ chn_prod(ic)->flags |= SC_FL_SHUTR;
if (sc->flags & SC_FL_ISBACK)
__sc_strm(sc)->conn_exp = TICK_ETERNITY;
}
BUG_ON(!sc_conn(sc));
if (unlikely(!sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST) ||
- (oc->flags & CF_SHUTW)))
+ (chn_cons(oc)->flags & SC_FL_SHUTW)))
return;
if (unlikely(channel_is_empty(oc))) /* called with nothing to send ! */
* ->o limit was reached. Maybe we just wrote the last
* chunk and need to close.
*/
- if (((oc->flags & (CF_SHUTW|CF_AUTO_CLOSE|CF_SHUTW_NOW)) ==
- (CF_AUTO_CLOSE|CF_SHUTW_NOW)) &&
+ if ((oc->flags & CF_AUTO_CLOSE) &&
+ ((chn_cons(oc)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) == SC_FL_SHUTW_NOW) &&
sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST)) {
sc_shutw(sc);
goto out_wakeup;
}
- if ((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == 0)
+ if ((chn_cons(oc)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) == 0)
sc_ep_set(sc, SE_FL_WAIT_DATA);
}
else {
/* in case of special condition (error, shutdown, end of write...), we
* have to notify the task.
*/
- if (likely((oc->flags & CF_SHUTW) ||
+ if (likely((chn_cons(oc)->flags & SC_FL_SHUTW) ||
((oc->flags & CF_WRITE_EVENT) && sc->state < SC_ST_EST) ||
((oc->flags & CF_WAKE_WRITE) &&
((channel_is_empty(oc) && !oc->to_forward) ||
BUG_ON(!sc_appctx(sc));
- if (ic->flags & CF_SHUTR)
+ if (chn_prod(ic)->flags & SC_FL_SHUTR)
return;
- ic->flags |= CF_SHUTR|CF_READ_EVENT;
+ chn_prod(ic)->flags |= SC_FL_SHUTR;
+ ic->flags |= CF_READ_EVENT;
/* Note: on shutr, we don't call the applet */
if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
return;
- if (sc_oc(sc)->flags & CF_SHUTW) {
+ if (chn_cons(sc_oc(sc))->flags & SC_FL_SHUTW) {
appctx_shut(__sc_appctx(sc));
sc->state = SC_ST_DIS;
if (sc->flags & SC_FL_ISBACK)
BUG_ON(!sc_appctx(sc));
- oc->flags &= ~CF_SHUTW_NOW;
- if (oc->flags & CF_SHUTW)
+ chn_cons(oc)->flags &= ~SC_FL_SHUTW_NOW;
+ if (chn_cons(oc)->flags & SC_FL_SHUTW)
return;
- oc->flags |= CF_SHUTW|CF_WRITE_EVENT;
+ chn_cons(oc)->flags |= SC_FL_SHUTW;
+ oc->flags |= CF_WRITE_EVENT;
sc_set_hcto(sc);
/* on shutw we always wake the applet up */
* no risk so we close both sides immediately.
*/
if (!sc_ep_test(sc, SE_FL_ERROR) && !(sc->flags & SC_FL_NOLINGER) &&
- !(ic->flags & (CF_SHUTR|CF_DONT_READ)))
+ !(chn_prod(ic)->flags & SC_FL_SHUTR) &&
+ !(ic->flags & CF_DONT_READ))
return;
__fallthrough;
__fallthrough;
default:
sc->flags &= ~SC_FL_NOLINGER;
- ic->flags |= CF_SHUTR;
+ chn_prod(ic)->flags |= SC_FL_SHUTR;
if (sc->flags & SC_FL_ISBACK)
__sc_strm(sc)->conn_exp = TICK_ETERNITY;
}
BUG_ON(!sc_appctx(sc));
- if (unlikely(sc->state != SC_ST_EST || (oc->flags & CF_SHUTW)))
+ if (unlikely(sc->state != SC_ST_EST || (chn_cons(oc)->flags & SC_FL_SHUTW)))
return;
/* we only wake the applet up if it was waiting for some data and is ready to consume it */
{
struct channel *ic = sc_ic(sc);
- if (ic->flags & CF_SHUTR)
+ if (chn_prod(ic)->flags & SC_FL_SHUTR)
return;
/* Read not closed, update FD status and timeout for reads */
{
struct channel *oc = sc_oc(sc);
- if (oc->flags & CF_SHUTW)
+ if (chn_cons(oc)->flags & SC_FL_SHUTW)
return;
/* Write not closed, update FD status and timeout for writes */
if (channel_is_empty(oc)) {
/* stop writing */
if (!sc_ep_test(sc, SE_FL_WAIT_DATA)) {
- if ((oc->flags & CF_SHUTW_NOW) == 0)
+ if ((chn_cons(oc)->flags & SC_FL_SHUTW_NOW) == 0)
sc_ep_set(sc, SE_FL_WAIT_DATA);
}
return;
if (channel_is_empty(oc)) {
struct connection *conn = sc_conn(sc);
- if (((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW) &&
+ if (((chn_cons(oc)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) == SC_FL_SHUTW_NOW) &&
(sc->state == SC_ST_EST) && (!conn || !(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS))))
sc_shutw(sc);
}
/* indicate that we may be waiting for data from the output channel or
- * we're about to close and can't expect more data if SHUTW_NOW is there.
+ * we're about to close and can't expect more data if SC_FL_SHUTW_NOW is there.
*/
- if (!(oc->flags & (CF_SHUTW|CF_SHUTW_NOW)))
+ if (!(chn_cons(oc)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)))
sc_ep_set(sc, SE_FL_WAIT_DATA);
- else if ((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW)
+ else if ((chn_cons(oc)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) == SC_FL_SHUTW_NOW)
sc_ep_clr(sc, SE_FL_WAIT_DATA);
if (oc->flags & CF_DONT_READ)
* data received and no fast-forwarding (CF_READ_EVENT + !to_forward)
* read event while consumer side is not established (CF_READ_EVENT + sco->state != SC_ST_EST)
*/
- ((ic->flags & CF_READ_EVENT) && ((sc->flags & SC_FL_EOI) || (ic->flags & CF_SHUTR) || !ic->to_forward || sco->state != SC_ST_EST)) ||
+ ((ic->flags & CF_READ_EVENT) && ((sc->flags & SC_FL_EOI) || (chn_prod(ic)->flags & SC_FL_SHUTR) || !ic->to_forward || sco->state != SC_ST_EST)) ||
sc_ep_test(sc, SE_FL_ERROR) ||
/* changes on the consumption side */
sc_ep_test(sc, SE_FL_ERR_PENDING) ||
((oc->flags & CF_WRITE_EVENT) &&
((sc->state < SC_ST_EST) ||
- (oc->flags & CF_SHUTW) ||
+ (chn_cons(oc)->flags & SC_FL_SHUTW) ||
(((oc->flags & CF_WAKE_WRITE) ||
- !(oc->flags & (CF_AUTO_CLOSE|CF_SHUTW_NOW|CF_SHUTW))) &&
- (sco->state != SC_ST_EST ||
- (channel_is_empty(oc) && !oc->to_forward)))))) {
+ (!(oc->flags & CF_AUTO_CLOSE) &&
+ !(chn_cons(oc)->flags & (SC_FL_SHUTW_NOW|SC_FL_SHUTW)))) &&
+ (sco->state != SC_ST_EST ||
+ (channel_is_empty(oc) && !oc->to_forward)))))) {
task_wakeup(task, TASK_WOKEN_IO);
}
BUG_ON(!sc_conn(sc));
- if (ic->flags & CF_SHUTR)
+ if (chn_prod(ic)->flags & SC_FL_SHUTR)
return;
- ic->flags |= CF_SHUTR|CF_READ_EVENT;
+ chn_prod(ic)->flags |= SC_FL_SHUTR;
+ ic->flags |= CF_READ_EVENT;
sc_ep_report_read_activity(sc);
if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
return;
- if (oc->flags & CF_SHUTW)
+ if (chn_cons(oc)->flags & SC_FL_SHUTW)
goto do_close;
if (sc_cond_forward_shutw(sc)) {
/* OK we completely close the socket here just as if we went through sc_shut[rw]() */
sc_conn_shut(sc);
- oc->flags &= ~CF_SHUTW_NOW;
- oc->flags |= CF_SHUTW;
+ chn_cons(oc)->flags &= ~SC_FL_SHUTW_NOW;
+ chn_cons(oc)->flags |= SC_FL_SHUTW;
sc->state = SC_ST_DIS;
if (sc->flags & SC_FL_ISBACK)
return 0;
/* maybe we were called immediately after an asynchronous shutr */
- if (ic->flags & CF_SHUTR)
+ if (chn_prod(ic)->flags & SC_FL_SHUTR)
return 1;
/* we must wait because the mux is not installed yet */
*/
while (sc_ep_test(sc, SE_FL_RCV_MORE) ||
(!(conn->flags & CO_FL_HANDSHAKE) &&
- (!sc_ep_test(sc, SE_FL_ERROR | SE_FL_EOS)) && !(ic->flags & CF_SHUTR))) {
+ (!sc_ep_test(sc, SE_FL_ERROR | SE_FL_EOS)) && !(chn_prod(ic)->flags & SC_FL_SHUTR))) {
int cur_flags = flags;
/* Compute transient CO_RFL_* flags */
cur_read += ret;
/* if we're allowed to directly forward data, we must update ->o */
- if (ic->to_forward && !(ic->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
+ if (ic->to_forward && !(chn_cons(ic)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))) {
unsigned long fwd = ret;
if (ic->to_forward != CHN_INFINITE_FORWARD) {
if (fwd > ic->to_forward)
if (sc_ep_test(sc, SE_FL_ERROR))
ret = 1;
else if (!(sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM)) &&
- !(ic->flags & CF_SHUTR)) {
+ !(chn_prod(ic)->flags & SC_FL_SHUTR)) {
/* Subscribe to receive events if we're blocking on I/O */
conn->mux->subscribe(sc, SUB_RETRY_RECV, &sc->wait_event);
se_have_no_more_data(sc->sedesc);
return 0;
/* we might have been called just after an asynchronous shutw */
- if (oc->flags & CF_SHUTW)
+ if (chn_cons(oc)->flags & SC_FL_SHUTW)
return 1;
/* we must wait because the mux is not installed yet */
((oc->to_forward && oc->to_forward != CHN_INFINITE_FORWARD) ||
(sc->flags & SC_FL_SND_EXP_MORE) ||
(IS_HTX_STRM(s) &&
- (!(sco->flags & SC_FL_EOI) && !(oc->flags & CF_SHUTR) && htx_expect_more(htxbuf(&oc->buf)))))) ||
+ (!(sco->flags & SC_FL_EOI) && !(chn_prod(oc)->flags & SC_FL_SHUTR) && htx_expect_more(htxbuf(&oc->buf)))))) ||
((oc->flags & CF_ISRESP) &&
- ((oc->flags & (CF_AUTO_CLOSE|CF_SHUTW_NOW)) == (CF_AUTO_CLOSE|CF_SHUTW_NOW))))
+ (oc->flags & CF_AUTO_CLOSE) &&
+ (chn_cons(oc)->flags & SC_FL_SHUTW_NOW)))
send_flag |= CO_SFL_MSG_MORE;
if (oc->flags & CF_STREAMER)
oc->flags &= ~CF_WRITE_EVENT;
- if (oc->flags & CF_SHUTW)
+ if (chn_cons(oc)->flags & SC_FL_SHUTW)
return;
if (channel_is_empty(oc))
* wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
* care of it.
*/
- if (sc_ep_test(sc, SE_FL_EOS) && !(ic->flags & CF_SHUTR)) {
+ if (sc_ep_test(sc, SE_FL_EOS) && !(chn_prod(ic)->flags & SC_FL_SHUTR)) {
/* we received a shutdown */
if (ic->flags & CF_AUTO_CLOSE)
channel_shutw_now(ic);
/* If the applet wants to write and the channel is closed, it's a
* broken pipe and it must be reported.
*/
- if (!sc_ep_test(sc, SE_FL_HAVE_NO_DATA) && (ic->flags & CF_SHUTR))
+ if (!sc_ep_test(sc, SE_FL_HAVE_NO_DATA) && (chn_prod(ic)->flags & SC_FL_SHUTR))
sc_ep_set(sc, SE_FL_ERROR);
/* automatically mark the applet having data available if it reported
* - STATE_DONE : nothing left to dump, the buffer may contain some
* data though.
*/
-
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW)) {
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW)) {
/* in case of abort, remove any refcount we might have set on an entry */
if (ctx->state == STATE_DUMP) {
stksess_kill_if_expired(ctx->t, ctx->entry, 1);
* SC_ST_EST state. It must only be called after switching from SC_ST_CON (or
* SC_ST_INI or SC_ST_RDY) to SC_ST_EST, but only when a ->proto is defined.
* Note that it will switch the interface to SC_ST_DIS if we already have
- * the CF_SHUTR flag, it means we were able to forward the request, and
+ * the SC_FL_SHUTR flag, it means we were able to forward the request, and
* receive the response, before process_stream() had the opportunity to
* make the switch from SC_ST_CON to SC_ST_EST. When that happens, we want
* to go through back_establish() anyway, to make sure the analysers run.
}
/* If we managed to get the whole response, and we don't have anything
* left to send, or can't, switch to SC_ST_DIS now. */
- if (rep->flags & (CF_SHUTR | CF_SHUTW)) {
+ if ((s->scb->flags & SC_FL_SHUTR) || (s->scf->flags & SC_FL_SHUTW)) {
s->scb->state = SC_ST_DIS;
DBG_TRACE_STATE("response channel shutdwn for read/write", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
}
sc_check_timeouts(s->scf);
channel_check_timeout(&s->req);
- if (unlikely((s->req.flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
+ if (unlikely(!(s->scb->flags & SC_FL_SHUTW) && (s->req.flags & CF_WRITE_TIMEOUT))) {
s->scb->flags |= SC_FL_NOLINGER;
sc_shutw(s->scb);
}
- if (unlikely((s->req.flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
+ if (unlikely(!(s->scf->flags & SC_FL_SHUTR) && (s->req.flags & CF_READ_TIMEOUT))) {
if (s->scf->flags & SC_FL_NOHALF)
s->scf->flags |= SC_FL_NOLINGER;
sc_shutr(s->scf);
sc_check_timeouts(s->scb);
channel_check_timeout(&s->res);
- if (unlikely((s->res.flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
+ if (unlikely(!(s->scf->flags & SC_FL_SHUTW) && (s->res.flags & CF_WRITE_TIMEOUT))) {
s->scf->flags |= SC_FL_NOLINGER;
sc_shutw(s->scf);
}
- if (unlikely((s->res.flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
+ if (unlikely(!(s->scb->flags & SC_FL_SHUTR) && (s->res.flags & CF_READ_TIMEOUT))) {
if (s->scb->flags & SC_FL_NOHALF)
s->scb->flags |= SC_FL_NOLINGER;
sc_shutr(s->scb);
- }
+ }
if (HAS_FILTERS(s))
flt_stream_check_timeouts(s);
struct server *srv;
struct stream *s = context;
struct session *sess = s->sess;
+ unsigned int scf_flags, scb_flags;
unsigned int rqf_last, rpf_last;
unsigned int rq_prod_last, rq_cons_last;
unsigned int rp_cons_last, rp_prod_last;
scf->flags |= SC_FL_DONT_WAKE;
scb->flags |= SC_FL_DONT_WAKE;
+ /* Keep a copy of SC flags */
+ scf_flags = scf->flags;
+ scb_flags = scb->flags;
+
/* update pending events */
s->pending_events |= (state & TASK_WOKEN_ANY);
* So let's not run a whole stream processing if only an expiration
* timeout needs to be refreshed.
*/
- if (!((req->flags | res->flags) &
- (CF_SHUTR|CF_READ_EVENT|CF_READ_TIMEOUT|CF_SHUTW|
- CF_WRITE_EVENT|CF_WRITE_TIMEOUT)) &&
+ if (!((scf->flags | scb->flags) & (SC_FL_SHUTR|SC_FL_SHUTW)) &&
+ !((req->flags | res->flags) & (CF_READ_EVENT|CF_READ_TIMEOUT|CF_WRITE_EVENT|CF_WRITE_TIMEOUT)) &&
!(s->flags & SF_CONN_EXP) &&
!((sc_ep_get(scf) | sc_ep_get(scb)) & SE_FL_ERROR) &&
((s->pending_events & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER)) {
resync_request:
/* Analyse request */
if (((req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
- ((req->flags ^ rqf_last) & CF_MASK_STATIC) ||
- (req->analysers && (req->flags & CF_SHUTW)) ||
+ ((scf->flags ^ scf_flags) & (SC_FL_SHUTR|SC_FL_SHUTR_NOW)) ||
+ ((scb->flags ^ scb_flags) & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) ||
+ (req->analysers && (chn_cons(req)->flags & SC_FL_SHUTW)) ||
scf->state != rq_prod_last ||
scb->state != rq_cons_last ||
s->pending_events & TASK_WOKEN_MSG) {
- unsigned int flags = req->flags;
+ unsigned int scf_flags_ana = scf->flags;
+ unsigned int scb_flags_ana = scb->flags;
if (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) {
int max_loops = global.tune.maxpollevents;
rq_cons_last = scb->state;
req->flags &= ~CF_WAKE_ONCE;
rqf_last = req->flags;
+ scf_flags = (scf_flags & ~(SC_FL_SHUTR|SC_FL_SHUTR_NOW)) | (scf->flags & (SC_FL_SHUTR|SC_FL_SHUTR_NOW));
+ scb_flags = (scb_flags & ~(SC_FL_SHUTW|SC_FL_SHUTW_NOW)) | (scb->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW));
- if ((req->flags ^ flags) & (CF_SHUTR|CF_SHUTW))
+ if (((scf->flags ^ scf_flags_ana) & SC_FL_SHUTR) || ((scb->flags ^ scb_flags_ana) & SC_FL_SHUTW))
goto resync_request;
}
/* Analyse response */
if (((res->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
- (res->flags ^ rpf_last) & CF_MASK_STATIC ||
- (res->analysers && (res->flags & CF_SHUTW)) ||
- scf->state != rp_cons_last ||
- scb->state != rp_prod_last ||
- s->pending_events & TASK_WOKEN_MSG) {
- unsigned int flags = res->flags;
+ ((scb->flags ^ scb_flags) & (SC_FL_SHUTR|SC_FL_SHUTR_NOW)) ||
+ ((scf->flags ^ scf_flags) & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) ||
+ (res->analysers && (chn_cons(res)->flags & SC_FL_SHUTW)) ||
+ scf->state != rp_cons_last ||
+ scb->state != rp_prod_last ||
+ s->pending_events & TASK_WOKEN_MSG) {
+ unsigned int scb_flags_ana = scb->flags;
+ unsigned int scf_flags_ana = scf->flags;
if (sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) {
int max_loops = global.tune.maxpollevents;
rp_prod_last = scb->state;
res->flags &= ~CF_WAKE_ONCE;
rpf_last = res->flags;
+ scb_flags = (scb_flags & ~(SC_FL_SHUTR|SC_FL_SHUTR_NOW)) | (scb->flags & (SC_FL_SHUTR|SC_FL_SHUTR_NOW));
+ scf_flags = (scf_flags & ~(SC_FL_SHUTW|SC_FL_SHUTW_NOW)) | (scf->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW));
- if ((res->flags ^ flags) & (CF_SHUTR|CF_SHUTW))
+ if (((scb->flags ^ scb_flags_ana) & SC_FL_SHUTR) || ((scf->flags ^ scf_flags_ana) & SC_FL_SHUTW))
goto resync_response;
}
/* If no one is interested in analysing data, it's time to forward
* everything. We configure the buffer to forward indefinitely.
- * Note that we're checking CF_SHUTR_NOW as an indication of a possible
+ * Note that we're checking SC_FL_SHUTR_NOW as an indication of a possible
* recent call to channel_abort().
*/
if (unlikely((!req->analysers || (req->analysers == AN_REQ_FLT_END && !(req->flags & CF_FLT_ANALYZE))) &&
- !(req->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
- (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) &&
- (req->to_forward != CHN_INFINITE_FORWARD))) {
+ !(scf->flags & SC_FL_SHUTR_NOW) && !(scb->flags & SC_FL_SHUTW) &&
+ (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) &&
+ (req->to_forward != CHN_INFINITE_FORWARD))) {
/* This buffer is freewheeling, there's no analyser
* attached to it. If any data are left in, we'll permit them to
* move.
* to the consumer.
*/
co_set_data(req, htx->data);
- if ((global.tune.options & GTUNE_USE_FAST_FWD) && !(req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
+ if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
+ !(scf->flags & SC_FL_SHUTR) && !(scb->flags & SC_FL_SHUTW_NOW))
channel_htx_forward_forever(req, htx);
}
else {
* to the consumer (which might possibly not be connected yet).
*/
c_adv(req, ci_data(req));
- if ((global.tune.options & GTUNE_USE_FAST_FWD) && !(req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
+ if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
+ !(scf->flags & SC_FL_SHUTR) && !(scb->flags & SC_FL_SHUTW_NOW))
channel_forward_forever(req);
}
}
/* check if it is wise to enable kernel splicing to forward request data */
- if (!(req->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
+ if (!(req->flags & CF_KERN_SPLICING) &&
+ !(scf->flags & SC_FL_SHUTR) &&
req->to_forward &&
(global.tune.options & GTUNE_USE_SPLICE) &&
(sc_conn(scf) && __sc_conn(scf)->xprt && __sc_conn(scf)->xprt->rcv_pipe &&
/* reflect what the L7 analysers have seen last */
rqf_last = req->flags;
+ scf_flags = (scf_flags & ~(SC_FL_SHUTR|SC_FL_SHUTR_NOW)) | (scf->flags & (SC_FL_SHUTR|SC_FL_SHUTR_NOW));
+ scb_flags = (scb_flags & ~(SC_FL_SHUTW|SC_FL_SHUTW_NOW)) | (scb->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW));
/* it's possible that an upper layer has requested a connection setup or abort.
* There are 2 situations where we decide to establish a new connection :
* - the CF_AUTO_CONNECT flag is set (active connection)
*/
if (scb->state == SC_ST_INI) {
- if (!(req->flags & CF_SHUTW)) {
+ if (!(scb->flags & SC_FL_SHUTW)) {
if ((req->flags & CF_AUTO_CONNECT) || !channel_is_empty(req)) {
/* If we have an appctx, there is no connect method, so we
* immediately switch to the connected state, otherwise we
* the other side's timeout as well. However this doesn't have effect during the
* connection setup unless the backend has abortonclose set.
*/
- if (unlikely((req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
- (CF_AUTO_CLOSE|CF_SHUTR) &&
+ if (unlikely((req->flags & CF_AUTO_CLOSE) && (scf->flags & SC_FL_SHUTR) &&
+ !(scb->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) &&
(scb->state != SC_ST_CON || (s->be->options & PR_O_ABRT_CLOSE)))) {
channel_shutw_now(req);
}
/* shutdown(write) pending */
- if (unlikely((req->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
+ if (unlikely((scb->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) == SC_FL_SHUTW_NOW &&
channel_is_empty(req))) {
if (sc_ep_test(s->scf, SE_FL_ERROR))
scb->flags |= SC_FL_NOLINGER;
}
/* shutdown(write) done on server side, we must stop the client too */
- if (unlikely((req->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
- !req->analysers))
+ if (unlikely((scb->flags & SC_FL_SHUTW) && !(scf->flags & (SC_FL_SHUTR|SC_FL_SHUTR_NOW))) &&
+ !req->analysers)
channel_shutr_now(req);
/* shutdown(read) pending */
- if (unlikely((req->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
+ if (unlikely((scf->flags & (SC_FL_SHUTR|SC_FL_SHUTR_NOW)) == SC_FL_SHUTR_NOW)) {
if (scf->flags & SC_FL_NOHALF)
scf->flags |= SC_FL_NOLINGER;
sc_shutr(scf);
goto resync_stconns;
/* otherwise we want to check if we need to resync the req buffer or not */
- if ((req->flags ^ rqf_last) & (CF_SHUTR|CF_SHUTW))
+ if (((scf->flags ^ scf_flags) & SC_FL_SHUTR) || ((scb->flags ^ scb_flags) & SC_FL_SHUTW))
goto resync_request;
/* perform output updates to the response buffer */
/* If no one is interested in analysing data, it's time to forward
* everything. We configure the buffer to forward indefinitely.
- * Note that we're checking CF_SHUTR_NOW as an indication of a possible
+ * Note that we're checking SC_FL_SHUTR_NOW as an indication of a possible
* recent call to channel_abort().
*/
if (unlikely((!res->analysers || (res->analysers == AN_RES_FLT_END && !(res->flags & CF_FLT_ANALYZE))) &&
- !(res->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
- sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO) &&
- (res->to_forward != CHN_INFINITE_FORWARD))) {
+ !(scb->flags & SC_FL_SHUTR_NOW) && !(scf->flags & SC_FL_SHUTW) &&
+ sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO) &&
+ (res->to_forward != CHN_INFINITE_FORWARD))) {
/* This buffer is freewheeling, there's no analyser
* attached to it. If any data are left in, we'll permit them to
* move.
* to the consumer.
*/
co_set_data(res, htx->data);
- if ((global.tune.options & GTUNE_USE_FAST_FWD) && !(res->flags & (CF_SHUTR|CF_SHUTW_NOW)))
+ if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
+ !(scb->flags & SC_FL_SHUTR) && !(scf->flags & SC_FL_SHUTW_NOW))
channel_htx_forward_forever(res, htx);
}
else {
* to the consumer.
*/
c_adv(res, ci_data(res));
- if ((global.tune.options & GTUNE_USE_FAST_FWD) && !(res->flags & (CF_SHUTR|CF_SHUTW_NOW)))
+ if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
+ !(scb->flags & SC_FL_SHUTR) && !(scf->flags & SC_FL_SHUTW_NOW))
channel_forward_forever(res);
}
if (!req->analysers && s->tunnel_timeout) {
scf->ioto = scb->ioto = s->tunnel_timeout;
- if (((req->flags & CF_SHUTR) || (res->flags & CF_SHUTW)) && tick_isset(sess->fe->timeout.clientfin))
+ if ((scf->flags & (SC_FL_SHUTR|SC_FL_SHUTW)) && tick_isset(sess->fe->timeout.clientfin))
scf->ioto = sess->fe->timeout.clientfin;
- if (((res->flags & CF_SHUTR) || (req->flags & CF_SHUTW)) && tick_isset(s->be->timeout.serverfin))
+ if ((scb->flags & (SC_FL_SHUTR|SC_FL_SHUTW)) && tick_isset(s->be->timeout.serverfin))
scb->ioto = s->be->timeout.serverfin;
}
}
/* check if it is wise to enable kernel splicing to forward response data */
- if (!(res->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
+ if (!(res->flags & CF_KERN_SPLICING) &&
+ !(scb->flags & SC_FL_SHUTR) &&
res->to_forward &&
(global.tune.options & GTUNE_USE_SPLICE) &&
(sc_conn(scf) && __sc_conn(scf)->xprt && __sc_conn(scf)->xprt->snd_pipe &&
/* reflect what the L7 analysers have seen last */
rpf_last = res->flags;
+ scb_flags = (scb_flags & ~(SC_FL_SHUTR|SC_FL_SHUTR_NOW)) | (scb->flags & (SC_FL_SHUTR|SC_FL_SHUTR_NOW));
+ scf_flags = (scf_flags & ~(SC_FL_SHUTW|SC_FL_SHUTW_NOW)) | (scf->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW));
/* Let's see if we can send the pending response now */
sc_conn_sync_send(scf);
*/
/* first, let's check if the response buffer needs to shutdown(write) */
- if (unlikely((res->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
- (CF_AUTO_CLOSE|CF_SHUTR))) {
+ if (unlikely((res->flags & CF_AUTO_CLOSE) && (scb->flags & SC_FL_SHUTR) &&
+ !(scf->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)))) {
channel_shutw_now(res);
}
/* shutdown(write) pending */
- if (unlikely((res->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
+ if (unlikely((scf->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) == SC_FL_SHUTW_NOW &&
channel_is_empty(res))) {
sc_shutw(scf);
}
/* shutdown(write) done on the client side, we must stop the server too */
- if (unlikely((res->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
+ if (unlikely((scf->flags & SC_FL_SHUTW) && !(scb->flags & (SC_FL_SHUTR|SC_FL_SHUTR_NOW))) &&
!res->analysers)
channel_shutr_now(res);
/* shutdown(read) pending */
- if (unlikely((res->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
+ if (unlikely((scb->flags & (SC_FL_SHUTR|SC_FL_SHUTR_NOW)) == SC_FL_SHUTR_NOW)) {
if (scb->flags & SC_FL_NOHALF)
scb->flags |= SC_FL_NOLINGER;
sc_shutr(scb);
if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
goto resync_request;
- if ((res->flags ^ rpf_last) & CF_MASK_STATIC)
+ if (((scb->flags ^ scb_flags) & (SC_FL_SHUTR|SC_FL_SHUTR_NOW)) ||
+ ((scf->flags ^ scf_flags) & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)))
goto resync_response;
if (((req->flags ^ rqf_last) | (res->flags ^ rpf_last)) & CF_MASK_ANALYSER)
/* kill a stream and set the termination flags to <why> (one of SF_ERR_*) */
void stream_shutdown(struct stream *stream, int why)
{
- if (stream->req.flags & (CF_SHUTW|CF_SHUTW_NOW))
+ if (stream->scb->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))
return;
channel_shutw_now(&stream->req);
goto done;
}
- if (unlikely(sc_ic(sc)->flags & CF_SHUTW)) {
+ if (unlikely(chn_cons(sc_ic(sc))->flags & SC_FL_SHUTW)) {
/* If we're forced to shut down, we might have to remove our
* reference to the last stream being dumped.
*/
* - if one rule returns KO, then return KO
*/
- if ((s->scf->flags & SC_FL_EOI) || (req->flags & CF_SHUTR) || channel_full(req, global.tune.maxrewrite) ||
+ if ((s->scf->flags & (SC_FL_EOI|SC_FL_SHUTR)) || channel_full(req, global.tune.maxrewrite) ||
sc_waiting_room(chn_prod(req)) ||
!s->be->tcp_req.inspect_delay || tick_is_expired(s->rules_exp, now_ms)) {
partial = SMP_OPT_FINAL;
* - if one rule returns OK, then return OK
* - if one rule returns KO, then return KO
*/
- if ((s->scb->flags & SC_FL_EOI) || (rep->flags & CF_SHUTR) || channel_full(rep, global.tune.maxrewrite) ||
+ if ((s->scb->flags & (SC_FL_EOI|SC_FL_SHUTR)) || channel_full(rep, global.tune.maxrewrite) ||
sc_waiting_room(chn_prod(rep)) ||
!s->be->tcp_rep.inspect_delay || tick_is_expired(s->rules_exp, now_ms)) {
partial = SMP_OPT_FINAL;