CLEANUP: session: use local variables to access channels / stream ints
author    Willy Tarreau <w@1wt.eu>
          Fri, 28 Nov 2014 14:07:47 +0000 (15:07 +0100)
committer Willy Tarreau <w@1wt.eu>
          Wed, 11 Mar 2015 19:41:47 +0000 (20:41 +0100)
In process_session, we had around 300 accesses to channels and stream-ints
from the session. Not only does this inflate the code due to the large
offsets from the original pointer, it also hurts readability. Let's use 4
local variables for the channels and stream-ints.
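For readers who don't know the pattern, here is a minimal, self-contained
sketch of the refactoring this commit applies. The structures below are
greatly simplified stand-ins for HAProxy's real channel, stream_interface
and session types, and their field sets are hypothetical; only the aliasing
technique itself is what the commit does.

#include <stdio.h>

/* Simplified stand-ins, not HAProxy's actual definitions. */
struct channel { unsigned int flags; };
struct stream_interface { unsigned int flags; int state; };
struct session {
        struct channel req, res;
        struct stream_interface si[2];  /* [0] = frontend, [1] = backend */
};

static void process(struct session *s)
{
        /* Take each heavily-used sub-object's address once... */
        struct channel *req = &s->req;
        struct channel *res = &s->res;
        struct stream_interface *si_f = &s->si[0];
        struct stream_interface *si_b = &s->si[1];

        /* ...so the hundreds of later accesses use a short base-relative
         * form (req->flags) instead of recomputing s-> plus a large member
         * offset every time (s->req.flags). */
        req->flags |= 1;
        res->flags |= 1;
        si_f->state = 0;
        si_b->state = 0;
}

int main(void)
{
        struct session s = {0};
        process(&s);
        printf("req.flags=%u res.flags=%u\n", s.req.flags, s.res.flags);
        return 0;
}

Since the aliases never change, the compiler can keep them in registers; the
win is mostly in source readability and smaller instruction encodings rather
than in eliminating loads.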

src/session.c

index 053e4a2449a3169e6959361a160f8fcb4bf5052e..83acc4ed9d0e555ac9dab7b5ff33dadbc77c7e62 100644
@@ -1731,31 +1731,39 @@ struct task *process_session(struct task *t)
        unsigned int rq_prod_last, rq_cons_last;
        unsigned int rp_cons_last, rp_prod_last;
        unsigned int req_ana_back;
+       struct channel *req, *res;
+       struct stream_interface *si_f, *si_b;
+
+       req = &s->req;
+       res = &s->res;
+
+       si_f = &s->si[0];
+       si_b = &s->si[1];
 
        //DPRINTF(stderr, "%s:%d: cs=%d ss=%d(%d) rqf=0x%08x rpf=0x%08x\n", __FUNCTION__, __LINE__,
-       //        s->si[0].state, s->si[1].state, s->si[1].err_type, s->req.flags, s->res.flags);
+       //        si_f->state, si_b->state, si_b->err_type, req->flags, res->flags);
 
        /* this data may be no longer valid, clear it */
        memset(&s->txn.auth, 0, sizeof(s->txn.auth));
 
        /* This flag must explicitly be set every time */
-       s->req.flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
-       s->res.flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
+       req->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
+       res->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
 
        /* Keep a copy of req/rep flags so that we can detect shutdowns */
-       rqf_last = s->req.flags & ~CF_MASK_ANALYSER;
-       rpf_last = s->res.flags & ~CF_MASK_ANALYSER;
+       rqf_last = req->flags & ~CF_MASK_ANALYSER;
+       rpf_last = res->flags & ~CF_MASK_ANALYSER;
 
        /* we don't want the stream interface functions to recursively wake us up */
-       s->si[0].flags |= SI_FL_DONT_WAKE;
-       s->si[1].flags |= SI_FL_DONT_WAKE;
+       si_f->flags |= SI_FL_DONT_WAKE;
+       si_b->flags |= SI_FL_DONT_WAKE;
 
        /* 1a: Check for low level timeouts if needed. We just set a flag on
         * stream interfaces when their timeouts have expired.
         */
        if (unlikely(t->state & TASK_WOKEN_TIMER)) {
-               stream_int_check_timeouts(&s->si[0]);
-               stream_int_check_timeouts(&s->si[1]);
+               stream_int_check_timeouts(si_f);
+               stream_int_check_timeouts(si_b);
 
                /* check channel timeouts, and close the corresponding stream interfaces
                 * for future reads or writes. Note: this will also concern upper layers
@@ -1763,30 +1771,30 @@ struct task *process_session(struct task *t)
                 * detect state changes when calling them.
                 */
 
-               channel_check_timeouts(&s->req);
+               channel_check_timeouts(req);
 
-               if (unlikely((s->req.flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
-                       s->si[1].flags |= SI_FL_NOLINGER;
-                       si_shutw(&s->si[1]);
+               if (unlikely((req->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
+                       si_b->flags |= SI_FL_NOLINGER;
+                       si_shutw(si_b);
                }
 
-               if (unlikely((s->req.flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
-                       if (s->si[0].flags & SI_FL_NOHALF)
-                               s->si[0].flags |= SI_FL_NOLINGER;
-                       si_shutr(&s->si[0]);
+               if (unlikely((req->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
+                       if (si_f->flags & SI_FL_NOHALF)
+                               si_f->flags |= SI_FL_NOLINGER;
+                       si_shutr(si_f);
                }
 
-               channel_check_timeouts(&s->res);
+               channel_check_timeouts(res);
 
-               if (unlikely((s->res.flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
-                       s->si[0].flags |= SI_FL_NOLINGER;
-                       si_shutw(&s->si[0]);
+               if (unlikely((res->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
+                       si_f->flags |= SI_FL_NOLINGER;
+                       si_shutw(si_f);
                }
 
-               if (unlikely((s->res.flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
-                       if (s->si[1].flags & SI_FL_NOHALF)
-                               s->si[1].flags |= SI_FL_NOLINGER;
-                       si_shutr(&s->si[1]);
+               if (unlikely((res->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
+                       if (si_b->flags & SI_FL_NOHALF)
+                               si_b->flags |= SI_FL_NOLINGER;
+                       si_shutr(si_b);
                }
 
                /* Once in a while we're woken up because the task expires. But
@@ -1794,10 +1802,10 @@ struct task *process_session(struct task *t)
                 * So let's not run a whole session processing if only an expiration
                 * timeout needs to be refreshed.
                 */
-               if (!((s->req.flags | s->res.flags) &
+               if (!((req->flags | res->flags) &
                      (CF_SHUTR|CF_READ_ACTIVITY|CF_READ_TIMEOUT|CF_SHUTW|
                       CF_WRITE_ACTIVITY|CF_WRITE_TIMEOUT|CF_ANA_TIMEOUT)) &&
-                   !((s->si[0].flags | s->si[1].flags) & (SI_FL_EXP|SI_FL_ERR)) &&
+                   !((si_f->flags | si_b->flags) & (SI_FL_EXP|SI_FL_ERR)) &&
                    ((t->state & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER))
                        goto update_exp_and_leave;
        }
@@ -1820,12 +1828,12 @@ struct task *process_session(struct task *t)
         * connection setup code must be able to deal with any type of abort.
         */
        srv = objt_server(s->target);
-       if (unlikely(s->si[0].flags & SI_FL_ERR)) {
-               if (s->si[0].state == SI_ST_EST || s->si[0].state == SI_ST_DIS) {
-                       si_shutr(&s->si[0]);
-                       si_shutw(&s->si[0]);
-                       stream_int_report_error(&s->si[0]);
-                       if (!(s->req.analysers) && !(s->res.analysers)) {
+       if (unlikely(si_f->flags & SI_FL_ERR)) {
+               if (si_f->state == SI_ST_EST || si_f->state == SI_ST_DIS) {
+                       si_shutr(si_f);
+                       si_shutw(si_f);
+                       stream_int_report_error(si_f);
+                       if (!(req->analysers) && !(res->analysers)) {
                                s->be->be_counters.cli_aborts++;
                                s->fe->fe_counters.cli_aborts++;
                                if (srv)
@@ -1838,15 +1846,15 @@ struct task *process_session(struct task *t)
                }
        }
 
-       if (unlikely(s->si[1].flags & SI_FL_ERR)) {
-               if (s->si[1].state == SI_ST_EST || s->si[1].state == SI_ST_DIS) {
-                       si_shutr(&s->si[1]);
-                       si_shutw(&s->si[1]);
-                       stream_int_report_error(&s->si[1]);
+       if (unlikely(si_b->flags & SI_FL_ERR)) {
+               if (si_b->state == SI_ST_EST || si_b->state == SI_ST_DIS) {
+                       si_shutr(si_b);
+                       si_shutw(si_b);
+                       stream_int_report_error(si_b);
                        s->be->be_counters.failed_resp++;
                        if (srv)
                                srv->counters.failed_resp++;
-                       if (!(s->req.analysers) && !(s->res.analysers)) {
+                       if (!(req->analysers) && !(res->analysers)) {
                                s->be->be_counters.srv_aborts++;
                                s->fe->fe_counters.srv_aborts++;
                                if (srv)
@@ -1860,14 +1868,14 @@ struct task *process_session(struct task *t)
                /* note: maybe we should process connection errors here ? */
        }
 
-       if (s->si[1].state == SI_ST_CON) {
+       if (si_b->state == SI_ST_CON) {
                /* we were trying to establish a connection on the server side,
                 * maybe it succeeded, maybe it failed, maybe we timed out, ...
                 */
-               if (unlikely(!sess_update_st_con_tcp(s, &s->si[1])))
-                       sess_update_st_cer(s, &s->si[1]);
-               else if (s->si[1].state == SI_ST_EST)
-                       sess_establish(s, &s->si[1]);
+               if (unlikely(!sess_update_st_con_tcp(s, si_b)))
+                       sess_update_st_cer(s, si_b);
+               else if (si_b->state == SI_ST_EST)
+                       sess_establish(s, si_b);
 
                /* state is now one of SI_ST_CON (still in progress), SI_ST_EST
                 * (established), SI_ST_DIS (abort), SI_ST_CLO (last error),
@@ -1875,10 +1883,10 @@ struct task *process_session(struct task *t)
                 */
        }
 
-       rq_prod_last = s->si[0].state;
-       rq_cons_last = s->si[1].state;
-       rp_cons_last = s->si[0].state;
-       rp_prod_last = s->si[1].state;
+       rq_prod_last = si_f->state;
+       rq_cons_last = si_b->state;
+       rp_cons_last = si_f->state;
+       rp_prod_last = si_b->state;
 
  resync_stream_interface:
        /* Check for connection closure */
@@ -1888,22 +1896,22 @@ struct task *process_session(struct task *t)
                now_ms, __FUNCTION__, __LINE__,
                t,
                s, s->flags,
-               s->req, s->rep,
-               s->req.rex, s->res.wex,
-               s->req.flags, s->res.flags,
-               s->req.buf->i, s->req.buf->o, s->res.buf->i, s->res.buf->o, s->si[0].state, s->si[1].state,
-               s->si[0].err_type, s->si[1].err_type,
-               s->si[1].conn_retries);
+               req, res,
+               req->rex, res->wex,
+               req->flags, res->flags,
+               req->buf->i, req->buf->o, res->buf->i, res->buf->o, si_f->state, si_b->state,
+               si_f->err_type, si_b->err_type,
+               si_b->conn_retries);
 
        /* nothing special to be done on client side */
-       if (unlikely(s->si[0].state == SI_ST_DIS))
-               s->si[0].state = SI_ST_CLO;
+       if (unlikely(si_f->state == SI_ST_DIS))
+               si_f->state = SI_ST_CLO;
 
        /* When a server-side connection is released, we have to count it and
         * check for pending connections on this server.
         */
-       if (unlikely(s->si[1].state == SI_ST_DIS)) {
-               s->si[1].state = SI_ST_CLO;
+       if (unlikely(si_b->state == SI_ST_DIS)) {
+               si_b->state = SI_ST_CLO;
                srv = objt_server(s->target);
                if (srv) {
                        if (s->flags & SN_CURR_SESS) {
@@ -1923,14 +1931,14 @@ struct task *process_session(struct task *t)
 
  resync_request:
        /* Analyse request */
-       if (((s->req.flags & ~rqf_last) & CF_MASK_ANALYSER) ||
-           ((s->req.flags ^ rqf_last) & CF_MASK_STATIC) ||
-           s->si[0].state != rq_prod_last ||
-           s->si[1].state != rq_cons_last ||
+       if (((req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
+           ((req->flags ^ rqf_last) & CF_MASK_STATIC) ||
+           si_f->state != rq_prod_last ||
+           si_b->state != rq_cons_last ||
            s->task->state & TASK_WOKEN_MSG) {
-               unsigned int flags = s->req.flags;
+               unsigned int flags = req->flags;
 
-               if (s->si[0].state >= SI_ST_EST) {
+               if (si_f->state >= SI_ST_EST) {
                        int max_loops = global.tune.maxpollevents;
                        unsigned int ana_list;
                        unsigned int ana_back;
@@ -1941,12 +1949,12 @@ struct task *process_session(struct task *t)
                         * enabling them again when it disables itself, so
                         * that other analysers are called in similar conditions.
                         */
-                       channel_auto_read(&s->req);
-                       channel_auto_connect(&s->req);
-                       channel_auto_close(&s->req);
+                       channel_auto_read(req);
+                       channel_auto_connect(req);
+                       channel_auto_close(req);
 
                        /* We will call all analysers for which a bit is set in
-                        * s->req.analysers, following the bit order from LSB
+                        * req->analysers, following the bit order from LSB
                         * to MSB. The analysers must remove themselves from
                         * the list when not needed. Any analyser may return 0
                         * to break out of the loop, either because of missing
@@ -1979,97 +1987,97 @@ struct task *process_session(struct task *t)
                         * analyser and must immediately loop again.
                         */
 
-                       ana_list = ana_back = s->req.analysers;
+                       ana_list = ana_back = req->analysers;
                        while (ana_list && max_loops--) {
                                /* Warning! ensure that analysers are always placed in ascending order! */
 
                                if (ana_list & AN_REQ_INSPECT_FE) {
-                                       if (!tcp_inspect_request(s, &s->req, AN_REQ_INSPECT_FE))
+                                       if (!tcp_inspect_request(s, req, AN_REQ_INSPECT_FE))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_INSPECT_FE);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_INSPECT_FE);
                                }
 
                                if (ana_list & AN_REQ_WAIT_HTTP) {
-                                       if (!http_wait_for_request(s, &s->req, AN_REQ_WAIT_HTTP))
+                                       if (!http_wait_for_request(s, req, AN_REQ_WAIT_HTTP))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_WAIT_HTTP);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_WAIT_HTTP);
                                }
 
                                if (ana_list & AN_REQ_HTTP_PROCESS_FE) {
-                                       if (!http_process_req_common(s, &s->req, AN_REQ_HTTP_PROCESS_FE, s->fe))
+                                       if (!http_process_req_common(s, req, AN_REQ_HTTP_PROCESS_FE, s->fe))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_PROCESS_FE);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_HTTP_PROCESS_FE);
                                }
 
                                if (ana_list & AN_REQ_SWITCHING_RULES) {
-                                       if (!process_switching_rules(s, &s->req, AN_REQ_SWITCHING_RULES))
+                                       if (!process_switching_rules(s, req, AN_REQ_SWITCHING_RULES))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_SWITCHING_RULES);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_SWITCHING_RULES);
                                }
 
                                if (ana_list & AN_REQ_INSPECT_BE) {
-                                       if (!tcp_inspect_request(s, &s->req, AN_REQ_INSPECT_BE))
+                                       if (!tcp_inspect_request(s, req, AN_REQ_INSPECT_BE))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_INSPECT_BE);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_INSPECT_BE);
                                }
 
                                if (ana_list & AN_REQ_HTTP_PROCESS_BE) {
-                                       if (!http_process_req_common(s, &s->req, AN_REQ_HTTP_PROCESS_BE, s->be))
+                                       if (!http_process_req_common(s, req, AN_REQ_HTTP_PROCESS_BE, s->be))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_PROCESS_BE);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_HTTP_PROCESS_BE);
                                }
 
                                if (ana_list & AN_REQ_HTTP_TARPIT) {
-                                       if (!http_process_tarpit(s, &s->req, AN_REQ_HTTP_TARPIT))
+                                       if (!http_process_tarpit(s, req, AN_REQ_HTTP_TARPIT))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_TARPIT);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_HTTP_TARPIT);
                                }
 
                                if (ana_list & AN_REQ_SRV_RULES) {
-                                       if (!process_server_rules(s, &s->req, AN_REQ_SRV_RULES))
+                                       if (!process_server_rules(s, req, AN_REQ_SRV_RULES))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_SRV_RULES);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_SRV_RULES);
                                }
 
                                if (ana_list & AN_REQ_HTTP_INNER) {
-                                       if (!http_process_request(s, &s->req, AN_REQ_HTTP_INNER))
+                                       if (!http_process_request(s, req, AN_REQ_HTTP_INNER))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_INNER);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_HTTP_INNER);
                                }
 
                                if (ana_list & AN_REQ_HTTP_BODY) {
-                                       if (!http_wait_for_request_body(s, &s->req, AN_REQ_HTTP_BODY))
+                                       if (!http_wait_for_request_body(s, req, AN_REQ_HTTP_BODY))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_BODY);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_HTTP_BODY);
                                }
 
                                if (ana_list & AN_REQ_PRST_RDP_COOKIE) {
-                                       if (!tcp_persist_rdp_cookie(s, &s->req, AN_REQ_PRST_RDP_COOKIE))
+                                       if (!tcp_persist_rdp_cookie(s, req, AN_REQ_PRST_RDP_COOKIE))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_PRST_RDP_COOKIE);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_PRST_RDP_COOKIE);
                                }
 
                                if (ana_list & AN_REQ_STICKING_RULES) {
-                                       if (!process_sticking_rules(s, &s->req, AN_REQ_STICKING_RULES))
+                                       if (!process_sticking_rules(s, req, AN_REQ_STICKING_RULES))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_STICKING_RULES);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_STICKING_RULES);
                                }
 
                                if (ana_list & AN_REQ_HTTP_XFER_BODY) {
-                                       if (!http_request_forward_body(s, &s->req, AN_REQ_HTTP_XFER_BODY))
+                                       if (!http_request_forward_body(s, req, AN_REQ_HTTP_XFER_BODY))
                                                break;
-                                       UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_XFER_BODY);
+                                       UPDATE_ANALYSERS(req->analysers, ana_list, ana_back, AN_REQ_HTTP_XFER_BODY);
                                }
                                break;
                        }
                }
 
-               rq_prod_last = s->si[0].state;
-               rq_cons_last = s->si[1].state;
-               s->req.flags &= ~CF_WAKE_ONCE;
-               rqf_last = s->req.flags;
+               rq_prod_last = si_f->state;
+               rq_cons_last = si_b->state;
+               req->flags &= ~CF_WAKE_ONCE;
+               rqf_last = req->flags;
 
-               if ((s->req.flags ^ flags) & CF_MASK_STATIC)
+               if ((req->flags ^ flags) & CF_MASK_STATIC)
                        goto resync_request;
        }
 
@@ -2077,31 +2085,31 @@ struct task *process_session(struct task *t)
         * because some response analysers may indirectly enable new request
         * analysers (eg: HTTP keep-alive).
         */
-       req_ana_back = s->req.analysers;
+       req_ana_back = req->analysers;
 
  resync_response:
        /* Analyse response */
 
-       if (((s->res.flags & ~rpf_last) & CF_MASK_ANALYSER) ||
-                (s->res.flags ^ rpf_last) & CF_MASK_STATIC ||
-                s->si[0].state != rp_cons_last ||
-                s->si[1].state != rp_prod_last ||
+       if (((res->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
+                (res->flags ^ rpf_last) & CF_MASK_STATIC ||
+                si_f->state != rp_cons_last ||
+                si_b->state != rp_prod_last ||
                 s->task->state & TASK_WOKEN_MSG) {
-               unsigned int flags = s->res.flags;
+               unsigned int flags = res->flags;
 
-               if ((s->res.flags & CF_MASK_ANALYSER) &&
-                   (s->res.analysers & AN_REQ_ALL)) {
+               if ((res->flags & CF_MASK_ANALYSER) &&
+                   (res->analysers & AN_REQ_ALL)) {
                        /* Due to HTTP pipelining, the HTTP request analyser might be waiting
                         * for some free space in the response buffer, so we might need to call
                         * it when something changes in the response buffer, but still we pass
                         * it the request buffer. Note that the SI state might very well still
                         * be zero due to us returning a flow of redirects!
                         */
-                       s->res.analysers &= ~AN_REQ_ALL;
-                       s->req.flags |= CF_WAKE_ONCE;
+                       res->analysers &= ~AN_REQ_ALL;
+                       req->flags |= CF_WAKE_ONCE;
                }
 
-               if (s->si[1].state >= SI_ST_EST) {
+               if (si_b->state >= SI_ST_EST) {
                        int max_loops = global.tune.maxpollevents;
                        unsigned int ana_list;
                        unsigned int ana_back;
@@ -2112,11 +2120,11 @@ struct task *process_session(struct task *t)
                         * it disables itself, so that other analysers are called
                         * in similar conditions.
                         */
-                       channel_auto_read(&s->res);
-                       channel_auto_close(&s->res);
+                       channel_auto_read(res);
+                       channel_auto_close(res);
 
                        /* We will call all analysers for which a bit is set in
-                        * s->res.analysers, following the bit order from LSB
+                        * res->analysers, following the bit order from LSB
                         * to MSB. The analysers must remove themselves from
                         * the list when not needed. Any analyser may return 0
                         * to break out of the loop, either because of missing
@@ -2126,56 +2134,56 @@ struct task *process_session(struct task *t)
                         * are added in the middle.
                         */
 
-                       ana_list = ana_back = s->res.analysers;
+                       ana_list = ana_back = res->analysers;
                        while (ana_list && max_loops--) {
                                /* Warning! ensure that analysers are always placed in ascending order! */
 
                                if (ana_list & AN_RES_INSPECT) {
-                                       if (!tcp_inspect_response(s, &s->res, AN_RES_INSPECT))
+                                       if (!tcp_inspect_response(s, res, AN_RES_INSPECT))
                                                break;
-                                       UPDATE_ANALYSERS(s->res.analysers, ana_list, ana_back, AN_RES_INSPECT);
+                                       UPDATE_ANALYSERS(res->analysers, ana_list, ana_back, AN_RES_INSPECT);
                                }
 
                                if (ana_list & AN_RES_WAIT_HTTP) {
-                                       if (!http_wait_for_response(s, &s->res, AN_RES_WAIT_HTTP))
+                                       if (!http_wait_for_response(s, res, AN_RES_WAIT_HTTP))
                                                break;
-                                       UPDATE_ANALYSERS(s->res.analysers, ana_list, ana_back, AN_RES_WAIT_HTTP);
+                                       UPDATE_ANALYSERS(res->analysers, ana_list, ana_back, AN_RES_WAIT_HTTP);
                                }
 
                                if (ana_list & AN_RES_STORE_RULES) {
-                                       if (!process_store_rules(s, &s->res, AN_RES_STORE_RULES))
+                                       if (!process_store_rules(s, res, AN_RES_STORE_RULES))
                                                break;
-                                       UPDATE_ANALYSERS(s->res.analysers, ana_list, ana_back, AN_RES_STORE_RULES);
+                                       UPDATE_ANALYSERS(res->analysers, ana_list, ana_back, AN_RES_STORE_RULES);
                                }
 
                                if (ana_list & AN_RES_HTTP_PROCESS_BE) {
-                                       if (!http_process_res_common(s, &s->res, AN_RES_HTTP_PROCESS_BE, s->be))
+                                       if (!http_process_res_common(s, res, AN_RES_HTTP_PROCESS_BE, s->be))
                                                break;
-                                       UPDATE_ANALYSERS(s->res.analysers, ana_list, ana_back, AN_RES_HTTP_PROCESS_BE);
+                                       UPDATE_ANALYSERS(res->analysers, ana_list, ana_back, AN_RES_HTTP_PROCESS_BE);
                                }
 
                                if (ana_list & AN_RES_HTTP_XFER_BODY) {
-                                       if (!http_response_forward_body(s, &s->res, AN_RES_HTTP_XFER_BODY))
+                                       if (!http_response_forward_body(s, res, AN_RES_HTTP_XFER_BODY))
                                                break;
-                                       UPDATE_ANALYSERS(s->res.analysers, ana_list, ana_back, AN_RES_HTTP_XFER_BODY);
+                                       UPDATE_ANALYSERS(res->analysers, ana_list, ana_back, AN_RES_HTTP_XFER_BODY);
                                }
                                break;
                        }
                }
 
-               rp_cons_last = s->si[0].state;
-               rp_prod_last = s->si[1].state;
-               rpf_last = s->res.flags;
+               rp_cons_last = si_f->state;
+               rp_prod_last = si_b->state;
+               rpf_last = res->flags;
 
-               if ((s->res.flags ^ flags) & CF_MASK_STATIC)
+               if ((res->flags ^ flags) & CF_MASK_STATIC)
                        goto resync_response;
        }
 
        /* maybe someone has added some request analysers, so we must check and loop */
-       if (s->req.analysers & ~req_ana_back)
+       if (req->analysers & ~req_ana_back)
                goto resync_request;
 
-       if ((s->req.flags & ~rqf_last) & CF_MASK_ANALYSER)
+       if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
                goto resync_request;
 
        /* FIXME: here we should call protocol handlers which rely on
@@ -2190,24 +2198,24 @@ struct task *process_session(struct task *t)
         */
        srv = objt_server(s->target);
        if (unlikely(!(s->flags & SN_ERR_MASK))) {
-               if (s->req.flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
+               if (req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
                        /* Report it if the client got an error or a read timeout expired */
-                       s->req.analysers = 0;
-                       if (s->req.flags & CF_READ_ERROR) {
+                       req->analysers = 0;
+                       if (req->flags & CF_READ_ERROR) {
                                s->be->be_counters.cli_aborts++;
                                s->fe->fe_counters.cli_aborts++;
                                if (srv)
                                        srv->counters.cli_aborts++;
                                s->flags |= SN_ERR_CLICL;
                        }
-                       else if (s->req.flags & CF_READ_TIMEOUT) {
+                       else if (req->flags & CF_READ_TIMEOUT) {
                                s->be->be_counters.cli_aborts++;
                                s->fe->fe_counters.cli_aborts++;
                                if (srv)
                                        srv->counters.cli_aborts++;
                                s->flags |= SN_ERR_CLITO;
                        }
-                       else if (s->req.flags & CF_WRITE_ERROR) {
+                       else if (req->flags & CF_WRITE_ERROR) {
                                s->be->be_counters.srv_aborts++;
                                s->fe->fe_counters.srv_aborts++;
                                if (srv)
@@ -2223,24 +2231,24 @@ struct task *process_session(struct task *t)
                        }
                        sess_set_term_flags(s);
                }
-               else if (s->res.flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
+               else if (res->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
                        /* Report it if the server got an error or a read timeout expired */
-                       s->res.analysers = 0;
-                       if (s->res.flags & CF_READ_ERROR) {
+                       res->analysers = 0;
+                       if (res->flags & CF_READ_ERROR) {
                                s->be->be_counters.srv_aborts++;
                                s->fe->fe_counters.srv_aborts++;
                                if (srv)
                                        srv->counters.srv_aborts++;
                                s->flags |= SN_ERR_SRVCL;
                        }
-                       else if (s->res.flags & CF_READ_TIMEOUT) {
+                       else if (res->flags & CF_READ_TIMEOUT) {
                                s->be->be_counters.srv_aborts++;
                                s->fe->fe_counters.srv_aborts++;
                                if (srv)
                                        srv->counters.srv_aborts++;
                                s->flags |= SN_ERR_SRVTO;
                        }
-                       else if (s->res.flags & CF_WRITE_ERROR) {
+                       else if (res->flags & CF_WRITE_ERROR) {
                                s->be->be_counters.cli_aborts++;
                                s->fe->fe_counters.cli_aborts++;
                                if (srv)
@@ -2269,41 +2277,41 @@ struct task *process_session(struct task *t)
         * Note that we're checking CF_SHUTR_NOW as an indication of a possible
         * recent call to channel_abort().
         */
-       if (unlikely(!s->req.analysers &&
-           !(s->req.flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
-           (s->si[0].state >= SI_ST_EST) &&
-           (s->req.to_forward != CHN_INFINITE_FORWARD))) {
+       if (unlikely(!req->analysers &&
+           !(req->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
+           (si_f->state >= SI_ST_EST) &&
+           (req->to_forward != CHN_INFINITE_FORWARD))) {
                /* This buffer is freewheeling, there's no analyser
                 * attached to it. If any data are left in, we'll permit them to
                 * move.
                 */
-               channel_auto_read(&s->req);
-               channel_auto_connect(&s->req);
-               channel_auto_close(&s->req);
-               buffer_flush(s->req.buf);
+               channel_auto_read(req);
+               channel_auto_connect(req);
+               channel_auto_close(req);
+               buffer_flush(req->buf);
 
                /* We'll let data flow between the producer (if still connected)
                 * to the consumer (which might possibly not be connected yet).
                 */
-               if (!(s->req.flags & (CF_SHUTR|CF_SHUTW_NOW)))
-                       channel_forward(&s->req, CHN_INFINITE_FORWARD);
+               if (!(req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
+                       channel_forward(req, CHN_INFINITE_FORWARD);
        }
 
        /* check if it is wise to enable kernel splicing to forward request data */
-       if (!(s->req.flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
-           s->req.to_forward &&
+       if (!(req->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
+           req->to_forward &&
            (global.tune.options & GTUNE_USE_SPLICE) &&
-           (objt_conn(s->si[0].end) && __objt_conn(s->si[0].end)->xprt && __objt_conn(s->si[0].end)->xprt->rcv_pipe) &&
-           (objt_conn(s->si[1].end) && __objt_conn(s->si[1].end)->xprt && __objt_conn(s->si[1].end)->xprt->snd_pipe) &&
+           (objt_conn(si_f->end) && __objt_conn(si_f->end)->xprt && __objt_conn(si_f->end)->xprt->rcv_pipe) &&
+           (objt_conn(si_b->end) && __objt_conn(si_b->end)->xprt && __objt_conn(si_b->end)->xprt->snd_pipe) &&
            (pipes_used < global.maxpipes) &&
            (((s->fe->options2|s->be->options2) & PR_O2_SPLIC_REQ) ||
             (((s->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
-             (s->req.flags & CF_STREAMER_FAST)))) {
-               s->req.flags |= CF_KERN_SPLICING;
+             (req->flags & CF_STREAMER_FAST)))) {
+               req->flags |= CF_KERN_SPLICING;
        }
 
        /* reflect what the L7 analysers have seen last */
-       rqf_last = s->req.flags;
+       rqf_last = req->flags;
 
        /*
         * Now forward all shutdown requests between both sides of the buffer
@@ -2314,40 +2322,40 @@ struct task *process_session(struct task *t)
         * once the server has begun to respond. If a half-closed timeout is set, we adjust
         * the other side's timeout as well.
         */
-       if (unlikely((s->req.flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
+       if (unlikely((req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
                     (CF_AUTO_CLOSE|CF_SHUTR))) {
-               channel_shutw_now(&s->req);
+               channel_shutw_now(req);
                if (tick_isset(s->fe->timeout.clientfin)) {
-                       s->res.wto = s->fe->timeout.clientfin;
-                       s->res.wex = tick_add(now_ms, s->res.wto);
+                       res->wto = s->fe->timeout.clientfin;
+                       res->wex = tick_add(now_ms, res->wto);
                }
        }
 
        /* shutdown(write) pending */
-       if (unlikely((s->req.flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
-                    channel_is_empty(&s->req))) {
-               if (s->req.flags & CF_READ_ERROR)
-                       s->si[1].flags |= SI_FL_NOLINGER;
-               si_shutw(&s->si[1]);
+       if (unlikely((req->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
+                    channel_is_empty(req))) {
+               if (req->flags & CF_READ_ERROR)
+                       si_b->flags |= SI_FL_NOLINGER;
+               si_shutw(si_b);
                if (tick_isset(s->be->timeout.serverfin)) {
-                       s->res.rto = s->be->timeout.serverfin;
-                       s->res.rex = tick_add(now_ms, s->res.rto);
+                       res->rto = s->be->timeout.serverfin;
+                       res->rex = tick_add(now_ms, res->rto);
                }
        }
 
        /* shutdown(write) done on server side, we must stop the client too */
-       if (unlikely((s->req.flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
-                    !s->req.analysers))
-               channel_shutr_now(&s->req);
+       if (unlikely((req->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
+                    !req->analysers))
+               channel_shutr_now(req);
 
        /* shutdown(read) pending */
-       if (unlikely((s->req.flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
-               if (s->si[0].flags & SI_FL_NOHALF)
-                       s->si[0].flags |= SI_FL_NOLINGER;
-               si_shutr(&s->si[0]);
+       if (unlikely((req->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
+               if (si_f->flags & SI_FL_NOHALF)
+                       si_f->flags |= SI_FL_NOLINGER;
+               si_shutr(si_f);
                if (tick_isset(s->fe->timeout.clientfin)) {
-                       s->res.wto = s->fe->timeout.clientfin;
-                       s->res.wex = tick_add(now_ms, s->res.wto);
+                       res->wto = s->fe->timeout.clientfin;
+                       res->wex = tick_add(now_ms, res->wto);
                }
        }
 
@@ -2356,21 +2364,21 @@ struct task *process_session(struct task *t)
         *  - there are data scheduled for emission in the buffer
         *  - the CF_AUTO_CONNECT flag is set (active connection)
         */
-       if (s->si[1].state == SI_ST_INI) {
-               if (!(s->req.flags & CF_SHUTW)) {
-                       if ((s->req.flags & CF_AUTO_CONNECT) || !channel_is_empty(&s->req)) {
+       if (si_b->state == SI_ST_INI) {
+               if (!(req->flags & CF_SHUTW)) {
+                       if ((req->flags & CF_AUTO_CONNECT) || !channel_is_empty(req)) {
                                /* If we have an appctx, there is no connect method, so we
                                 * immediately switch to the connected state, otherwise we
                                 * perform a connection request.
                                 */
-                               s->si[1].state = SI_ST_REQ; /* new connection requested */
-                               s->si[1].conn_retries = s->be->conn_retries;
+                               si_b->state = SI_ST_REQ; /* new connection requested */
+                               si_b->conn_retries = s->be->conn_retries;
                        }
                }
                else {
-                       s->si[1].state = SI_ST_CLO; /* shutw+ini = abort */
-                       channel_shutw_now(&s->req);        /* fix buffer flags upon abort */
-                       channel_shutr_now(&s->res);
+                       si_b->state = SI_ST_CLO; /* shutw+ini = abort */
+                       channel_shutw_now(req);        /* fix buffer flags upon abort */
+                       channel_shutr_now(res);
                }
        }
 
@@ -2378,26 +2386,26 @@ struct task *process_session(struct task *t)
        /* we may have a pending connection request, or a connection waiting
         * for completion.
         */
-       if (s->si[1].state >= SI_ST_REQ && s->si[1].state < SI_ST_CON) {
+       if (si_b->state >= SI_ST_REQ && si_b->state < SI_ST_CON) {
                do {
                        /* nb: step 1 might switch from QUE to ASS, but we first want
                         * to give a chance to step 2 to perform a redirect if needed.
                         */
-                       if (s->si[1].state != SI_ST_REQ)
-                               sess_update_stream_int(s, &s->si[1]);
-                       if (s->si[1].state == SI_ST_REQ)
-                               sess_prepare_conn_req(s, &s->si[1]);
+                       if (si_b->state != SI_ST_REQ)
+                               sess_update_stream_int(s, si_b);
+                       if (si_b->state == SI_ST_REQ)
+                               sess_prepare_conn_req(s, si_b);
 
                        /* applets directly go to the ESTABLISHED state. Similarly,
                         * servers experience the same fate when their connection
                         * is reused.
                         */
-                       if (unlikely(s->si[1].state == SI_ST_EST))
-                               sess_establish(s, &s->si[1]);
+                       if (unlikely(si_b->state == SI_ST_EST))
+                               sess_establish(s, si_b);
 
                        /* Now we can add the server name to a header (if requested) */
                        /* check for HTTP mode and proxy server_name_hdr_name != NULL */
-                       if ((s->si[1].state >= SI_ST_CON) &&
+                       if ((si_b->state >= SI_ST_CON) &&
                            (s->be->server_id_hdr_name != NULL) &&
                            (s->be->mode == PR_MODE_HTTP) &&
                            objt_server(s->target)) {
@@ -2405,17 +2413,17 @@ struct task *process_session(struct task *t)
                        }
 
                        srv = objt_server(s->target);
-                       if (s->si[1].state == SI_ST_ASS && srv && srv->rdr_len && (s->flags & SN_REDIRECTABLE))
-                               http_perform_server_redirect(s, &s->si[1]);
-               } while (s->si[1].state == SI_ST_ASS);
+                       if (si_b->state == SI_ST_ASS && srv && srv->rdr_len && (s->flags & SN_REDIRECTABLE))
+                               http_perform_server_redirect(s, si_b);
+               } while (si_b->state == SI_ST_ASS);
        }
 
        /* Benchmarks have shown that it's optimal to do a full resync now */
-       if (s->si[0].state == SI_ST_DIS || s->si[1].state == SI_ST_DIS)
+       if (si_f->state == SI_ST_DIS || si_b->state == SI_ST_DIS)
                goto resync_stream_interface;
 
        /* otherwise we want to check if we need to resync the req buffer or not */
-       if ((s->req.flags ^ rqf_last) & CF_MASK_STATIC)
+       if ((req->flags ^ rqf_last) & CF_MASK_STATIC)
                goto resync_request;
 
        /* perform output updates to the response buffer */
@@ -2425,63 +2433,63 @@ struct task *process_session(struct task *t)
         * Note that we're checking CF_SHUTR_NOW as an indication of a possible
         * recent call to channel_abort().
         */
-       if (unlikely(!s->res.analysers &&
-           !(s->res.flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
-           (s->si[1].state >= SI_ST_EST) &&
-           (s->res.to_forward != CHN_INFINITE_FORWARD))) {
+       if (unlikely(!res->analysers &&
+           !(res->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
+           (si_b->state >= SI_ST_EST) &&
+           (res->to_forward != CHN_INFINITE_FORWARD))) {
                /* This buffer is freewheeling, there's no analyser
                 * attached to it. If any data are left in, we'll permit them to
                 * move.
                 */
-               channel_auto_read(&s->res);
-               channel_auto_close(&s->res);
-               buffer_flush(s->res.buf);
+               channel_auto_read(res);
+               channel_auto_close(res);
+               buffer_flush(res->buf);
 
                /* We'll let data flow between the producer (if still connected)
                 * to the consumer.
                 */
-               if (!(s->res.flags & (CF_SHUTR|CF_SHUTW_NOW)))
-                       channel_forward(&s->res, CHN_INFINITE_FORWARD);
+               if (!(res->flags & (CF_SHUTR|CF_SHUTW_NOW)))
+                       channel_forward(res, CHN_INFINITE_FORWARD);
 
                /* if we have no analyser anymore in any direction and have a
                 * tunnel timeout set, use it now. Note that we must respect
                 * the half-closed timeouts as well.
                 */
-               if (!s->req.analysers && s->be->timeout.tunnel) {
-                       s->req.rto = s->req.wto = s->res.rto = s->res.wto =
+               if (!req->analysers && s->be->timeout.tunnel) {
+                       req->rto = req->wto = res->rto = res->wto =
                                s->be->timeout.tunnel;
 
-                       if ((s->req.flags & CF_SHUTR) && tick_isset(s->fe->timeout.clientfin))
-                               s->res.wto = s->fe->timeout.clientfin;
-                       if ((s->req.flags & CF_SHUTW) && tick_isset(s->be->timeout.serverfin))
-                               s->res.rto = s->be->timeout.serverfin;
-                       if ((s->res.flags & CF_SHUTR) && tick_isset(s->be->timeout.serverfin))
-                               s->req.wto = s->be->timeout.serverfin;
-                       if ((s->res.flags & CF_SHUTW) && tick_isset(s->fe->timeout.clientfin))
-                               s->req.rto = s->fe->timeout.clientfin;
-
-                       s->req.rex = tick_add(now_ms, s->req.rto);
-                       s->req.wex = tick_add(now_ms, s->req.wto);
-                       s->res.rex = tick_add(now_ms, s->res.rto);
-                       s->res.wex = tick_add(now_ms, s->res.wto);
+                       if ((req->flags & CF_SHUTR) && tick_isset(s->fe->timeout.clientfin))
+                               res->wto = s->fe->timeout.clientfin;
+                       if ((req->flags & CF_SHUTW) && tick_isset(s->be->timeout.serverfin))
+                               res->rto = s->be->timeout.serverfin;
+                       if ((res->flags & CF_SHUTR) && tick_isset(s->be->timeout.serverfin))
+                               req->wto = s->be->timeout.serverfin;
+                       if ((res->flags & CF_SHUTW) && tick_isset(s->fe->timeout.clientfin))
+                               req->rto = s->fe->timeout.clientfin;
+
+                       req->rex = tick_add(now_ms, req->rto);
+                       req->wex = tick_add(now_ms, req->wto);
+                       res->rex = tick_add(now_ms, res->rto);
+                       res->wex = tick_add(now_ms, res->wto);
                }
        }
 
        /* check if it is wise to enable kernel splicing to forward response data */
-       if (!(s->res.flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
-           s->res.to_forward &&
+       if (!(res->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
+           res->to_forward &&
            (global.tune.options & GTUNE_USE_SPLICE) &&
-           (objt_conn(s->si[0].end) && __objt_conn(s->si[0].end)->xprt && __objt_conn(s->si[0].end)->xprt->snd_pipe) &&
-           (objt_conn(s->si[1].end) && __objt_conn(s->si[1].end)->xprt && __objt_conn(s->si[1].end)->xprt->rcv_pipe) &&
+           (objt_conn(si_f->end) && __objt_conn(si_f->end)->xprt && __objt_conn(si_f->end)->xprt->snd_pipe) &&
+           (objt_conn(si_b->end) && __objt_conn(si_b->end)->xprt && __objt_conn(si_b->end)->xprt->rcv_pipe) &&
            (pipes_used < global.maxpipes) &&
            (((s->fe->options2|s->be->options2) & PR_O2_SPLIC_RTR) ||
             (((s->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
-             (s->res.flags & CF_STREAMER_FAST)))) {
-               s->res.flags |= CF_KERN_SPLICING;
+             (res->flags & CF_STREAMER_FAST)))) {
+               res->flags |= CF_KERN_SPLICING;
        }
 
        /* reflect what the L7 analysers have seen last */
-       rpf_last = s->res.flags;
+       rpf_last = res->flags;
 
        /*
         * Now forward all shutdown requests between both sides of the buffer
@@ -2492,53 +2500,53 @@ struct task *process_session(struct task *t)
         */
 
        /* first, let's check if the response buffer needs to shutdown(write) */
-       if (unlikely((s->res.flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
+       if (unlikely((res->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
                     (CF_AUTO_CLOSE|CF_SHUTR))) {
-               channel_shutw_now(&s->res);
+               channel_shutw_now(res);
                if (tick_isset(s->be->timeout.serverfin)) {
-                       s->req.wto = s->be->timeout.serverfin;
-                       s->req.wex = tick_add(now_ms, s->req.wto);
+                       req->wto = s->be->timeout.serverfin;
+                       req->wex = tick_add(now_ms, req->wto);
                }
        }
 
        /* shutdown(write) pending */
-       if (unlikely((s->res.flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
-                    channel_is_empty(&s->res))) {
-               si_shutw(&s->si[0]);
+       if (unlikely((res->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
+                    channel_is_empty(res))) {
+               si_shutw(si_f);
                if (tick_isset(s->fe->timeout.clientfin)) {
-                       s->req.rto = s->fe->timeout.clientfin;
-                       s->req.rex = tick_add(now_ms, s->req.rto);
+                       req->rto = s->fe->timeout.clientfin;
+                       req->rex = tick_add(now_ms, req->rto);
                }
        }
 
        /* shutdown(write) done on the client side, we must stop the server too */
-       if (unlikely((s->res.flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
-           !s->res.analysers)
-               channel_shutr_now(&s->res);
+       if (unlikely((res->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
+           !res->analysers)
+               channel_shutr_now(res);
 
        /* shutdown(read) pending */
-       if (unlikely((s->res.flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
-               if (s->si[1].flags & SI_FL_NOHALF)
-                       s->si[1].flags |= SI_FL_NOLINGER;
-               si_shutr(&s->si[1]);
+       if (unlikely((res->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
+               if (si_b->flags & SI_FL_NOHALF)
+                       si_b->flags |= SI_FL_NOLINGER;
+               si_shutr(si_b);
                if (tick_isset(s->be->timeout.serverfin)) {
-                       s->req.wto = s->be->timeout.serverfin;
-                       s->req.wex = tick_add(now_ms, s->req.wto);
+                       req->wto = s->be->timeout.serverfin;
+                       req->wex = tick_add(now_ms, req->wto);
                }
        }
 
-       if (s->si[0].state == SI_ST_DIS || s->si[1].state == SI_ST_DIS)
+       if (si_f->state == SI_ST_DIS || si_b->state == SI_ST_DIS)
                goto resync_stream_interface;
 
-       if (s->req.flags != rqf_last)
+       if (req->flags != rqf_last)
                goto resync_request;
 
-       if ((s->res.flags ^ rpf_last) & CF_MASK_STATIC)
+       if ((res->flags ^ rpf_last) & CF_MASK_STATIC)
                goto resync_response;
 
        /* we're interested in getting wakeups again */
-       s->si[0].flags &= ~SI_FL_DONT_WAKE;
-       s->si[1].flags &= ~SI_FL_DONT_WAKE;
+       si_f->flags &= ~SI_FL_DONT_WAKE;
+       si_b->flags &= ~SI_FL_DONT_WAKE;
 
        /* This is needed only when debugging is enabled, to indicate
         * client-side or server-side close. Please note that in the unlikely
@@ -2548,43 +2556,43 @@ struct task *process_session(struct task *t)
        if (unlikely((global.mode & MODE_DEBUG) &&
                     (!(global.mode & MODE_QUIET) ||
                      (global.mode & MODE_VERBOSE)))) {
-               if (s->si[1].state == SI_ST_CLO &&
-                   s->si[1].prev_state == SI_ST_EST) {
+               if (si_b->state == SI_ST_CLO &&
+                   si_b->prev_state == SI_ST_EST) {
                        chunk_printf(&trash, "%08x:%s.srvcls[%04x:%04x]\n",
                                      s->uniq_id, s->be->id,
-                                     objt_conn(s->si[0].end) ? (unsigned short)objt_conn(s->si[0].end)->t.sock.fd : -1,
-                                     objt_conn(s->si[1].end) ? (unsigned short)objt_conn(s->si[1].end)->t.sock.fd : -1);
+                                     objt_conn(si_f->end) ? (unsigned short)objt_conn(si_f->end)->t.sock.fd : -1,
+                                     objt_conn(si_b->end) ? (unsigned short)objt_conn(si_b->end)->t.sock.fd : -1);
                        shut_your_big_mouth_gcc(write(1, trash.str, trash.len));
                }
 
-               if (s->si[0].state == SI_ST_CLO &&
-                   s->si[0].prev_state == SI_ST_EST) {
+               if (si_f->state == SI_ST_CLO &&
+                   si_f->prev_state == SI_ST_EST) {
                        chunk_printf(&trash, "%08x:%s.clicls[%04x:%04x]\n",
                                      s->uniq_id, s->be->id,
-                                     objt_conn(s->si[0].end) ? (unsigned short)objt_conn(s->si[0].end)->t.sock.fd : -1,
-                                     objt_conn(s->si[1].end) ? (unsigned short)objt_conn(s->si[1].end)->t.sock.fd : -1);
+                                     objt_conn(si_f->end) ? (unsigned short)objt_conn(si_f->end)->t.sock.fd : -1,
+                                     objt_conn(si_b->end) ? (unsigned short)objt_conn(si_b->end)->t.sock.fd : -1);
                        shut_your_big_mouth_gcc(write(1, trash.str, trash.len));
                }
        }
 
-       if (likely((s->si[0].state != SI_ST_CLO) ||
-                  (s->si[1].state > SI_ST_INI && s->si[1].state < SI_ST_CLO))) {
+       if (likely((si_f->state != SI_ST_CLO) ||
+                  (si_b->state > SI_ST_INI && si_b->state < SI_ST_CLO))) {
 
                if ((s->fe->options & PR_O_CONTSTATS) && (s->flags & SN_BE_ASSIGNED))
                        session_process_counters(s);
 
-               if (s->si[0].state == SI_ST_EST && obj_type(s->si[0].end) != OBJ_TYPE_APPCTX)
-                       si_update(&s->si[0]);
+               if (si_f->state == SI_ST_EST && obj_type(si_f->end) != OBJ_TYPE_APPCTX)
+                       si_update(si_f);
 
-               if (s->si[1].state == SI_ST_EST && obj_type(s->si[1].end) != OBJ_TYPE_APPCTX)
-                       si_update(&s->si[1]);
+               if (si_b->state == SI_ST_EST && obj_type(si_b->end) != OBJ_TYPE_APPCTX)
+                       si_update(si_b);
 
-               s->req.flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
-               s->res.flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
-               s->si[0].prev_state = s->si[0].state;
-               s->si[1].prev_state = s->si[1].state;
-               s->si[0].flags &= ~(SI_FL_ERR|SI_FL_EXP);
-               s->si[1].flags &= ~(SI_FL_ERR|SI_FL_EXP);
+               req->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
+               res->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
+               si_f->prev_state = si_f->state;
+               si_b->prev_state = si_b->state;
+               si_f->flags &= ~(SI_FL_ERR|SI_FL_EXP);
+               si_b->flags &= ~(SI_FL_ERR|SI_FL_EXP);
 
                /* Trick: if a request is being waiting for the server to respond,
                 * and if we know the server can timeout, we don't want the timeout
@@ -2594,10 +2602,10 @@ struct task *process_session(struct task *t)
                 * request timeout is set and the server has not yet sent a response.
                 */
 
-               if ((s->res.flags & (CF_AUTO_CLOSE|CF_SHUTR)) == 0 &&
-                   (tick_isset(s->req.wex) || tick_isset(s->res.rex))) {
-                       s->req.flags |= CF_READ_NOEXP;
-                       s->req.rex = TICK_ETERNITY;
+               if ((res->flags & (CF_AUTO_CLOSE|CF_SHUTR)) == 0 &&
+                   (tick_isset(req->wex) || tick_isset(res->rex))) {
+                       req->flags |= CF_READ_NOEXP;
+                       req->rex = TICK_ETERNITY;
                }
 
                /* When any of the stream interfaces is attached to an applet,
@@ -2609,7 +2617,7 @@ struct task *process_session(struct task *t)
                 * both functions are always called and that we wake up if at
                 * least one did something.
                 */
-               if ((si_applet_call(&s->si[1]) | si_applet_call(&s->si[0])) != 0) {
+               if ((si_applet_call(si_b) | si_applet_call(si_f)) != 0) {
                        if (task_in_rq(t)) {
                                t->expire = TICK_ETERNITY;
                                session_release_buffers(s);
@@ -2618,23 +2626,23 @@ struct task *process_session(struct task *t)
                }
 
        update_exp_and_leave:
-               t->expire = tick_first(tick_first(s->req.rex, s->req.wex),
-                                      tick_first(s->res.rex, s->res.wex));
-               if (s->req.analysers)
-                       t->expire = tick_first(t->expire, s->req.analyse_exp);
+               t->expire = tick_first(tick_first(req->rex, req->wex),
+                                      tick_first(res->rex, res->wex));
+               if (req->analysers)
+                       t->expire = tick_first(t->expire, req->analyse_exp);
 
-               if (s->si[0].exp)
-                       t->expire = tick_first(t->expire, s->si[0].exp);
+               if (si_f->exp)
+                       t->expire = tick_first(t->expire, si_f->exp);
 
-               if (s->si[1].exp)
-                       t->expire = tick_first(t->expire, s->si[1].exp);
+               if (si_b->exp)
+                       t->expire = tick_first(t->expire, si_b->exp);
 
 #ifdef DEBUG_FULL
                fprintf(stderr,
                        "[%u] queuing with exp=%u req->rex=%u req->wex=%u req->ana_exp=%u"
                        " rep->rex=%u rep->wex=%u, si[0].exp=%u, si[1].exp=%u, cs=%d, ss=%d\n",
-                       now_ms, t->expire, s->req.rex, s->req.wex, s->req.analyse_exp,
-                       s->res.rex, s->res.wex, s->si[0].exp, s->si[1].exp, s->si[0].state, s->si[1].state);
+                       now_ms, t->expire, req->rex, req->wex, req->analyse_exp,
+                       res->rex, res->wex, si_f->exp, si_b->exp, si_f->state, si_b->state);
 #endif
 
 #ifdef DEBUG_DEV
@@ -2670,8 +2678,8 @@ struct task *process_session(struct task *t)
                     (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
                chunk_printf(&trash, "%08x:%s.closed[%04x:%04x]\n",
                              s->uniq_id, s->be->id,
-                             objt_conn(s->si[0].end) ? (unsigned short)objt_conn(s->si[0].end)->t.sock.fd : -1,
-                             objt_conn(s->si[1].end) ? (unsigned short)objt_conn(s->si[1].end)->t.sock.fd : -1);
+                             objt_conn(si_f->end) ? (unsigned short)objt_conn(si_f->end)->t.sock.fd : -1,
+                             objt_conn(si_b->end) ? (unsigned short)objt_conn(si_b->end)->t.sock.fd : -1);
                shut_your_big_mouth_gcc(write(1, trash.str, trash.len));
        }
 
@@ -2702,7 +2710,7 @@ struct task *process_session(struct task *t)
        /* let's do a final log if we need it */
        if (!LIST_ISEMPTY(&s->fe->logformat) && s->logs.logwait &&
            !(s->flags & SN_MONITOR) &&
-           (!(s->fe->options & PR_O_NULLNOLOG) || s->req.total)) {
+           (!(s->fe->options & PR_O_NULLNOLOG) || req->total)) {
                s->do_log(s);
        }