unsigned int uniq_id; /* unique ID used for the traces */
enum obj_type *target; /* target to use for this session; for mini-sess: incoming connection */
- struct channel *req; /* request buffer */
- struct channel *rep; /* response buffer */
+ struct channel req; /* request channel */
+ struct channel res; /* response channel */
struct proxy *fe; /* the proxy this session depends on for the client side */
struct proxy *be; /* the proxy this session depends on for the server side */
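/* Editor's note: a minimal sketch, not part of the patch, illustrating
 * what embedding the channels changes at call sites ("example_use" is a
 * hypothetical function). Dereferences like s->req->flags become direct
 * member accesses, and helpers that still take a struct channel *
 * receive the member's address instead of a stored pointer.
 */
static inline void example_use(struct session *s)
{
	s->req.flags |= CF_READ_DONTWAIT;   /* was: s->req->flags |= ... */
	channel_auto_close(&s->req);        /* was: channel_auto_close(s->req) */
}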
{
unsigned int hash = 0;
struct http_txn *txn = &s->txn;
- struct channel *req = s->req;
+ struct channel *req = &s->req;
struct http_msg *msg = &txn->req;
struct proxy *px = s->be;
unsigned int plen = px->url_param_len;
ctx.idx = 0;
/* if the message is chunked, we skip the chunk size, but use the value as len */
- http_find_header2(px->hh_name, plen, b_ptr(s->req->buf, -http_hdr_rewind(&txn->req)), &txn->hdr_idx, &ctx);
+ http_find_header2(px->hh_name, plen, b_ptr(s->req.buf, -http_hdr_rewind(&txn->req)), &txn->hdr_idx, &ctx);
/* if the header is not found or empty, let's fallback to round robin */
if (!ctx.idx || !ctx.vlen)
memset(&smp, 0, sizeof(smp));
- b_rew(s->req->buf, rewind = s->req->buf->o);
+ b_rew(s->req.buf, rewind = s->req.buf->o);
ret = fetch_rdp_cookie_name(s, &smp, px->hh_name, px->hh_len);
len = smp.data.str.len;
- b_adv(s->req->buf, rewind);
+ b_adv(s->req.buf, rewind);
if (ret == 0 || (smp.flags & SMP_F_MAY_CHANGE) || len == 0)
return NULL;
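/* Editor's note: a hedged sketch, not part of the patch, of the
 * temporary rewind pattern used above ("example_rewind" is a
 * hypothetical helper). b_rew() rewinds the buffer by the amount of
 * data already scheduled for output so that a fetch or parser can see
 * the message from its start; b_adv() must then advance it back by
 * exactly the same amount.
 */
static inline void example_rewind(struct session *s)
{
	int rewind;

	b_rew(s->req.buf, rewind = s->req.buf->o); /* expose outgoing data */
	/* ... inspect the request at its original position here ... */
	b_adv(s->req.buf, rewind);                 /* restore the buffer */
}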
srv = NULL;
s->target = NULL;
- conn = objt_conn(s->req->cons->end);
+ conn = objt_conn(s->req.cons->end);
if (conn &&
(conn->flags & CO_FL_CONNECTED) &&
switch (s->be->lbprm.algo & BE_LB_PARM) {
case BE_LB_HASH_SRC:
- conn = objt_conn(s->req->prod->end);
+ conn = objt_conn(s->req.prod->end);
if (conn && conn->addr.from.ss_family == AF_INET) {
srv = get_server_sh(s->be,
(void *)&((struct sockaddr_in *)&conn->addr.from)->sin_addr,
if (s->txn.req.msg_state < HTTP_MSG_BODY)
break;
srv = get_server_uh(s->be,
- b_ptr(s->req->buf, -http_uri_rewind(&s->txn.req)),
+ b_ptr(s->req.buf, -http_uri_rewind(&s->txn.req)),
s->txn.req.sl.rq.u_l);
break;
break;
srv = get_server_ph(s->be,
- b_ptr(s->req->buf, -http_uri_rewind(&s->txn.req)),
+ b_ptr(s->req.buf, -http_uri_rewind(&s->txn.req)),
s->txn.req.sl.rq.u_l);
if (!srv && s->txn.meth == HTTP_METH_POST)
s->target = &s->be->obj_type;
}
else if ((s->be->options & PR_O_HTTP_PROXY) &&
- (conn = objt_conn(s->req->cons->end)) &&
+ (conn = objt_conn(s->req.cons->end)) &&
is_addr(&conn->addr.to)) {
/* in proxy mode, we need a valid destination address */
s->target = &s->be->obj_type;
*/
int assign_server_address(struct session *s)
{
- struct connection *cli_conn = objt_conn(s->req->prod->end);
- struct connection *srv_conn = objt_conn(s->req->cons->end);
+ struct connection *cli_conn = objt_conn(s->req.prod->end);
+ struct connection *srv_conn = objt_conn(s->req.cons->end);
#ifdef DEBUG_FULL
fprintf(stderr,"assign_server_address : s=%p\n",s);
/* If an explicit source binding is specified on the server and/or backend, and
* this source makes use of the transparent proxy, then it is extracted now and
* assigned to the session's pending connection. This function assumes that an
- * outgoing connection has already been assigned to s->req->cons->end.
+ * outgoing connection has already been assigned to s->req.cons->end.
*/
static void assign_tproxy_address(struct session *s)
{
struct server *srv = objt_server(s->target);
struct conn_src *src;
struct connection *cli_conn;
- struct connection *srv_conn = objt_conn(s->req->cons->end);
+ struct connection *srv_conn = objt_conn(s->req.cons->end);
if (srv && srv->conn_src.opts & CO_SRC_BIND)
src = &srv->conn_src;
case CO_SRC_TPROXY_CLI:
case CO_SRC_TPROXY_CIP:
/* FIXME: what can we do if the client connects over IPv6 or a UNIX socket? */
- cli_conn = objt_conn(s->req->prod->end);
+ cli_conn = objt_conn(s->req.prod->end);
if (cli_conn)
srv_conn->addr.from = cli_conn->addr.from;
else
((struct sockaddr_in *)&srv_conn->addr.from)->sin_port = 0;
((struct sockaddr_in *)&srv_conn->addr.from)->sin_addr.s_addr = 0;
- b_rew(s->req->buf, rewind = http_hdr_rewind(&s->txn.req));
+ b_rew(s->req.buf, rewind = http_hdr_rewind(&s->txn.req));
if (http_get_hdr(&s->txn.req, src->bind_hdr_name, src->bind_hdr_len,
&s->txn.hdr_idx, src->bind_hdr_occ, NULL, &vptr, &vlen)) {
((struct sockaddr_in *)&srv_conn->addr.from)->sin_addr.s_addr =
htonl(inetaddr_host_lim(vptr, vptr + vlen));
}
- b_adv(s->req->buf, rewind);
+ b_adv(s->req.buf, rewind);
}
break;
default:
/*
* This function initiates a connection to the server assigned to this session
- * (s->target, s->req->cons->addr.to). It will assign a server if none
+ * (s->target, s->req.cons->addr.to). It will assign a server if none
* is assigned yet.
* It can return one of :
* - SN_ERR_NONE if everything's OK
* - SN_ERR_INTERNAL for any other purely internal errors
 * Additionally, in the case of SN_ERR_RESOURCE, an emergency log will be emitted.
* The server-facing stream interface is expected to hold a pre-allocated connection
- * in s->req->cons->conn.
+ * in s->req.cons->conn.
*/
int connect_server(struct session *s)
{
int reuse = 0;
int err;
- srv_conn = objt_conn(s->req->cons->end);
+ srv_conn = objt_conn(s->req.cons->end);
if (srv_conn)
reuse = s->target == srv_conn->target;
}
}
- srv_conn = si_alloc_conn(s->req->cons, reuse);
+ srv_conn = si_alloc_conn(s->req.cons, reuse);
if (!srv_conn)
return SN_ERR_RESOURCE;
else if (obj_type(s->target) == OBJ_TYPE_PROXY) {
/* proxies exclusively run on raw_sock right now */
conn_prepare(srv_conn, protocol_by_family(srv_conn->addr.to.ss_family), &raw_sock);
- if (!objt_conn(s->req->cons->end) || !objt_conn(s->req->cons->end)->ctrl)
+ if (!objt_conn(s->req.cons->end) || !objt_conn(s->req.cons->end)->ctrl)
return SN_ERR_INTERNAL;
}
else
srv_conn->send_proxy_ofs = 0;
if (objt_server(s->target) && objt_server(s->target)->pp_opts) {
srv_conn->send_proxy_ofs = 1; /* must compute size */
- cli_conn = objt_conn(s->req->prod->end);
+ cli_conn = objt_conn(s->req.prod->end);
if (cli_conn)
conn_get_to_addr(cli_conn);
}
- si_attach_conn(s->req->cons, srv_conn);
+ si_attach_conn(s->req.cons, srv_conn);
assign_tproxy_address(s);
}
else {
/* the connection is being reused, just re-attach it */
- si_attach_conn(s->req->cons, srv_conn);
+ si_attach_conn(s->req.cons, srv_conn);
s->flags |= SN_SRV_REUSED;
}
/* flag for logging source ip/port */
if (s->fe->options2 & PR_O2_SRC_ADDR)
- s->req->cons->flags |= SI_FL_SRC_ADDR;
+ s->req.cons->flags |= SI_FL_SRC_ADDR;
/* disable lingering */
if (s->be->options & PR_O_TCP_NOLING)
- s->req->cons->flags |= SI_FL_NOLINGER;
+ s->req.cons->flags |= SI_FL_NOLINGER;
- err = si_connect(s->req->cons);
+ err = si_connect(s->req.cons);
if (err != SN_ERR_NONE)
return err;
/* set connect timeout */
- s->req->cons->exp = tick_add_ifset(now_ms, s->be->timeout.connect);
+ s->req.cons->exp = tick_add_ifset(now_ms, s->be->timeout.connect);
srv = objt_server(s->target);
if (srv) {
goto redispatch;
}
- if (!s->req->cons->err_type) {
- s->req->cons->err_type = SI_ET_QUEUE_ERR;
+ if (!s->req.cons->err_type) {
+ s->req.cons->err_type = SI_ET_QUEUE_ERR;
}
srv->counters.failed_conns++;
case SRV_STATUS_NOSRV:
/* note: it is guaranteed that srv == NULL here */
- if (!s->req->cons->err_type) {
- s->req->cons->err_type = SI_ET_CONN_ERR;
+ if (!s->req.cons->err_type) {
+ s->req.cons->err_type = SI_ET_CONN_ERR;
}
s->be->be_counters.failed_conns++;
return 1;
case SRV_STATUS_QUEUED:
- s->req->cons->exp = tick_add_ifset(now_ms, s->be->timeout.queue);
- s->req->cons->state = SI_ST_QUE;
+ s->req.cons->exp = tick_add_ifset(now_ms, s->be->timeout.queue);
+ s->req.cons->state = SI_ST_QUE;
/* do nothing else and do not wake any other session up */
return 1;
case SRV_STATUS_INTERNAL:
default:
- if (!s->req->cons->err_type) {
- s->req->cons->err_type = SI_ET_CONN_OTHER;
+ if (!s->req.cons->err_type) {
+ s->req.cons->err_type = SI_ET_CONN_OTHER;
}
if (srv)
s->logs.prx_queue_size = 0; /* we get the number of pending conns before us */
s->logs.srv_queue_size = 0; /* we will get this number soon */
- s->req->flags |= CF_READ_DONTWAIT; /* we plan to read small requests */
+ s->req.flags |= CF_READ_DONTWAIT; /* we plan to read small requests */
if (s->listener->timeout) {
- s->req->rto = *s->listener->timeout;
- s->rep->wto = *s->listener->timeout;
+ s->req.rto = *s->listener->timeout;
+ s->res.wto = *s->listener->timeout;
}
return 1;
}
return 1;
}
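/* Editor's note: a hedged sketch, not part of the patch, showing how a
 * hypothetical caller ("example_connect") could consume the return
 * codes documented for connect_server() above; real callers live in
 * the session state machine and also handle queuing and redispatch.
 */
static inline int example_connect(struct session *s)
{
	int err = connect_server(s);

	if (err != SN_ERR_NONE) {
		/* keep the first error cause for logging, as done above */
		if (!s->req.cons->err_type)
			s->req.cons->err_type = SI_ET_CONN_OTHER;
		return err;
	}
	return SN_ERR_NONE;
}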
- s->req->rto = s->rep->wto = 1 + MS_TO_TICKS(timeout*1000);
+ s->req.rto = s->res.wto = 1 + MS_TO_TICKS(timeout*1000);
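/* Editor's note (assumption, not part of the patch): the "1 +" above
 * guards against a computed value of 0, which the tick API would treat
 * as TICK_ETERNITY, i.e. no timeout at all.
 */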
return 1;
}
else {
chunk_appendf(&trash,
" req=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
" an_exp=%s",
- sess->req,
- sess->req->flags, sess->req->analysers,
- sess->req->pipe ? sess->req->pipe->data : 0,
- sess->req->to_forward, sess->req->total,
- sess->req->analyse_exp ?
- human_time(TICKS_TO_MS(sess->req->analyse_exp - now_ms),
+ &sess->req,
+ sess->req.flags, sess->req.analysers,
+ sess->req.pipe ? sess->req.pipe->data : 0,
+ sess->req.to_forward, sess->req.total,
+ sess->req.analyse_exp ?
+ human_time(TICKS_TO_MS(sess->req.analyse_exp - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>");
chunk_appendf(&trash,
" rex=%s",
- sess->req->rex ?
- human_time(TICKS_TO_MS(sess->req->rex - now_ms),
+ sess->req.rex ?
+ human_time(TICKS_TO_MS(sess->req.rex - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>");
chunk_appendf(&trash,
" wex=%s\n"
" buf=%p data=%p o=%d p=%d req.next=%d i=%d size=%d\n",
- sess->req->wex ?
- human_time(TICKS_TO_MS(sess->req->wex - now_ms),
+ sess->req.wex ?
+ human_time(TICKS_TO_MS(sess->req.wex - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>",
- sess->req->buf,
- sess->req->buf->data, sess->req->buf->o,
- (int)(sess->req->buf->p - sess->req->buf->data),
- sess->txn.req.next, sess->req->buf->i,
- sess->req->buf->size);
+ sess->req.buf,
+ sess->req.buf->data, sess->req.buf->o,
+ (int)(sess->req.buf->p - sess->req.buf->data),
+ sess->txn.req.next, sess->req.buf->i,
+ sess->req.buf->size);
chunk_appendf(&trash,
" res=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
" an_exp=%s",
- sess->rep,
- sess->rep->flags, sess->rep->analysers,
- sess->rep->pipe ? sess->rep->pipe->data : 0,
- sess->rep->to_forward, sess->rep->total,
- sess->rep->analyse_exp ?
- human_time(TICKS_TO_MS(sess->rep->analyse_exp - now_ms),
+ &sess->res,
+ sess->res.flags, sess->res.analysers,
+ sess->res.pipe ? sess->res.pipe->data : 0,
+ sess->res.to_forward, sess->res.total,
+ sess->res.analyse_exp ?
+ human_time(TICKS_TO_MS(sess->res.analyse_exp - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>");
chunk_appendf(&trash,
" rex=%s",
- sess->rep->rex ?
- human_time(TICKS_TO_MS(sess->rep->rex - now_ms),
+ sess->res.rex ?
+ human_time(TICKS_TO_MS(sess->res.rex - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>");
chunk_appendf(&trash,
" wex=%s\n"
" buf=%p data=%p o=%d p=%d rsp.next=%d i=%d size=%d\n",
- sess->rep->wex ?
- human_time(TICKS_TO_MS(sess->rep->wex - now_ms),
+ sess->res.wex ?
+ human_time(TICKS_TO_MS(sess->res.wex - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>",
- sess->rep->buf,
- sess->rep->buf->data, sess->rep->buf->o,
- (int)(sess->rep->buf->p - sess->rep->buf->data),
- sess->txn.rsp.next, sess->rep->buf->i,
- sess->rep->buf->size);
+ sess->res.buf,
+ sess->res.buf->data, sess->res.buf->o,
+ (int)(sess->res.buf->p - sess->res.buf->data),
+ sess->txn.rsp.next, sess->res.buf->i,
+ sess->res.buf->size);
if (bi_putchk(si->ib, &trash) == -1)
return 0;
chunk_appendf(&trash,
" rq[f=%06xh,i=%d,an=%02xh,rx=%s",
- curr_sess->req->flags,
- curr_sess->req->buf->i,
- curr_sess->req->analysers,
- curr_sess->req->rex ?
- human_time(TICKS_TO_MS(curr_sess->req->rex - now_ms),
+ curr_sess->req.flags,
+ curr_sess->req.buf->i,
+ curr_sess->req.analysers,
+ curr_sess->req.rex ?
+ human_time(TICKS_TO_MS(curr_sess->req.rex - now_ms),
TICKS_TO_MS(1000)) : "");
chunk_appendf(&trash,
",wx=%s",
- curr_sess->req->wex ?
- human_time(TICKS_TO_MS(curr_sess->req->wex - now_ms),
+ curr_sess->req.wex ?
+ human_time(TICKS_TO_MS(curr_sess->req.wex - now_ms),
TICKS_TO_MS(1000)) : "");
chunk_appendf(&trash,
",ax=%s]",
- curr_sess->req->analyse_exp ?
- human_time(TICKS_TO_MS(curr_sess->req->analyse_exp - now_ms),
+ curr_sess->req.analyse_exp ?
+ human_time(TICKS_TO_MS(curr_sess->req.analyse_exp - now_ms),
TICKS_TO_MS(1000)) : "");
chunk_appendf(&trash,
" rp[f=%06xh,i=%d,an=%02xh,rx=%s",
- curr_sess->rep->flags,
- curr_sess->rep->buf->i,
- curr_sess->rep->analysers,
- curr_sess->rep->rex ?
- human_time(TICKS_TO_MS(curr_sess->rep->rex - now_ms),
+ curr_sess->res.flags,
+ curr_sess->res.buf->i,
+ curr_sess->res.analysers,
+ curr_sess->res.rex ?
+ human_time(TICKS_TO_MS(curr_sess->res.rex - now_ms),
TICKS_TO_MS(1000)) : "");
chunk_appendf(&trash,
",wx=%s",
- curr_sess->rep->wex ?
- human_time(TICKS_TO_MS(curr_sess->rep->wex - now_ms),
+ curr_sess->res.wex ?
+ human_time(TICKS_TO_MS(curr_sess->res.wex - now_ms),
TICKS_TO_MS(1000)) : "");
chunk_appendf(&trash,
",ax=%s]",
- curr_sess->rep->analyse_exp ?
- human_time(TICKS_TO_MS(curr_sess->rep->analyse_exp - now_ms),
+ curr_sess->res.analyse_exp ?
+ human_time(TICKS_TO_MS(curr_sess->res.analyse_exp - now_ms),
TICKS_TO_MS(1000)) : "");
conn = objt_conn(curr_sess->si[0].end);
}
if (s->fe->mode == PR_MODE_HTTP)
- s->req->flags |= CF_READ_DONTWAIT; /* one read is usually enough */
+ s->req.flags |= CF_READ_DONTWAIT; /* one read is usually enough */
/* note: this should not happen anymore since there's always at least the switching rules */
- if (!s->req->analysers) {
- channel_auto_connect(s->req); /* don't wait to establish connection */
- channel_auto_close(s->req); /* let the producer forward close requests */
+ if (!s->req.analysers) {
+ channel_auto_connect(&s->req); /* don't wait to establish connection */
+ channel_auto_close(&s->req); /* let the producer forward close requests */
}
- s->req->rto = s->fe->timeout.client;
- s->rep->wto = s->fe->timeout.client;
+ s->req.rto = s->fe->timeout.client;
+ s->res.wto = s->fe->timeout.client;
/* everything's OK, let's go on */
return 1;
sent = MAY_LJMP(luaL_checkinteger(L, 3));
/* Check for connection close. */
- if (!socket->s || channel_output_closed(socket->s->req)) {
+ if (!socket->s || channel_output_closed(&socket->s->req)) {
lua_pushinteger(L, -1);
return 1;
}
/* Check if the buffer is available, because HAProxy doesn't allocate
 * the request buffer if it's not required.
*/
- if (socket->s->req->buf->size == 0) {
- if (!session_alloc_recv_buffer(socket->s, &socket->s->req->buf)) {
- socket->s->req->prod->flags |= SI_FL_WAIT_ROOM;
+ if (socket->s->req.buf->size == 0) {
+ if (!session_alloc_recv_buffer(socket->s, &socket->s->req.buf)) {
+ socket->s->req.prod->flags |= SI_FL_WAIT_ROOM;
goto hlua_socket_write_yield_return;
}
}
struct appctx *appctx;
/* Check for connection close. */
- if (!hlua || !socket->s || channel_output_closed(socket->s->req)) {
+ if (!hlua || !socket->s || channel_output_closed(&socket->s->req)) {
lua_pushnil(L);
lua_pushstring(L, "Can't connect");
return 2;
ip = MAY_LJMP(luaL_checkstring(L, 2));
port = MAY_LJMP(luaL_checkinteger(L, 3));
- conn = si_alloc_conn(socket->s->req->cons, 0);
+ conn = si_alloc_conn(socket->s->req.cons, 0);
if (!conn)
WILL_LJMP(luaL_error(L, "connect: internal error"));
socket = MAY_LJMP(hlua_checksocket(L, 1));
tmout = MAY_LJMP(luaL_checkinteger(L, 2)) * 1000;
- socket->s->req->rto = tmout;
- socket->s->req->wto = tmout;
- socket->s->rep->rto = tmout;
- socket->s->rep->wto = tmout;
+ socket->s->req.rto = tmout;
+ socket->s->req.wto = tmout;
+ socket->s->res.rto = tmout;
+ socket->s->res.wto = tmout;
return 0;
}
goto out_free_session;
}
- socket->s->req = pool_alloc2(pool2_channel);
- if (!socket->s->req) {
- hlua_pusherror(L, "socket: out of memory");
- goto out_fail_req;
- }
-
- socket->s->req->buf = pool_alloc2(pool2_buffer);
- if (!socket->s->req->buf) {
+ socket->s->req.buf = pool_alloc2(pool2_buffer);
+ if (!socket->s->req.buf) {
hlua_pusherror(L, "socket: out of memory");
goto out_fail_req_buf;
}
- socket->s->rep = pool_alloc2(pool2_channel);
- if (!socket->s->rep) {
- hlua_pusherror(L, "socket: out of memory");
- goto out_fail_rep;
- }
-
- socket->s->rep->buf = pool_alloc2(pool2_buffer);
- if (!socket->s->rep->buf) {
+ socket->s->res.buf = pool_alloc2(pool2_buffer);
+ if (!socket->s->res.buf) {
hlua_pusherror(L, "socket: out of memory");
goto out_fail_rep_buf;
}
* Initialize the attached buffers
*
*/
- socket->s->req->buf->size = global.tune.bufsize;
- socket->s->rep->buf->size = global.tune.bufsize;
+ socket->s->req.buf->size = global.tune.bufsize;
+ socket->s->res.buf->size = global.tune.bufsize;
/*
*
/* This function resets the struct. It must be called
* before the configuration.
*/
- channel_init(socket->s->req);
- channel_init(socket->s->rep);
+ channel_init(&socket->s->req);
+ channel_init(&socket->s->res);
- socket->s->req->prod = &socket->s->si[0];
- socket->s->req->cons = &socket->s->si[1];
+ socket->s->req.prod = &socket->s->si[0];
+ socket->s->req.cons = &socket->s->si[1];
- socket->s->rep->prod = &socket->s->si[1];
- socket->s->rep->cons = &socket->s->si[0];
+ socket->s->res.prod = &socket->s->si[1];
+ socket->s->res.cons = &socket->s->si[0];
- socket->s->si[0].ib = socket->s->req;
- socket->s->si[0].ob = socket->s->rep;
+ socket->s->si[0].ib = &socket->s->req;
+ socket->s->si[0].ob = &socket->s->res;
- socket->s->si[1].ib = socket->s->rep;
- socket->s->si[1].ob = socket->s->req;
+ socket->s->si[1].ib = &socket->s->res;
+ socket->s->si[1].ob = &socket->s->req;
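/* Editor's note: an orientation comment, not part of the patch. The two
 * embedded channels form a duplex pipe between the stream interfaces:
 *
 *   si[0] --produces--> req --consumed by--> si[1]
 *   si[0] <--consumes-- res <--produced by-- si[1]
 *
 * so si[0].ib and si[1].ob point to req, while si[1].ib and si[0].ob
 * point to res.
 */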
- socket->s->req->analysers = 0;
- socket->s->req->rto = socket_proxy.timeout.client;
- socket->s->req->wto = socket_proxy.timeout.server;
- socket->s->req->rex = TICK_ETERNITY;
- socket->s->req->wex = TICK_ETERNITY;
- socket->s->req->analyse_exp = TICK_ETERNITY;
+ socket->s->req.analysers = 0;
+ socket->s->req.rto = socket_proxy.timeout.client;
+ socket->s->req.wto = socket_proxy.timeout.server;
+ socket->s->req.rex = TICK_ETERNITY;
+ socket->s->req.wex = TICK_ETERNITY;
+ socket->s->req.analyse_exp = TICK_ETERNITY;
- socket->s->rep->analysers = 0;
- socket->s->rep->rto = socket_proxy.timeout.server;
- socket->s->rep->wto = socket_proxy.timeout.client;
- socket->s->rep->rex = TICK_ETERNITY;
- socket->s->rep->wex = TICK_ETERNITY;
- socket->s->rep->analyse_exp = TICK_ETERNITY;
+ socket->s->res.analysers = 0;
+ socket->s->res.rto = socket_proxy.timeout.server;
+ socket->s->res.wto = socket_proxy.timeout.client;
+ socket->s->res.rex = TICK_ETERNITY;
+ socket->s->res.wex = TICK_ETERNITY;
+ socket->s->res.analyse_exp = TICK_ETERNITY;
/*
*
*/
/* The data producer is already connected. It is the applet. */
- socket->s->req->flags = CF_READ_ATTACHED;
+ socket->s->req.flags = CF_READ_ATTACHED;
- channel_auto_connect(socket->s->req); /* don't wait to establish connection */
- channel_auto_close(socket->s->req); /* let the producer forward close requests */
+ channel_auto_connect(&socket->s->req); /* don't wait to establish connection */
+ channel_auto_close(&socket->s->req); /* let the producer forward close requests */
si_reset(&socket->s->si[0], socket->s->task);
si_set_state(&socket->s->si[0], SI_ST_EST); /* connection established (resource exists) */
return 1;
out_fail_conn1:
- pool_free2(pool2_buffer, socket->s->rep->buf);
+ pool_free2(pool2_buffer, socket->s->res.buf);
out_fail_rep_buf:
- pool_free2(pool2_channel, socket->s->rep);
-out_fail_rep:
- pool_free2(pool2_buffer, socket->s->req->buf);
+ pool_free2(pool2_buffer, socket->s->req.buf);
out_fail_req_buf:
- pool_free2(pool2_channel, socket->s->req);
-out_fail_req:
task_free(socket->s->task);
out_free_session:
pool_free2(pool2_session, socket->s);
 * must set the WAKERESWR flag. This flag requires the task to
* wake up if any activity is detected on the response buffer.
*/
- if (chn->chn == chn->s->rep)
+ if (chn->chn == &chn->s->res)
HLUA_SET_WAKERESWR(hlua);
else
HLUA_SET_WAKEREQWR(hlua);
 * must set the WAKERESWR flag. This flag requires the task to
* wake up if any activity is detected on the response buffer.
*/
- if (chn->chn == chn->s->rep)
+ if (chn->chn == &chn->s->res)
HLUA_SET_WAKERESWR(hlua);
else
HLUA_SET_WAKEREQWR(hlua);
/* Create the "req" field that contains the request channel object. */
lua_pushstring(L, "req");
- if (!hlua_channel_new(L, s, s->req))
+ if (!hlua_channel_new(L, s, &s->req))
return 0;
lua_settable(L, -3);
/* Create the "res" field that contains the response channel object. */
lua_pushstring(L, "res");
- if (!hlua_channel_new(L, s, s->rep))
+ if (!hlua_channel_new(L, s, &s->res))
return 0;
lua_settable(L, -3);
/* Build array of headers. */
old_idx = 0;
- cur_next = sess->req->buf->p + hdr_idx_first_pos(&sess->txn.hdr_idx);
+ cur_next = sess->req.buf->p + hdr_idx_first_pos(&sess->txn.hdr_idx);
while (1) {
cur_idx = sess->txn.hdr_idx.v[old_idx].next;
/* Set timeout in the required channel. */
if (s->hlua.wake_time != TICK_ETERNITY) {
if (analyzer & (AN_REQ_INSPECT_FE|AN_REQ_HTTP_PROCESS_FE))
- s->req->analyse_exp = s->hlua.wake_time;
+ s->req.analyse_exp = s->hlua.wake_time;
else if (analyzer & (AN_RES_INSPECT|AN_RES_HTTP_PROCESS_BE))
- s->rep->analyse_exp = s->hlua.wake_time;
+ s->res.analyse_exp = s->hlua.wake_time;
}
/* Some actions can be woken up when a "write" event
 * is detected on a response channel. This is useful
 * only for actions targeted at requests.
*/
if (HLUA_IS_WAKERESWR(&s->hlua)) {
- s->rep->flags |= CF_WAKE_WRITE;
+ s->res.flags |= CF_WAKE_WRITE;
if ((analyzer & (AN_REQ_INSPECT_FE|AN_REQ_HTTP_PROCESS_FE)))
- s->rep->analysers |= analyzer;
+ s->res.analysers |= analyzer;
}
if (HLUA_IS_WAKEREQWR(&s->hlua))
- s->req->flags |= CF_WAKE_WRITE;
+ s->req.flags |= CF_WAKE_WRITE;
return 0;
/* finished with error. */
break;
case LOG_FMT_CLIENTIP: // %ci
- conn = objt_conn(s->req->prod->end);
+ conn = objt_conn(s->req.prod->end);
if (conn)
ret = lf_ip(tmplog, (struct sockaddr *)&conn->addr.from, dst + maxsize - tmplog, tmp);
else
break;
case LOG_FMT_CLIENTPORT: // %cp
- conn = objt_conn(s->req->prod->end);
+ conn = objt_conn(s->req.prod->end);
if (conn) {
if (conn->addr.from.ss_family == AF_UNIX) {
ret = ltoa_o(s->listener->luid, tmplog, dst + maxsize - tmplog);
break;
case LOG_FMT_FRONTENDIP: // %fi
- conn = objt_conn(s->req->prod->end);
+ conn = objt_conn(s->req.prod->end);
if (conn) {
conn_get_to_addr(conn);
ret = lf_ip(tmplog, (struct sockaddr *)&conn->addr.to, dst + maxsize - tmplog, tmp);
break;
case LOG_FMT_FRONTENDPORT: // %fp
- conn = objt_conn(s->req->prod->end);
+ conn = objt_conn(s->req.prod->end);
if (conn) {
conn_get_to_addr(conn);
if (conn->addr.to.ss_family == AF_UNIX)
break;
case LOG_FMT_BACKENDIP: // %bi
- conn = objt_conn(s->req->cons->end);
+ conn = objt_conn(s->req.cons->end);
if (conn)
ret = lf_ip(tmplog, (struct sockaddr *)&conn->addr.from, dst + maxsize - tmplog, tmp);
else
break;
case LOG_FMT_BACKENDPORT: // %bp
- conn = objt_conn(s->req->cons->end);
+ conn = objt_conn(s->req.cons->end);
if (conn)
ret = lf_port(tmplog, (struct sockaddr *)&conn->addr.from, dst + maxsize - tmplog, tmp);
else
break;
case LOG_FMT_SERVERIP: // %si
- conn = objt_conn(s->req->cons->end);
+ conn = objt_conn(s->req.cons->end);
if (conn)
ret = lf_ip(tmplog, (struct sockaddr *)&conn->addr.to, dst + maxsize - tmplog, tmp);
else
break;
case LOG_FMT_SERVERPORT: // %sp
- conn = objt_conn(s->req->cons->end);
+ conn = objt_conn(s->req.cons->end);
if (conn)
ret = lf_port(tmplog, (struct sockaddr *)&conn->addr.to, dst + maxsize - tmplog, tmp);
else
case LOG_FMT_RETRIES: // %rq
if (s->flags & SN_REDISP)
LOGCHAR('+');
- ret = ltoa_o((s->req->cons->conn_retries>0) ?
- (be->conn_retries - s->req->cons->conn_retries) :
+ ret = ltoa_o((s->req.cons->conn_retries>0) ?
+ (be->conn_retries - s->req.cons->conn_retries) :
be->conn_retries, tmplog, dst + maxsize - tmplog);
if (ret == NULL)
goto out;
err = (s->flags & SN_REDISP) ||
((s->flags & SN_ERR_MASK) > SN_ERR_LOCAL) ||
(((s->flags & SN_ERR_MASK) == SN_ERR_NONE) &&
- (s->req->cons->conn_retries != s->be->conn_retries)) ||
+ (s->req.cons->conn_retries != s->be->conn_retries)) ||
((s->fe->mode == PR_MODE_HTTP) && s->txn.status >= 500);
if (!err && (s->fe->options2 & PR_O2_NOLOGNORM))
smp_fetch_len(struct proxy *px, struct session *s, void *l7, unsigned int opt,
const struct arg *args, struct sample *smp, const char *kw, void *private)
{
- struct channel *chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? s->rep : s->req;
+ struct channel *chn;
+
+ if (!s)
+ return 0;
- if (!s || !chn)
+ chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &s->res : &s->req;
+ if (!chn->buf)
return 0;
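/* Editor's note (observation on the change above, not part of the
 * patch): since the channels are now embedded in the session, &s->req
 * and &s->res can never be NULL, so the guard that used to test the
 * channel pointer must now test chn->buf, which is still allocated
 * lazily.
 */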
smp->type = SMP_T_UINT;
if (!s)
goto not_ssl_hello;
- chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? s->rep : s->req;
-
- if (!chn)
+ chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &s->res : &s->req;
+ if (!chn->buf)
goto not_ssl_hello;
bleft = chn->buf->i;
int version, bleft, msg_len;
const unsigned char *data;
- if (!s || !s->req)
+ if (!s || !s->req.buf)
return 0;
msg_len = 0;
- bleft = s->req->buf->i;
+ bleft = s->req.buf->i;
if (!bleft)
goto too_short;
- data = (const unsigned char *)s->req->buf->p;
+ data = (const unsigned char *)s->req.buf->p;
if ((*data >= 0x14 && *data <= 0x17) || (*data == 0xFF)) {
/* SSLv3 header format */
if (bleft < 5)
 * all of the request that fits in a buffer is already
* there.
*/
- if (msg_len > channel_recv_limit(s->req) + s->req->buf->data - s->req->buf->p)
- msg_len = channel_recv_limit(s->req) + s->req->buf->data - s->req->buf->p;
+ if (msg_len > channel_recv_limit(&s->req) + s->req.buf->data - s->req.buf->p)
+ msg_len = channel_recv_limit(&s->req) + s->req.buf->data - s->req.buf->p;
if (bleft < msg_len)
goto too_short;
if (!s)
goto not_ssl_hello;
- chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? s->rep : s->req;
-
- if (!chn)
+ chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &s->res : &s->req;
+ if (!chn->buf)
goto not_ssl_hello;
bleft = chn->buf->i;
int bleft;
const unsigned char *data;
- if (!s || !s->req)
+ if (!s || !s->req.buf)
return 0;
smp->flags = SMP_F_CONST;
smp->type = SMP_T_STR;
- bleft = s->req->buf->i;
+ bleft = s->req.buf->i;
if (bleft <= 11)
goto too_short;
- data = (const unsigned char *)s->req->buf->p + 11;
+ data = (const unsigned char *)s->req.buf->p + 11;
bleft -= 11;
if (bleft <= 7)
if (!s)
return 0;
- chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? s->rep : s->req;
-
- if (!chn)
+ chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &s->res : &s->req;
+ if (!chn->buf)
return 0;
if (len_offset + len_size > chn->buf->i)
if (!s)
return 0;
- chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? s->rep : s->req;
-
- if (!chn)
+ chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &s->res : &s->req;
+ if (!chn->buf)
return 0;
if (buf_size > chn->buf->size || buf_offset + buf_size > chn->buf->size) {
#include <proto/proto_http.h>
#include <proto/proxy.h>
#include <proto/session.h>
+#include <proto/signal.h>
+#include <proto/stick_table.h>
#include <proto/stream_interface.h>
#include <proto/task.h>
-#include <proto/stick_table.h>
-#include <proto/signal.h>
/*******************************/
s->logs.prx_queue_size = 0;/* we get the number of pending conns before us */
s->logs.srv_queue_size = 0; /* we will get this number soon */
- s->req->flags |= CF_READ_DONTWAIT; /* we plan to read small requests */
+ s->req.flags |= CF_READ_DONTWAIT; /* we plan to read small requests */
if (s->listener->timeout) {
- s->req->rto = *s->listener->timeout;
- s->rep->wto = *s->listener->timeout;
+ s->req.rto = *s->listener->timeout;
+ s->res.wto = *s->listener->timeout;
}
return 1;
}
* when the default backend is assigned.
*/
s->be = s->fe = p;
-
- s->req = s->rep = NULL; /* will be allocated later */
+ s->req.buf = s->res.buf = NULL;
si_reset(&s->si[0], t);
si_set_state(&s->si[0], SI_ST_EST);
txn->hdr_idx.v = NULL;
txn->hdr_idx.size = txn->hdr_idx.used = 0;
- if ((s->req = pool_alloc2(pool2_channel)) == NULL)
- goto out_fail_req; /* no memory */
-
- channel_init(s->req);
- s->req->prod = &s->si[0];
- s->req->cons = &s->si[1];
- s->si[0].ib = s->si[1].ob = s->req;
+ channel_init(&s->req);
+ s->req.prod = &s->si[0];
+ s->req.cons = &s->si[1];
+ s->si[0].ib = s->si[1].ob = &s->req;
- s->req->flags |= CF_READ_ATTACHED; /* the producer is already connected */
+ s->req.flags |= CF_READ_ATTACHED; /* the producer is already connected */
/* activate default analysers enabled for this listener */
- s->req->analysers = l->analysers;
+ s->req.analysers = l->analysers;
/* note: this should not happen anymore since there's always at least the switching rules */
- if (!s->req->analysers) {
- channel_auto_connect(s->req);/* don't wait to establish connection */
- channel_auto_close(s->req);/* let the producer forward close requests */
+ if (!s->req.analysers) {
+ channel_auto_connect(&s->req);/* don't wait to establish connection */
+ channel_auto_close(&s->req);/* let the producer forward close requests */
}
- s->req->rto = s->fe->timeout.client;
- s->req->wto = s->be->timeout.server;
-
- if ((s->rep = pool_alloc2(pool2_channel)) == NULL)
- goto out_fail_rep; /* no memory */
+ s->req.rto = s->fe->timeout.client;
+ s->req.wto = s->be->timeout.server;
- channel_init(s->rep);
- s->rep->prod = &s->si[1];
- s->rep->cons = &s->si[0];
- s->si[0].ob = s->si[1].ib = s->rep;
+ channel_init(&s->res);
+ s->res.prod = &s->si[1];
+ s->res.cons = &s->si[0];
+ s->si[0].ob = s->si[1].ib = &s->res;
- s->rep->rto = s->be->timeout.server;
- s->rep->wto = s->fe->timeout.client;
+ s->res.rto = s->be->timeout.server;
+ s->res.wto = s->fe->timeout.client;
- s->req->rex = TICK_ETERNITY;
- s->req->wex = TICK_ETERNITY;
- s->req->analyse_exp = TICK_ETERNITY;
- s->rep->rex = TICK_ETERNITY;
- s->rep->wex = TICK_ETERNITY;
- s->rep->analyse_exp = TICK_ETERNITY;
+ s->req.rex = TICK_ETERNITY;
+ s->req.wex = TICK_ETERNITY;
+ s->req.analyse_exp = TICK_ETERNITY;
+ s->res.rex = TICK_ETERNITY;
+ s->res.wex = TICK_ETERNITY;
+ s->res.analyse_exp = TICK_ETERNITY;
t->expire = TICK_ETERNITY;
- s->rep->flags |= CF_READ_DONTWAIT;
+ s->res.flags |= CF_READ_DONTWAIT;
/* it is important not to call the wakeup function directly but to
* pass through task_wakeup(), because this one knows how to apply
return s;
/* Error unrolling */
- out_fail_rep:
- pool_free2(pool2_channel, s->req);
- out_fail_req:
- conn_free(conn);
out_fail_conn1:
task_free(t);
out_free_session:
* to temporarily rewind the buffer.
*/
txn = &s->txn;
- b_rew(s->req->buf, rewind = http_hdr_rewind(&txn->req));
+ b_rew(s->req.buf, rewind = http_hdr_rewind(&txn->req));
path = http_get_path(txn);
- len = buffer_count(s->req->buf, path, b_ptr(s->req->buf, txn->req.sl.rq.u + txn->req.sl.rq.u_l));
+ len = buffer_count(s->req.buf, path, b_ptr(s->req.buf, txn->req.sl.rq.u + txn->req.sl.rq.u_l));
- b_adv(s->req->buf, rewind);
+ b_adv(s->req.buf, rewind);
if (!path)
return;
len = strlen(h);
}
- if (!http_find_header2(h, len, s->req->buf->p, &txn->hdr_idx, &ctx))
+ if (!http_find_header2(h, len, s->req.buf->p, &txn->hdr_idx, &ctx))
return 0;
h = ctx.line + ctx.val;
/* This stream analyser waits for a complete HTTP request. It returns 1 if the
* processing can continue on next analysers, or zero if it either needs more
* data or wants to immediately abort the request (eg: timeout, error, ...). It
- * is tied to AN_REQ_WAIT_HTTP and may may remove itself from s->req->analysers
+ * is tied to AN_REQ_WAIT_HTTP and may remove itself from s->req.analysers
* when it has nothing left to do, and may remove any analyser when it wants to
* abort.
*/
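/* Editor's note: a hedged skeleton, not part of the patch, of the
 * analyser contract described above ("example_analyser" is
 * hypothetical): return 1 to let processing continue on the next
 * analysers, return 0 to wait for more data, and clear our bit from
 * the channel's analyser mask once there is nothing left to do.
 */
static int example_analyser(struct session *s, struct channel *req, int an_bit)
{
	if (s->txn.req.msg_state < HTTP_MSG_BODY)
		return 0;               /* need more data: stay registered */

	req->analysers &= ~an_bit;      /* nothing left to do: remove self */
	return 1;                       /* let the next analysers run */
}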
* keep-alive requests.
*/
if ((txn->flags & TX_NOT_FIRST) &&
- unlikely(!channel_is_rewritable(s->rep) ||
- bi_end(s->rep->buf) < b_ptr(s->rep->buf, txn->rsp.next) ||
- bi_end(s->rep->buf) > s->rep->buf->data + s->rep->buf->size - global.tune.maxrewrite)) {
- if (s->rep->buf->o) {
- if (s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
+ unlikely(!channel_is_rewritable(&s->res) ||
+ bi_end(s->res.buf) < b_ptr(s->res.buf, txn->rsp.next) ||
+ bi_end(s->res.buf) > s->res.buf->data + s->res.buf->size - global.tune.maxrewrite)) {
+ if (s->res.buf->o) {
+ if (s->res.flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
goto failed_keep_alive;
/* don't let a connection request be initiated */
channel_dont_connect(req);
- s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
- s->rep->flags |= CF_WAKE_WRITE;
- s->rep->analysers |= an_bit; /* wake us up once it changes */
+ s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
+ s->res.flags |= CF_WAKE_WRITE;
+ s->res.analysers |= an_bit; /* wake us up once it changes */
return 0;
}
}
channel_dont_connect(req);
req->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
- s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
+ s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
#ifdef TCP_QUICKACK
- if (s->listener->options & LI_O_NOQUICKACK && req->buf->i && objt_conn(s->req->prod->end) && conn_ctrl_ready(__objt_conn(s->req->prod->end))) {
+ if (s->listener->options & LI_O_NOQUICKACK && req->buf->i && objt_conn(s->req.prod->end) && conn_ctrl_ready(__objt_conn(s->req.prod->end))) {
/* We need more data, we have to re-enable quick-ack in case we
* previously disabled it, otherwise we might cause the client
* to delay next data.
*/
- setsockopt(__objt_conn(s->req->prod->end)->t.sock.fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
+ setsockopt(__objt_conn(s->req.prod->end)->t.sock.fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
}
#endif
req->analysers = 0;
s->logs.logwait = 0;
s->logs.level = 0;
- s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
+ s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
stream_int_retnclose(req->prod, NULL);
return 0;
}
int http_handle_stats(struct session *s, struct channel *req)
{
struct stats_admin_rule *stats_admin_rule;
- struct stream_interface *si = s->rep->prod;
+ struct stream_interface *si = s->res.prod;
struct http_txn *txn = &s->txn;
struct http_msg *msg = &txn->req;
struct uri_auth *uri_auth = s->be->uri_auth;
break;
case HTTP_REQ_ACT_SET_TOS:
- if ((cli_conn = objt_conn(s->req->prod->end)) && conn_ctrl_ready(cli_conn))
+ if ((cli_conn = objt_conn(s->req.prod->end)) && conn_ctrl_ready(cli_conn))
inet_set_tos(cli_conn->t.sock.fd, cli_conn->addr.from, rule->arg.tos);
break;
case HTTP_REQ_ACT_SET_MARK:
#ifdef SO_MARK
- if ((cli_conn = objt_conn(s->req->prod->end)) && conn_ctrl_ready(cli_conn))
+ if ((cli_conn = objt_conn(s->req.prod->end)) && conn_ctrl_ready(cli_conn))
setsockopt(cli_conn->t.sock.fd, SOL_SOCKET, SO_MARK, &rule->arg.mark, sizeof(rule->arg.mark));
#endif
break;
break;
case HTTP_RES_ACT_SET_TOS:
- if ((cli_conn = objt_conn(s->req->prod->end)) && conn_ctrl_ready(cli_conn))
+ if ((cli_conn = objt_conn(s->req.prod->end)) && conn_ctrl_ready(cli_conn))
inet_set_tos(cli_conn->t.sock.fd, cli_conn->addr.from, rule->arg.tos);
break;
case HTTP_RES_ACT_SET_MARK:
#ifdef SO_MARK
- if ((cli_conn = objt_conn(s->req->prod->end)) && conn_ctrl_ready(cli_conn))
+ if ((cli_conn = objt_conn(s->req.prod->end)) && conn_ctrl_ready(cli_conn))
setsockopt(cli_conn->t.sock.fd, SOL_SOCKET, SO_MARK, &rule->arg.mark, sizeof(rule->arg.mark));
#endif
break;
msg->next -= msg->sov;
msg->sov = 0;
txn->req.chn->analysers = AN_REQ_HTTP_XFER_BODY;
- s->rep->analysers = AN_RES_HTTP_XFER_BODY;
+ s->res.analysers = AN_RES_HTTP_XFER_BODY;
txn->req.msg_state = HTTP_MSG_CLOSED;
txn->rsp.msg_state = HTTP_MSG_DONE;
} else {
* by a possible reqrep, while they are processed *after* so that a
* reqdeny can still block them. This clearly needs to change in 1.6!
*/
- if (stats_check_uri(s->rep->prod, txn, px)) {
+ if (stats_check_uri(s->res.prod, txn, px)) {
s->target = &http_stats_applet.obj_type;
- if (unlikely(!stream_int_register_handler(s->rep->prod, objt_applet(s->target)))) {
+ if (unlikely(!stream_int_register_handler(s->res.prod, objt_applet(s->target)))) {
txn->status = 500;
s->logs.tv_request = now;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_500));
* If unset, then set it to zero because we really want it to
* eventually expire. We build the tarpit as an analyser.
*/
- channel_erase(s->req);
+ channel_erase(&s->req);
/* wipe the request out so that we can drop the connection early
* if the client closes first.
/* This function performs all the processing enabled for the current request.
* It returns 1 if the processing can continue on next analysers, or zero if it
* needs more data, encounters an error, or wants to immediately abort the
- * request. It relies on buffers flags, and updates s->req->analysers.
+ * request. It relies on buffer flags, and updates s->req.analysers.
*/
int http_process_request(struct session *s, struct channel *req, int an_bit)
{
/* Expect is allowed in 1.1, look for it */
if (http_find_header2("Expect", 6, req->buf->p, &txn->hdr_idx, &ctx) &&
unlikely(ctx.vlen == 12 && strncasecmp(ctx.line+ctx.val, "100-continue", 12) == 0)) {
- bo_inject(s->rep, http_100_chunk.str, http_100_chunk.len);
+ bo_inject(&s->res, http_100_chunk.str, http_100_chunk.len);
}
}
msg->msg_state = HTTP_MSG_100_SENT;
* to the server.
*/
if (((s->txn.flags & TX_CON_WANT_MSK) != TX_CON_WANT_KAL) ||
- !si_conn_ready(s->req->cons)) {
- s->req->cons->flags |= SI_FL_NOLINGER | SI_FL_NOHALF;
- si_shutr(s->req->cons);
- si_shutw(s->req->cons);
+ !si_conn_ready(s->req.cons)) {
+ s->req.cons->flags |= SI_FL_NOLINGER | SI_FL_NOHALF;
+ si_shutr(s->req.cons);
+ si_shutw(s->req.cons);
}
if (s->flags & SN_BE_ASSIGNED) {
}
/* don't count other requests' data */
- s->logs.bytes_in -= s->req->buf->i;
- s->logs.bytes_out -= s->rep->buf->i;
+ s->logs.bytes_in -= s->req.buf->i;
+ s->logs.bytes_out -= s->res.buf->i;
/* let's do a final log if we need it */
if (!LIST_ISEMPTY(&s->fe->logformat) && s->logs.logwait &&
!(s->flags & SN_MONITOR) &&
- (!(s->fe->options & PR_O_NULLNOLOG) || s->req->total)) {
+ (!(s->fe->options & PR_O_NULLNOLOG) || s->req.total)) {
s->do_log(s);
}
s->logs.prx_queue_size = 0; /* we get the number of pending conns before us */
s->logs.srv_queue_size = 0; /* we will get this number soon */
- s->logs.bytes_in = s->req->total = s->req->buf->i;
- s->logs.bytes_out = s->rep->total = s->rep->buf->i;
+ s->logs.bytes_in = s->req.total = s->req.buf->i;
+ s->logs.bytes_out = s->res.total = s->res.buf->i;
if (s->pend_pos)
pendconn_free(s->pend_pos);
* connection.
*/
if (((s->txn.flags & TX_CON_WANT_MSK) != TX_CON_WANT_KAL) ||
- !si_conn_ready(s->req->cons)) {
- si_release_endpoint(s->req->cons);
+ !si_conn_ready(s->req.cons)) {
+ si_release_endpoint(s->req.cons);
}
- s->req->cons->state = s->req->cons->prev_state = SI_ST_INI;
- s->req->cons->err_type = SI_ET_NONE;
- s->req->cons->conn_retries = 0; /* used for logging too */
- s->req->cons->exp = TICK_ETERNITY;
- s->req->cons->flags &= SI_FL_DONT_WAKE; /* we're in the context of process_session */
- s->req->flags &= ~(CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CONNECT|CF_WRITE_ERROR|CF_STREAMER|CF_STREAMER_FAST|CF_NEVER_WAIT|CF_WAKE_CONNECT|CF_WROTE_DATA);
- s->rep->flags &= ~(CF_SHUTR|CF_SHUTR_NOW|CF_READ_ATTACHED|CF_READ_ERROR|CF_READ_NOEXP|CF_STREAMER|CF_STREAMER_FAST|CF_WRITE_PARTIAL|CF_NEVER_WAIT|CF_WROTE_DATA);
+ s->req.cons->state = s->req.cons->prev_state = SI_ST_INI;
+ s->req.cons->err_type = SI_ET_NONE;
+ s->req.cons->conn_retries = 0; /* used for logging too */
+ s->req.cons->exp = TICK_ETERNITY;
+ s->req.cons->flags &= SI_FL_DONT_WAKE; /* we're in the context of process_session */
+ s->req.flags &= ~(CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CONNECT|CF_WRITE_ERROR|CF_STREAMER|CF_STREAMER_FAST|CF_NEVER_WAIT|CF_WAKE_CONNECT|CF_WROTE_DATA);
+ s->res.flags &= ~(CF_SHUTR|CF_SHUTR_NOW|CF_READ_ATTACHED|CF_READ_ERROR|CF_READ_NOEXP|CF_STREAMER|CF_STREAMER_FAST|CF_WRITE_PARTIAL|CF_NEVER_WAIT|CF_WROTE_DATA);
s->flags &= ~(SN_DIRECT|SN_ASSIGNED|SN_ADDR_SET|SN_BE_ASSIGNED|SN_FORCE_PRST|SN_IGNORE_PRST);
s->flags &= ~(SN_CURR_SESS|SN_REDIRECTABLE|SN_SRV_REUSED);
s->flags &= ~(SN_ERR_MASK|SN_FINST_MASK|SN_REDISP);
}
if (s->fe->options2 & PR_O2_INDEPSTR)
- s->req->cons->flags |= SI_FL_INDEP_STR;
+ s->req.cons->flags |= SI_FL_INDEP_STR;
if (s->fe->options2 & PR_O2_NODELAY) {
- s->req->flags |= CF_NEVER_WAIT;
- s->rep->flags |= CF_NEVER_WAIT;
+ s->req.flags |= CF_NEVER_WAIT;
+ s->res.flags |= CF_NEVER_WAIT;
}
/* if the request buffer is not empty, it means we're
 * about to process another request, so send pending
 * data with MSG_MORE to merge TCP packets when possible.
 * Just don't do this if the buffer is close to being full,
 * because the request will wait for it to flush a little
 * bit before proceeding.
 */
- if (s->req->buf->i) {
- if (s->rep->buf->o &&
- !buffer_full(s->rep->buf, global.tune.maxrewrite) &&
- bi_end(s->rep->buf) <= s->rep->buf->data + s->rep->buf->size - global.tune.maxrewrite)
- s->rep->flags |= CF_EXPECT_MORE;
+ if (s->req.buf->i) {
+ if (s->res.buf->o &&
+ !buffer_full(s->res.buf, global.tune.maxrewrite) &&
+ bi_end(s->res.buf) <= s->res.buf->data + s->res.buf->size - global.tune.maxrewrite)
+ s->res.flags |= CF_EXPECT_MORE;
}
/* we're removing the analysers, we MUST re-enable event detection */
- channel_auto_read(s->req);
- channel_auto_close(s->req);
- channel_auto_read(s->rep);
- channel_auto_close(s->rep);
+ channel_auto_read(&s->req);
+ channel_auto_close(&s->req);
+ channel_auto_read(&s->res);
+ channel_auto_close(&s->res);
/* we're in keep-alive with an idle connection, monitor it */
- si_idle_conn(s->req->cons);
+ si_idle_conn(s->req.cons);
- s->req->analysers = s->listener->analysers;
- s->rep->analysers = 0;
+ s->req.analysers = s->listener->analysers;
+ s->res.analysers = 0;
}
*/
int http_sync_req_state(struct session *s)
{
- struct channel *chn = s->req;
+ struct channel *chn = &s->req;
struct http_txn *txn = &s->txn;
unsigned int old_flags = chn->flags;
unsigned int old_state = txn->req.msg_state;
*/
int http_sync_res_state(struct session *s)
{
- struct channel *chn = s->rep;
+ struct channel *chn = &s->res;
struct http_txn *txn = &s->txn;
unsigned int old_flags = chn->flags;
unsigned int old_state = txn->rsp.msg_state;
txn->rsp.msg_state == HTTP_MSG_TUNNEL ||
(txn->req.msg_state == HTTP_MSG_CLOSED &&
txn->rsp.msg_state == HTTP_MSG_CLOSED)) {
- s->req->analysers = 0;
- channel_auto_close(s->req);
- channel_auto_read(s->req);
- s->rep->analysers = 0;
- channel_auto_close(s->rep);
- channel_auto_read(s->rep);
+ s->req.analysers = 0;
+ channel_auto_close(&s->req);
+ channel_auto_read(&s->req);
+ s->res.analysers = 0;
+ channel_auto_close(&s->res);
+ channel_auto_read(&s->res);
}
else if ((txn->req.msg_state >= HTTP_MSG_DONE &&
- (txn->rsp.msg_state == HTTP_MSG_CLOSED || (s->rep->flags & CF_SHUTW))) ||
+ (txn->rsp.msg_state == HTTP_MSG_CLOSED || (s->res.flags & CF_SHUTW))) ||
txn->rsp.msg_state == HTTP_MSG_ERROR ||
txn->req.msg_state == HTTP_MSG_ERROR) {
- s->rep->analysers = 0;
- channel_auto_close(s->rep);
- channel_auto_read(s->rep);
- s->req->analysers = 0;
- channel_abort(s->req);
- channel_auto_close(s->req);
- channel_auto_read(s->req);
- channel_truncate(s->req);
+ s->res.analysers = 0;
+ channel_auto_close(&s->res);
+ channel_auto_read(&s->res);
+ s->req.analysers = 0;
+ channel_abort(&s->req);
+ channel_auto_close(&s->req);
+ channel_auto_read(&s->req);
+ channel_truncate(&s->req);
}
else if ((txn->req.msg_state == HTTP_MSG_DONE ||
txn->req.msg_state == HTTP_MSG_CLOSED) &&
 * which need to parse/process the request after we've enabled forwarding.
*/
if (unlikely(msg->flags & HTTP_MSGF_WAIT_CONN)) {
- if (!(s->rep->flags & CF_READ_ATTACHED)) {
+ if (!(s->res.flags & CF_READ_ATTACHED)) {
channel_auto_connect(req);
req->flags |= CF_WAKE_CONNECT;
goto missing_data;
* such as last chunk of data or trailers.
*/
b_adv(req->buf, msg->next);
- if (unlikely(!(s->req->flags & CF_WROTE_DATA)))
+ if (unlikely(!(s->req.flags & CF_WROTE_DATA)))
msg->sov -= msg->next;
msg->next = 0;
missing_data:
/* we may have some pending data starting at req->buf->p */
b_adv(req->buf, msg->next);
- if (unlikely(!(s->req->flags & CF_WROTE_DATA)))
+ if (unlikely(!(s->req.flags & CF_WROTE_DATA)))
msg->sov -= msg->next + MIN(msg->chunk_len, req->buf->i);
msg->next = 0;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_400));
}
req->analysers = 0;
- s->rep->analysers = 0; /* we're in data phase, we want to abort both directions */
+ s->res.analysers = 0; /* we're in data phase, we want to abort both directions */
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_PRXCOND;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_502));
}
req->analysers = 0;
- s->rep->analysers = 0; /* we're in data phase, we want to abort both directions */
+ s->res.analysers = 0; /* we're in data phase, we want to abort both directions */
s->fe->fe_counters.srv_aborts++;
s->be->be_counters.srv_aborts++;
/* This stream analyser waits for a complete HTTP response. It returns 1 if the
* processing can continue on next analysers, or zero if it either needs more
* data or wants to immediately abort the response (eg: timeout, error, ...). It
- * is tied to AN_RES_WAIT_HTTP and may may remove itself from s->rep->analysers
+ * is tied to AN_RES_WAIT_HTTP and may remove itself from s->res.analysers
* when it has nothing left to do, and may remove any analyser when it wants to
* abort.
*/
}
/* client abort with an abortonclose */
- else if ((rep->flags & CF_SHUTR) && ((s->req->flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))) {
+ else if ((rep->flags & CF_SHUTR) && ((s->req.flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))) {
s->fe->fe_counters.cli_aborts++;
s->be->be_counters.cli_aborts++;
if (objt_server(s->target))
*/
txn->status = 0;
rep->analysers = 0;
- s->req->analysers = 0;
+ s->req.analysers = 0;
channel_auto_close(rep);
s->logs.logwait = 0;
s->logs.level = 0;
- s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
+ s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
channel_truncate(rep);
stream_int_retnclose(rep->cons, NULL);
return 0;
/* This function performs all the processing enabled for the current response.
 * It normally returns 1 unless it wants to break. It relies on buffer flags,
- * and updates s->rep->analysers. It might make sense to explode it into several
+ * and updates s->res.analysers. It might make sense to explode it into several
* other functions. It works like process_request (see indications above).
*/
int http_process_res_common(struct session *s, struct channel *rep, int an_bit, struct proxy *px)
if ((res->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
((res->flags & CF_SHUTW) && (res->to_forward || res->buf->o)) ||
- !s->req->analysers) {
+ !s->req.analysers) {
/* Output closed while we were sending data. We must abort and
* wake the other side up.
*/
* server abort.
*/
if (res->flags & CF_SHUTR) {
- if ((s->req->flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))
+ if ((s->req.flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))
goto aborted_xfer;
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_SRVCL;
}
/* we need to obey the req analyser, so if it leaves, we must too */
- if (!s->req->analysers)
+ if (!s->req.analysers)
goto return_bad_res;
/* When TE: chunked is used, we need to get there again to parse remaining
/* don't send any error message as we're in the body */
stream_int_retnclose(res->cons, NULL);
res->analysers = 0;
- s->req->analysers = 0; /* we're in data phase, we want to abort both directions */
+ s->req.analysers = 0; /* we're in data phase, we want to abort both directions */
if (objt_server(s->target))
health_adjust(objt_server(s->target), HANA_STATUS_HTTP_HDRRSP);
/* don't send any error message as we're in the body */
stream_int_retnclose(res->cons, NULL);
res->analysers = 0;
- s->req->analysers = 0; /* we're in data phase, we want to abort both directions */
+ s->req.analysers = 0; /* we're in data phase, we want to abort both directions */
s->fe->fe_counters.cli_aborts++;
s->be->be_counters.cli_aborts++;
es->sid = s->uniq_id;
es->srv = objt_server(s->target);
es->oe = other_end;
- if (objt_conn(s->req->prod->end))
- es->src = __objt_conn(s->req->prod->end)->addr.from;
+ if (objt_conn(s->req.prod->end))
+ es->src = __objt_conn(s->req.prod->end)->addr.from;
else
memset(&es->src, 0, sizeof(es->src));
int max;
chunk_printf(&trash, "%08x:%s.%s[%04x:%04x]: ", s->uniq_id, s->be->id,
dir,
- objt_conn(s->req->prod->end) ? (unsigned short)objt_conn(s->req->prod->end)->t.sock.fd : -1,
- objt_conn(s->req->cons->end) ? (unsigned short)objt_conn(s->req->cons->end)->t.sock.fd : -1);
+ objt_conn(s->req.prod->end) ? (unsigned short)objt_conn(s->req.prod->end)->t.sock.fd : -1,
+ objt_conn(s->req.cons->end) ? (unsigned short)objt_conn(s->req.cons->end)->t.sock.fd : -1);
for (max = 0; start + max < end; max++)
if (start[max] == '\r' || start[max] == '\n')
txn->rsp.body_len = 0LL;
txn->req.msg_state = HTTP_MSG_RQBEFORE; /* at the very beginning of the request */
txn->rsp.msg_state = HTTP_MSG_RPBEFORE; /* at the very beginning of the response */
- txn->req.chn = s->req;
- txn->rsp.chn = s->rep;
+ txn->req.chn = &s->req;
+ txn->rsp.chn = &s->res;
txn->auth.method = HTTP_AUTH_UNKNOWN;
s->pend_pos = NULL;
- s->req->flags |= CF_READ_DONTWAIT; /* one read is usually enough */
+ s->req.flags |= CF_READ_DONTWAIT; /* one read is usually enough */
/* We must trim any excess data from the response buffer, because we
 * may have blocked an invalid response from a server that we don't
 * perfectly trust anyway (eg: a HEAD with some data, or sending more
 * than the advertised content-length).
 */
- if (unlikely(s->rep->buf->i))
- s->rep->buf->i = 0;
+ if (unlikely(s->res.buf->i))
+ s->res.buf->i = 0;
- s->req->rto = s->fe->timeout.client;
- s->req->wto = TICK_ETERNITY;
+ s->req.rto = s->fe->timeout.client;
+ s->req.wto = TICK_ETERNITY;
- s->rep->rto = TICK_ETERNITY;
- s->rep->wto = s->fe->timeout.client;
+ s->res.rto = TICK_ETERNITY;
+ s->res.wto = s->fe->timeout.client;
- s->req->rex = TICK_ETERNITY;
- s->req->wex = TICK_ETERNITY;
- s->req->analyse_exp = TICK_ETERNITY;
- s->rep->rex = TICK_ETERNITY;
- s->rep->wex = TICK_ETERNITY;
- s->rep->analyse_exp = TICK_ETERNITY;
+ s->req.rex = TICK_ETERNITY;
+ s->req.wex = TICK_ETERNITY;
+ s->req.analyse_exp = TICK_ETERNITY;
+ s->res.rex = TICK_ETERNITY;
+ s->res.wex = TICK_ETERNITY;
+ s->res.analyse_exp = TICK_ETERNITY;
}
void free_http_res_rules(struct list *r)
smp->type = SMP_T_BOOL;
if ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ) {
- if (unlikely(!s->req))
- return 0;
-
/* If the buffer does not leave enough free space at the end,
* we must first realign it.
*/
- if (s->req->buf->p > s->req->buf->data &&
- s->req->buf->i + s->req->buf->p > s->req->buf->data + s->req->buf->size - global.tune.maxrewrite)
- buffer_slow_realign(s->req->buf);
+ if (s->req.buf->p > s->req.buf->data &&
+ s->req.buf->i + s->req.buf->p > s->req.buf->data + s->req.buf->size - global.tune.maxrewrite)
+ buffer_slow_realign(s->req.buf);
if (unlikely(txn->req.msg_state < HTTP_MSG_BODY)) {
if (msg->msg_state == HTTP_MSG_ERROR)
return 0;
/* Try to decode HTTP request */
- if (likely(msg->next < s->req->buf->i))
+ if (likely(msg->next < s->req.buf->i))
http_msg_analyzer(msg, &txn->hdr_idx);
/* Still no valid request ? */
if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
if ((msg->msg_state == HTTP_MSG_ERROR) ||
- buffer_full(s->req->buf, global.tune.maxrewrite)) {
+ buffer_full(s->req.buf, global.tune.maxrewrite)) {
return 0;
}
/* wait for final state */
* cannot happen, but if the parsers are to change in the future,
* we want this check to be maintained.
*/
- if (unlikely(s->req->buf->i + s->req->buf->p >
- s->req->buf->data + s->req->buf->size - global.tune.maxrewrite)) {
+ if (unlikely(s->req.buf->i + s->req.buf->p >
+ s->req.buf->data + s->req.buf->size - global.tune.maxrewrite)) {
msg->msg_state = HTTP_MSG_ERROR;
smp->data.uint = 1;
return 1;
switch (*(int *)&rule->arg.act.p[2]) {
case 0: // method
- cur_ptr = s->req->buf->p;
+ cur_ptr = s->req.buf->p;
cur_end = cur_ptr + txn->req.sl.rq.m_l;
/* adjust req line offsets and lengths */
case 1: // path
cur_ptr = http_get_path(txn);
if (!cur_ptr)
- cur_ptr = s->req->buf->p + txn->req.sl.rq.u;
+ cur_ptr = s->req.buf->p + txn->req.sl.rq.u;
cur_end = cur_ptr;
- while (cur_end < s->req->buf->p + txn->req.sl.rq.u + txn->req.sl.rq.u_l && *cur_end != '?')
+ while (cur_end < s->req.buf->p + txn->req.sl.rq.u + txn->req.sl.rq.u_l && *cur_end != '?')
cur_end++;
/* adjust req line offsets and lengths */
break;
case 2: // query
- cur_ptr = s->req->buf->p + txn->req.sl.rq.u;
+ cur_ptr = s->req.buf->p + txn->req.sl.rq.u;
cur_end = cur_ptr + txn->req.sl.rq.u_l;
while (cur_ptr < cur_end && *cur_ptr != '?')
cur_ptr++;
break;
case 3: // uri
- cur_ptr = s->req->buf->p + txn->req.sl.rq.u;
+ cur_ptr = s->req.buf->p + txn->req.sl.rq.u;
cur_end = cur_ptr + txn->req.sl.rq.u_l;
/* adjust req line offsets and lengths */
}
/* commit changes and adjust end of message */
- delta = buffer_replace2(s->req->buf, cur_ptr, cur_end, trash.str + offset, trash.len - offset);
+ delta = buffer_replace2(s->req.buf, cur_ptr, cur_end, trash.str + offset, trash.len - offset);
http_msg_move_end(&txn->req, delta);
return 0;
}
/* we have a matching rule. */
if (rule->action == TCP_ACT_REJECT) {
channel_abort(req);
- channel_abort(s->rep);
+ channel_abort(&s->res);
req->analysers = 0;
s->be->be_counters.denied_req++;
/* we have a matching rule. */
if (rule->action == TCP_ACT_REJECT) {
channel_abort(rep);
- channel_abort(s->req);
+ channel_abort(&s->req);
rep->analysers = 0;
s->be->be_counters.denied_resp++;
s->txn.req.flags |= HTTP_MSGF_WAIT_CONN;
if (be->options2 & PR_O2_NODELAY) {
- s->req->flags |= CF_NEVER_WAIT;
- s->rep->flags |= CF_NEVER_WAIT;
+ s->req.flags |= CF_NEVER_WAIT;
+ s->res.flags |= CF_NEVER_WAIT;
}
/* We want to enable the backend-specific analysers except those which
 * were already run as part of the listener's analysers. It would
 * be more reliable to store the list of analysers that have been run,
 * but what we do here is OK for now.
 */
- s->req->analysers |= be->be_req_ana & ~(s->listener->analysers);
+ s->req.analysers |= be->be_req_ana & ~(s->listener->analysers);
return 1;
}
* when the default backend is assigned.
*/
s->be = s->fe;
- s->req = s->rep = NULL; /* will be allocated later */
s->comp_algo = NULL;
+ s->req.buf = s->res.buf = NULL;
/* Let's count a session now */
proxy_inc_fe_sess_ctr(l, p);
/* init store persistence */
s->store_count = 0;
- if (unlikely((s->req = pool_alloc2(pool2_channel)) == NULL))
- goto out_free_task; /* no memory */
-
- channel_init(s->req);
- s->req->prod = &s->si[0];
- s->req->cons = &s->si[1];
- s->si[0].ib = s->si[1].ob = s->req;
- s->req->flags |= CF_READ_ATTACHED; /* the producer is already connected */
+ channel_init(&s->req);
+ s->req.prod = &s->si[0];
+ s->req.cons = &s->si[1];
+ s->si[0].ib = s->si[1].ob = &s->req;
+ s->req.flags |= CF_READ_ATTACHED; /* the producer is already connected */
/* activate default analysers enabled for this listener */
- s->req->analysers = l->analysers;
-
- s->req->wto = TICK_ETERNITY;
- s->req->rto = TICK_ETERNITY;
- s->req->rex = TICK_ETERNITY;
- s->req->wex = TICK_ETERNITY;
- s->req->analyse_exp = TICK_ETERNITY;
+ s->req.analysers = l->analysers;
- if (unlikely((s->rep = pool_alloc2(pool2_channel)) == NULL))
- goto out_free_req; /* no memory */
+ s->req.wto = TICK_ETERNITY;
+ s->req.rto = TICK_ETERNITY;
+ s->req.rex = TICK_ETERNITY;
+ s->req.wex = TICK_ETERNITY;
+ s->req.analyse_exp = TICK_ETERNITY;
- channel_init(s->rep);
- s->rep->prod = &s->si[1];
- s->rep->cons = &s->si[0];
- s->si[0].ob = s->si[1].ib = s->rep;
- s->rep->analysers = 0;
+ channel_init(&s->res);
+ s->res.prod = &s->si[1];
+ s->res.cons = &s->si[0];
+ s->si[0].ob = s->si[1].ib = &s->res;
+ s->res.analysers = 0;
if (s->fe->options2 & PR_O2_NODELAY) {
- s->req->flags |= CF_NEVER_WAIT;
- s->rep->flags |= CF_NEVER_WAIT;
+ s->req.flags |= CF_NEVER_WAIT;
+ s->res.flags |= CF_NEVER_WAIT;
}
- s->rep->rto = TICK_ETERNITY;
- s->rep->wto = TICK_ETERNITY;
- s->rep->rex = TICK_ETERNITY;
- s->rep->wex = TICK_ETERNITY;
- s->rep->analyse_exp = TICK_ETERNITY;
+ s->res.rto = TICK_ETERNITY;
+ s->res.wto = TICK_ETERNITY;
+ s->res.rex = TICK_ETERNITY;
+ s->res.wex = TICK_ETERNITY;
+ s->res.analyse_exp = TICK_ETERNITY;
txn = &s->txn;
/* Those variables will be checked and freed if non-NULL in
txn->req.flags = 0;
txn->rsp.flags = 0;
/* the HTTP messages need to know what buffer they're associated with */
- txn->req.chn = s->req;
- txn->rsp.chn = s->rep;
+ txn->req.chn = &s->req;
+ txn->rsp.chn = &s->res;
HLUA_INIT(&s->hlua);
* finished (=0, eg: monitoring), in both situations,
* we can release everything and close.
*/
- goto out_free_rep;
+ goto out_free_strm;
}
/* if logs require transport layer information, note it on the connection */
return 1;
/* Error unrolling */
- out_free_rep:
- pool_free2(pool2_channel, s->rep);
- out_free_req:
- pool_free2(pool2_channel, s->req);
- out_free_task:
+ out_free_strm:
/* and restore the connection pointer in case we destroyed it,
* because kill_mini_session() will need it.
*/
sess_change_server(s, NULL);
}
- if (s->req->pipe)
- put_pipe(s->req->pipe);
+ if (s->req.pipe)
+ put_pipe(s->req.pipe);
- if (s->rep->pipe)
- put_pipe(s->rep->pipe);
+ if (s->res.pipe)
+ put_pipe(s->res.pipe);
/* We may still be present in the buffer wait queue */
if (!LIST_ISEMPTY(&s->buffer_wait)) {
LIST_DEL(&s->buffer_wait);
LIST_INIT(&s->buffer_wait);
}
- b_drop(&s->req->buf);
- b_drop(&s->rep->buf);
+ b_drop(&s->req.buf);
+ b_drop(&s->res.buf);
if (!LIST_ISEMPTY(&buffer_wq))
session_offer_buffers();
hlua_ctx_destroy(&s->hlua);
-
- pool_free2(pool2_channel, s->req);
- pool_free2(pool2_channel, s->rep);
-
http_end_txn(s);
/* ensure the client-side transport layer is destroyed */
struct buffer *b;
int margin = 0;
- if (buf == &s->req->buf)
+ if (buf == &s->req.buf)
margin = global.tune.reserved_bufs;
b = b_alloc_margin(buf, margin);
struct buffer **buf;
if (objt_appctx(s->si[0].end)) {
- buf = &s->req->buf;
+ buf = &s->req.buf;
margin = global.tune.reserved_bufs;
}
else {
- buf = &s->rep->buf;
+ buf = &s->res.buf;
margin = 0;
}
*/
void session_release_buffers(struct session *s)
{
- if (s->req->buf->size && buffer_empty(s->req->buf))
- b_free(&s->req->buf);
+ if (s->req.buf->size && buffer_empty(s->req.buf))
+ b_free(&s->req.buf);
- if (s->rep->buf->size && buffer_empty(s->rep->buf))
- b_free(&s->rep->buf);
+ if (s->res.buf->size && buffer_empty(s->res.buf))
+ b_free(&s->res.buf);
/* if we're certain to have at least 1 buffer available, and there is
* someone waiting, we can wake up a waiter and offer them.
*/
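The release logic above frees a channel's buffer only when it has storage and carries no data. A small sketch of that guard, with a stub buffer type standing in for the real one:

    struct buf_model { int size, i, o; };   /* i = input data, o = output data */

    static void release_if_idle(struct buf_model *b)
    {
        /* only give the storage back when nothing is pending in or out,
         * so other sessions can allocate under memory pressure
         */
        if (b->size && !b->i && !b->o)
            b->size = 0;                    /* stands in for b_free() */
    }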
void *ptr;
int i;
- if (s->req) {
- bytes = s->req->total - s->logs.bytes_in;
- s->logs.bytes_in = s->req->total;
- if (bytes) {
- s->fe->fe_counters.bytes_in += bytes;
-
- s->be->be_counters.bytes_in += bytes;
+ bytes = s->req.total - s->logs.bytes_in;
+ s->logs.bytes_in = s->req.total;
+ if (bytes) {
+ s->fe->fe_counters.bytes_in += bytes;
- if (objt_server(s->target))
- objt_server(s->target)->counters.bytes_in += bytes;
+ s->be->be_counters.bytes_in += bytes;
- if (s->listener && s->listener->counters)
- s->listener->counters->bytes_in += bytes;
+ if (objt_server(s->target))
+ objt_server(s->target)->counters.bytes_in += bytes;
- for (i = 0; i < MAX_SESS_STKCTR; i++) {
- if (!stkctr_entry(&s->stkctr[i]))
- continue;
+ if (s->listener && s->listener->counters)
+ s->listener->counters->bytes_in += bytes;
- ptr = stktable_data_ptr(s->stkctr[i].table,
- stkctr_entry(&s->stkctr[i]),
- STKTABLE_DT_BYTES_IN_CNT);
- if (ptr)
- stktable_data_cast(ptr, bytes_in_cnt) += bytes;
+ for (i = 0; i < MAX_SESS_STKCTR; i++) {
+ if (!stkctr_entry(&s->stkctr[i]))
+ continue;
- ptr = stktable_data_ptr(s->stkctr[i].table,
- stkctr_entry(&s->stkctr[i]),
- STKTABLE_DT_BYTES_IN_RATE);
- if (ptr)
- update_freq_ctr_period(&stktable_data_cast(ptr, bytes_in_rate),
- s->stkctr[i].table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u, bytes);
- }
+ ptr = stktable_data_ptr(s->stkctr[i].table,
+ stkctr_entry(&s->stkctr[i]),
+ STKTABLE_DT_BYTES_IN_CNT);
+ if (ptr)
+ stktable_data_cast(ptr, bytes_in_cnt) += bytes;
+
+ ptr = stktable_data_ptr(s->stkctr[i].table,
+ stkctr_entry(&s->stkctr[i]),
+ STKTABLE_DT_BYTES_IN_RATE);
+ if (ptr)
+ update_freq_ctr_period(&stktable_data_cast(ptr, bytes_in_rate),
+ s->stkctr[i].table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u, bytes);
}
}
- if (s->rep) {
- bytes = s->rep->total - s->logs.bytes_out;
- s->logs.bytes_out = s->rep->total;
- if (bytes) {
- s->fe->fe_counters.bytes_out += bytes;
-
- s->be->be_counters.bytes_out += bytes;
+ bytes = s->res.total - s->logs.bytes_out;
+ s->logs.bytes_out = s->res.total;
+ if (bytes) {
+ s->fe->fe_counters.bytes_out += bytes;
- if (objt_server(s->target))
- objt_server(s->target)->counters.bytes_out += bytes;
+ s->be->be_counters.bytes_out += bytes;
- if (s->listener && s->listener->counters)
- s->listener->counters->bytes_out += bytes;
+ if (objt_server(s->target))
+ objt_server(s->target)->counters.bytes_out += bytes;
- for (i = 0; i < MAX_SESS_STKCTR; i++) {
- if (!stkctr_entry(&s->stkctr[i]))
- continue;
+ if (s->listener && s->listener->counters)
+ s->listener->counters->bytes_out += bytes;
- ptr = stktable_data_ptr(s->stkctr[i].table,
- stkctr_entry(&s->stkctr[i]),
- STKTABLE_DT_BYTES_OUT_CNT);
- if (ptr)
- stktable_data_cast(ptr, bytes_out_cnt) += bytes;
+ for (i = 0; i < MAX_SESS_STKCTR; i++) {
+ if (!stkctr_entry(&s->stkctr[i]))
+ continue;
- ptr = stktable_data_ptr(s->stkctr[i].table,
- stkctr_entry(&s->stkctr[i]),
- STKTABLE_DT_BYTES_OUT_RATE);
- if (ptr)
- update_freq_ctr_period(&stktable_data_cast(ptr, bytes_out_rate),
- s->stkctr[i].table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u, bytes);
- }
+ ptr = stktable_data_ptr(s->stkctr[i].table,
+ stkctr_entry(&s->stkctr[i]),
+ STKTABLE_DT_BYTES_OUT_CNT);
+ if (ptr)
+ stktable_data_cast(ptr, bytes_out_cnt) += bytes;
+
+ ptr = stktable_data_ptr(s->stkctr[i].table,
+ stkctr_entry(&s->stkctr[i]),
+ STKTABLE_DT_BYTES_OUT_RATE);
+ if (ptr)
+ update_freq_ctr_period(&stktable_data_cast(ptr, bytes_out_rate),
+ s->stkctr[i].table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u, bytes);
}
}
}
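All the accounting above is delta-based: each pass adds only the bytes seen since the previous snapshot, so the function can run any number of times per session without double counting. The core of that pattern, as a sketch with stub types:

    struct chan_model { unsigned long long total; };     /* running byte total */
    struct logs_model { unsigned long long bytes_in; };  /* last snapshot */

    static unsigned long long take_delta(struct chan_model *c, struct logs_model *l)
    {
        unsigned long long bytes = c->total - l->bytes_in;

        l->bytes_in = c->total;  /* remember where we stopped for the next pass */
        return bytes;            /* amount to add to fe/be/srv/stick counters */
    }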
now_ms, __FUNCTION__,
s,
- s->req, s->rep,
+ &s->req, &s->res,
- s->req->rex, s->rep->wex,
- s->req->flags, s->rep->flags,
- s->req->buf->i, s->req->buf->o, s->rep->buf->i, s->rep->buf->o, s->rep->cons->state, s->req->cons->state);
+ s->req.rex, s->res.wex,
+ s->req.flags, s->res.flags,
+ s->req.buf->i, s->req.buf->o, s->res.buf->i, s->res.buf->o, s->res.cons->state, s->req.cons->state);
if (si->state == SI_ST_ASS) {
/* Server assigned to connection request, we have to try to connect now */
now_ms, __FUNCTION__,
s,
- s->req, s->rep,
+ &s->req, &s->res,
- s->req->rex, s->rep->wex,
- s->req->flags, s->rep->flags,
- s->req->buf->i, s->req->buf->o, s->rep->buf->i, s->rep->buf->o, s->rep->cons->state, s->req->cons->state);
+ s->req.rex, s->res.wex,
+ s->req.flags, s->res.flags,
+ s->req.buf->i, s->req.buf->o, s->res.buf->i, s->res.buf->o, s->res.cons->state, s->req.cons->state);
if (si->state != SI_ST_REQ)
return;
/* we don't want to run the TCP or HTTP filters again if the backend has not changed */
if (s->fe == s->be) {
- s->req->analysers &= ~AN_REQ_INSPECT_BE;
- s->req->analysers &= ~AN_REQ_HTTP_PROCESS_BE;
+ s->req.analysers &= ~AN_REQ_INSPECT_BE;
+ s->req.analysers &= ~AN_REQ_HTTP_PROCESS_BE;
}
/* as soon as we know the backend, we must check if we have a matching forced or ignored
sw_failed:
/* immediately abort this request in case of allocation failure */
- channel_abort(s->req);
- channel_abort(s->rep);
+ channel_abort(&s->req);
+ channel_abort(&s->res);
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_RESOURCE;
s->flags |= SN_FINST_R;
s->txn.status = 500;
- s->req->analysers = 0;
- s->req->analyse_exp = TICK_ETERNITY;
+ s->req.analysers = 0;
+ s->req.analyse_exp = TICK_ETERNITY;
return 0;
}
unsigned int req_ana_back;
//DPRINTF(stderr, "%s:%d: cs=%d ss=%d(%d) rqf=0x%08x rpf=0x%08x\n", __FUNCTION__, __LINE__,
- // s->si[0].state, s->si[1].state, s->si[1].err_type, s->req->flags, s->rep->flags);
+ // s->si[0].state, s->si[1].state, s->si[1].err_type, s->req.flags, s->res.flags);
/* this data may be no longer valid, clear it */
memset(&s->txn.auth, 0, sizeof(s->txn.auth));
/* This flag must explicitly be set every time */
- s->req->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
- s->rep->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
+ s->req.flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
+ s->res.flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
/* Keep a copy of req/rep flags so that we can detect shutdowns */
- rqf_last = s->req->flags & ~CF_MASK_ANALYSER;
- rpf_last = s->rep->flags & ~CF_MASK_ANALYSER;
+ rqf_last = s->req.flags & ~CF_MASK_ANALYSER;
+ rpf_last = s->res.flags & ~CF_MASK_ANALYSER;
/* we don't want the stream interface functions to recursively wake us up */
- if (s->req->prod->owner == t)
- s->req->prod->flags |= SI_FL_DONT_WAKE;
- if (s->req->cons->owner == t)
- s->req->cons->flags |= SI_FL_DONT_WAKE;
+ if (s->req.prod->owner == t)
+ s->req.prod->flags |= SI_FL_DONT_WAKE;
+ if (s->req.cons->owner == t)
+ s->req.cons->flags |= SI_FL_DONT_WAKE;
/* 1a: Check for low level timeouts if needed. We just set a flag on
 * stream interfaces when their timeouts have expired.
 */
/* check the channel timeouts, and close the corresponding stream
 * interfaces for future reads or writes. Note: this will also concern
 * upper layers but we do not touch any other flag. We must be careful
 * and correctly detect state changes when calling them.
 */
- channel_check_timeouts(s->req);
+ channel_check_timeouts(&s->req);
- if (unlikely((s->req->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
- s->req->cons->flags |= SI_FL_NOLINGER;
- si_shutw(s->req->cons);
+ if (unlikely((s->req.flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
+ s->req.cons->flags |= SI_FL_NOLINGER;
+ si_shutw(s->req.cons);
}
- if (unlikely((s->req->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
- if (s->req->prod->flags & SI_FL_NOHALF)
- s->req->prod->flags |= SI_FL_NOLINGER;
- si_shutr(s->req->prod);
+ if (unlikely((s->req.flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
+ if (s->req.prod->flags & SI_FL_NOHALF)
+ s->req.prod->flags |= SI_FL_NOLINGER;
+ si_shutr(s->req.prod);
}
- channel_check_timeouts(s->rep);
+ channel_check_timeouts(&s->res);
- if (unlikely((s->rep->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
- s->rep->cons->flags |= SI_FL_NOLINGER;
- si_shutw(s->rep->cons);
+ if (unlikely((s->res.flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
+ s->res.cons->flags |= SI_FL_NOLINGER;
+ si_shutw(s->res.cons);
}
- if (unlikely((s->rep->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
- if (s->rep->prod->flags & SI_FL_NOHALF)
- s->rep->prod->flags |= SI_FL_NOLINGER;
- si_shutr(s->rep->prod);
+ if (unlikely((s->res.flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
+ if (s->res.prod->flags & SI_FL_NOHALF)
+ s->res.prod->flags |= SI_FL_NOLINGER;
+ si_shutr(s->res.prod);
}
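All four timeout checks above rely on the same masked-equality idiom: the timeout bit must be set while the corresponding shutdown bit is still clear, otherwise the event was already handled. A sketch with illustrative flag values (the real CF_* constants differ):

    enum { XCF_SHUTW = 0x1, XCF_WRITE_TIMEOUT = 0x2,
           XCF_SHUTR = 0x4, XCF_READ_TIMEOUT  = 0x8 };

    /* write timeout fired and the output side is not yet shut:
     * this guards si_shutw() on the consumer */
    static int write_timed_out(unsigned int flags)
    {
        return (flags & (XCF_SHUTW | XCF_WRITE_TIMEOUT)) == XCF_WRITE_TIMEOUT;
    }

    /* same idea for the read side, guarding si_shutr() on the producer */
    static int read_timed_out(unsigned int flags)
    {
        return (flags & (XCF_SHUTR | XCF_READ_TIMEOUT)) == XCF_READ_TIMEOUT;
    }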
/* Once in a while we're woken up because the task expires. But
 * apart from the expiration, nothing else has changed in that case.
 * So let's not run a whole session processing if only an expiration
 * timeout needs to be refreshed.
 */
- if (!((s->req->flags | s->rep->flags) &
+ if (!((s->req.flags | s->res.flags) &
(CF_SHUTR|CF_READ_ACTIVITY|CF_READ_TIMEOUT|CF_SHUTW|
CF_WRITE_ACTIVITY|CF_WRITE_TIMEOUT|CF_ANA_TIMEOUT)) &&
!((s->si[0].flags | s->si[1].flags) & (SI_FL_EXP|SI_FL_ERR)) &&
si_shutr(&s->si[0]);
si_shutw(&s->si[0]);
stream_int_report_error(&s->si[0]);
- if (!(s->req->analysers) && !(s->rep->analysers)) {
+ if (!(s->req.analysers) && !(s->res.analysers)) {
s->be->be_counters.cli_aborts++;
s->fe->fe_counters.cli_aborts++;
if (srv)
s->be->be_counters.failed_resp++;
if (srv)
srv->counters.failed_resp++;
- if (!(s->req->analysers) && !(s->rep->analysers)) {
+ if (!(s->req.analysers) && !(s->res.analysers)) {
s->be->be_counters.srv_aborts++;
s->fe->fe_counters.srv_aborts++;
if (srv)
t,
s, s->flags,
- s->req, s->rep,
+ &s->req, &s->res,
- s->req->rex, s->rep->wex,
- s->req->flags, s->rep->flags,
- s->req->buf->i, s->req->buf->o, s->rep->buf->i, s->rep->buf->o, s->rep->cons->state, s->req->cons->state,
- s->rep->cons->err_type, s->req->cons->err_type,
- s->req->cons->conn_retries);
+ s->req.rex, s->res.wex,
+ s->req.flags, s->res.flags,
+ s->req.buf->i, s->req.buf->o, s->res.buf->i, s->res.buf->o, s->res.cons->state, s->req.cons->state,
+ s->res.cons->err_type, s->req.cons->err_type,
+ s->req.cons->conn_retries);
/* nothing special to be done on client side */
- if (unlikely(s->req->prod->state == SI_ST_DIS))
- s->req->prod->state = SI_ST_CLO;
+ if (unlikely(s->req.prod->state == SI_ST_DIS))
+ s->req.prod->state = SI_ST_CLO;
/* When a server-side connection is released, we have to count it and
* check for pending connections on this server.
*/
- if (unlikely(s->req->cons->state == SI_ST_DIS)) {
- s->req->cons->state = SI_ST_CLO;
+ if (unlikely(s->req.cons->state == SI_ST_DIS)) {
+ s->req.cons->state = SI_ST_CLO;
srv = objt_server(s->target);
if (srv) {
if (s->flags & SN_CURR_SESS) {
resync_request:
/* Analyse request */
- if (((s->req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
- ((s->req->flags ^ rqf_last) & CF_MASK_STATIC) ||
+ if (((s->req.flags & ~rqf_last) & CF_MASK_ANALYSER) ||
+ ((s->req.flags ^ rqf_last) & CF_MASK_STATIC) ||
s->si[0].state != rq_prod_last ||
s->si[1].state != rq_cons_last ||
s->task->state & TASK_WOKEN_MSG) {
- unsigned int flags = s->req->flags;
+ unsigned int flags = s->req.flags;
- if (s->req->prod->state >= SI_ST_EST) {
+ if (s->req.prod->state >= SI_ST_EST) {
int max_loops = global.tune.maxpollevents;
unsigned int ana_list;
unsigned int ana_back;
* enabling them again when it disables itself, so
* that other analysers are called in similar conditions.
*/
- channel_auto_read(s->req);
- channel_auto_connect(s->req);
- channel_auto_close(s->req);
+ channel_auto_read(&s->req);
+ channel_auto_connect(&s->req);
+ channel_auto_close(&s->req);
/* We will call all analysers for which a bit is set in
- * s->req->analysers, following the bit order from LSB
+ * s->req.analysers, following the bit order from LSB
* to MSB. The analysers must remove themselves from
* the list when not needed. Any analyser may return 0
* to break out of the loop, either because of missing
* data to take a decision, or because it decided to
* kill the session. An analyser may also re-enable an earlier
* analyser, and the loop must then immediately run again.
*/
- ana_list = ana_back = s->req->analysers;
+ ana_list = ana_back = s->req.analysers;
while (ana_list && max_loops--) {
/* Warning! ensure that analysers are always placed in ascending order! */
if (ana_list & AN_REQ_INSPECT_FE) {
- if (!tcp_inspect_request(s, s->req, AN_REQ_INSPECT_FE))
+ if (!tcp_inspect_request(s, &s->req, AN_REQ_INSPECT_FE))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_INSPECT_FE);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_INSPECT_FE);
}
if (ana_list & AN_REQ_WAIT_HTTP) {
- if (!http_wait_for_request(s, s->req, AN_REQ_WAIT_HTTP))
+ if (!http_wait_for_request(s, &s->req, AN_REQ_WAIT_HTTP))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_WAIT_HTTP);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_WAIT_HTTP);
}
if (ana_list & AN_REQ_HTTP_PROCESS_FE) {
- if (!http_process_req_common(s, s->req, AN_REQ_HTTP_PROCESS_FE, s->fe))
+ if (!http_process_req_common(s, &s->req, AN_REQ_HTTP_PROCESS_FE, s->fe))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_PROCESS_FE);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_PROCESS_FE);
}
if (ana_list & AN_REQ_SWITCHING_RULES) {
- if (!process_switching_rules(s, s->req, AN_REQ_SWITCHING_RULES))
+ if (!process_switching_rules(s, &s->req, AN_REQ_SWITCHING_RULES))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_SWITCHING_RULES);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_SWITCHING_RULES);
}
if (ana_list & AN_REQ_INSPECT_BE) {
- if (!tcp_inspect_request(s, s->req, AN_REQ_INSPECT_BE))
+ if (!tcp_inspect_request(s, &s->req, AN_REQ_INSPECT_BE))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_INSPECT_BE);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_INSPECT_BE);
}
if (ana_list & AN_REQ_HTTP_PROCESS_BE) {
- if (!http_process_req_common(s, s->req, AN_REQ_HTTP_PROCESS_BE, s->be))
+ if (!http_process_req_common(s, &s->req, AN_REQ_HTTP_PROCESS_BE, s->be))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_PROCESS_BE);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_PROCESS_BE);
}
if (ana_list & AN_REQ_HTTP_TARPIT) {
- if (!http_process_tarpit(s, s->req, AN_REQ_HTTP_TARPIT))
+ if (!http_process_tarpit(s, &s->req, AN_REQ_HTTP_TARPIT))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_TARPIT);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_TARPIT);
}
if (ana_list & AN_REQ_SRV_RULES) {
- if (!process_server_rules(s, s->req, AN_REQ_SRV_RULES))
+ if (!process_server_rules(s, &s->req, AN_REQ_SRV_RULES))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_SRV_RULES);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_SRV_RULES);
}
if (ana_list & AN_REQ_HTTP_INNER) {
- if (!http_process_request(s, s->req, AN_REQ_HTTP_INNER))
+ if (!http_process_request(s, &s->req, AN_REQ_HTTP_INNER))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_INNER);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_INNER);
}
if (ana_list & AN_REQ_HTTP_BODY) {
- if (!http_wait_for_request_body(s, s->req, AN_REQ_HTTP_BODY))
+ if (!http_wait_for_request_body(s, &s->req, AN_REQ_HTTP_BODY))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_BODY);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_BODY);
}
if (ana_list & AN_REQ_PRST_RDP_COOKIE) {
- if (!tcp_persist_rdp_cookie(s, s->req, AN_REQ_PRST_RDP_COOKIE))
+ if (!tcp_persist_rdp_cookie(s, &s->req, AN_REQ_PRST_RDP_COOKIE))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_PRST_RDP_COOKIE);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_PRST_RDP_COOKIE);
}
if (ana_list & AN_REQ_STICKING_RULES) {
- if (!process_sticking_rules(s, s->req, AN_REQ_STICKING_RULES))
+ if (!process_sticking_rules(s, &s->req, AN_REQ_STICKING_RULES))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_STICKING_RULES);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_STICKING_RULES);
}
if (ana_list & AN_REQ_HTTP_XFER_BODY) {
- if (!http_request_forward_body(s, s->req, AN_REQ_HTTP_XFER_BODY))
+ if (!http_request_forward_body(s, &s->req, AN_REQ_HTTP_XFER_BODY))
break;
- UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_XFER_BODY);
+ UPDATE_ANALYSERS(s->req.analysers, ana_list, ana_back, AN_REQ_HTTP_XFER_BODY);
}
break;
}
rq_prod_last = s->si[0].state;
rq_cons_last = s->si[1].state;
- s->req->flags &= ~CF_WAKE_ONCE;
- rqf_last = s->req->flags;
+ s->req.flags &= ~CF_WAKE_ONCE;
+ rqf_last = s->req.flags;
- if ((s->req->flags ^ flags) & CF_MASK_STATIC)
+ if ((s->req.flags ^ flags) & CF_MASK_STATIC)
goto resync_request;
}
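The dispatch pattern used by the loop above can be summed up as: run analysers from the lowest set bit to the highest, let each one clear its own bit, and start over whenever an earlier bit gets re-enabled. A simplified sketch of that idea (the real UPDATE_ANALYSERS macro additionally tracks ana_back to detect re-enabled earlier bits; the clearing is folded into the loop here, and run_one is a hypothetical callback):

    static void run_analysers(unsigned int *analysers,
                              int (*run_one)(unsigned int bit))
    {
        unsigned int ana = *analysers;

        while (ana) {
            unsigned int bit = ana & -ana;  /* isolate the lowest set bit */

            if (!run_one(bit))              /* missing data or aborted: stop */
                break;
            *analysers &= ~bit;             /* the analyser removes itself */
            ana = *analysers;               /* re-read: bits may have changed */
        }
    }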
* because some response analysers may indirectly enable new request
* analysers (eg: HTTP keep-alive).
*/
- req_ana_back = s->req->analysers;
+ req_ana_back = s->req.analysers;
resync_response:
/* Analyse response */
- if (((s->rep->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
- (s->rep->flags ^ rpf_last) & CF_MASK_STATIC ||
+ if (((s->res.flags & ~rpf_last) & CF_MASK_ANALYSER) ||
+ (s->res.flags ^ rpf_last) & CF_MASK_STATIC ||
s->si[0].state != rp_cons_last ||
s->si[1].state != rp_prod_last ||
s->task->state & TASK_WOKEN_MSG) {
- unsigned int flags = s->rep->flags;
+ unsigned int flags = s->res.flags;
- if ((s->rep->flags & CF_MASK_ANALYSER) &&
- (s->rep->analysers & AN_REQ_ALL)) {
+ if ((s->res.flags & CF_MASK_ANALYSER) &&
+ (s->res.analysers & AN_REQ_ALL)) {
/* Due to HTTP pipelining, the HTTP request analyser might be waiting
* for some free space in the response buffer, so we might need to call
* it when something changes in the response buffer, but still we pass
* it the request buffer. Note that the SI state might very well still
* be zero due to us returning a flow of redirects!
*/
- s->rep->analysers &= ~AN_REQ_ALL;
- s->req->flags |= CF_WAKE_ONCE;
+ s->res.analysers &= ~AN_REQ_ALL;
+ s->req.flags |= CF_WAKE_ONCE;
}
- if (s->rep->prod->state >= SI_ST_EST) {
+ if (s->res.prod->state >= SI_ST_EST) {
int max_loops = global.tune.maxpollevents;
unsigned int ana_list;
unsigned int ana_back;
* it disables itself, so that other analysers are called
* in similar conditions.
*/
- channel_auto_read(s->rep);
- channel_auto_close(s->rep);
+ channel_auto_read(&s->res);
+ channel_auto_close(&s->res);
/* We will call all analysers for which a bit is set in
- * s->rep->analysers, following the bit order from LSB
+ * s->res.analysers, following the bit order from LSB
* to MSB. The analysers must remove themselves from
* the list when not needed. Any analyser may return 0
* to break out of the loop, either because of missing
* data to take a decision, or because it decided to
* kill the session. We loop at least once through each
* analyser, and we may loop again if other analysers
* are added in the middle.
*/
- ana_list = ana_back = s->rep->analysers;
+ ana_list = ana_back = s->res.analysers;
while (ana_list && max_loops--) {
/* Warning! ensure that analysers are always placed in ascending order! */
if (ana_list & AN_RES_INSPECT) {
- if (!tcp_inspect_response(s, s->rep, AN_RES_INSPECT))
+ if (!tcp_inspect_response(s, &s->res, AN_RES_INSPECT))
break;
- UPDATE_ANALYSERS(s->rep->analysers, ana_list, ana_back, AN_RES_INSPECT);
+ UPDATE_ANALYSERS(s->res.analysers, ana_list, ana_back, AN_RES_INSPECT);
}
if (ana_list & AN_RES_WAIT_HTTP) {
- if (!http_wait_for_response(s, s->rep, AN_RES_WAIT_HTTP))
+ if (!http_wait_for_response(s, &s->res, AN_RES_WAIT_HTTP))
break;
- UPDATE_ANALYSERS(s->rep->analysers, ana_list, ana_back, AN_RES_WAIT_HTTP);
+ UPDATE_ANALYSERS(s->res.analysers, ana_list, ana_back, AN_RES_WAIT_HTTP);
}
if (ana_list & AN_RES_STORE_RULES) {
- if (!process_store_rules(s, s->rep, AN_RES_STORE_RULES))
+ if (!process_store_rules(s, &s->res, AN_RES_STORE_RULES))
break;
- UPDATE_ANALYSERS(s->rep->analysers, ana_list, ana_back, AN_RES_STORE_RULES);
+ UPDATE_ANALYSERS(s->res.analysers, ana_list, ana_back, AN_RES_STORE_RULES);
}
if (ana_list & AN_RES_HTTP_PROCESS_BE) {
- if (!http_process_res_common(s, s->rep, AN_RES_HTTP_PROCESS_BE, s->be))
+ if (!http_process_res_common(s, &s->res, AN_RES_HTTP_PROCESS_BE, s->be))
break;
- UPDATE_ANALYSERS(s->rep->analysers, ana_list, ana_back, AN_RES_HTTP_PROCESS_BE);
+ UPDATE_ANALYSERS(s->res.analysers, ana_list, ana_back, AN_RES_HTTP_PROCESS_BE);
}
if (ana_list & AN_RES_HTTP_XFER_BODY) {
- if (!http_response_forward_body(s, s->rep, AN_RES_HTTP_XFER_BODY))
+ if (!http_response_forward_body(s, &s->res, AN_RES_HTTP_XFER_BODY))
break;
- UPDATE_ANALYSERS(s->rep->analysers, ana_list, ana_back, AN_RES_HTTP_XFER_BODY);
+ UPDATE_ANALYSERS(s->res.analysers, ana_list, ana_back, AN_RES_HTTP_XFER_BODY);
}
break;
}
rp_cons_last = s->si[0].state;
rp_prod_last = s->si[1].state;
- rpf_last = s->rep->flags;
+ rpf_last = s->res.flags;
- if ((s->rep->flags ^ flags) & CF_MASK_STATIC)
+ if ((s->res.flags ^ flags) & CF_MASK_STATIC)
goto resync_response;
}
/* maybe someone has added some request analysers, so we must check and loop */
- if (s->req->analysers & ~req_ana_back)
+ if (s->req.analysers & ~req_ana_back)
goto resync_request;
- if ((s->req->flags & ~rqf_last) & CF_MASK_ANALYSER)
+ if ((s->req.flags & ~rqf_last) & CF_MASK_ANALYSER)
goto resync_request;
/* FIXME: here we should call protocol handlers which rely on
 * both buffers.
 */
srv = objt_server(s->target);
if (unlikely(!(s->flags & SN_ERR_MASK))) {
- if (s->req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
+ if (s->req.flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
/* Report it if the client got an error or a read timeout expired */
- s->req->analysers = 0;
- if (s->req->flags & CF_READ_ERROR) {
+ s->req.analysers = 0;
+ if (s->req.flags & CF_READ_ERROR) {
s->be->be_counters.cli_aborts++;
s->fe->fe_counters.cli_aborts++;
if (srv)
srv->counters.cli_aborts++;
s->flags |= SN_ERR_CLICL;
}
- else if (s->req->flags & CF_READ_TIMEOUT) {
+ else if (s->req.flags & CF_READ_TIMEOUT) {
s->be->be_counters.cli_aborts++;
s->fe->fe_counters.cli_aborts++;
if (srv)
srv->counters.cli_aborts++;
s->flags |= SN_ERR_CLITO;
}
- else if (s->req->flags & CF_WRITE_ERROR) {
+ else if (s->req.flags & CF_WRITE_ERROR) {
s->be->be_counters.srv_aborts++;
s->fe->fe_counters.srv_aborts++;
if (srv)
}
sess_set_term_flags(s);
}
- else if (s->rep->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
+ else if (s->res.flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
/* Report it if the server got an error or a read timeout expired */
- s->rep->analysers = 0;
- if (s->rep->flags & CF_READ_ERROR) {
+ s->res.analysers = 0;
+ if (s->res.flags & CF_READ_ERROR) {
s->be->be_counters.srv_aborts++;
s->fe->fe_counters.srv_aborts++;
if (srv)
srv->counters.srv_aborts++;
s->flags |= SN_ERR_SRVCL;
}
- else if (s->rep->flags & CF_READ_TIMEOUT) {
+ else if (s->res.flags & CF_READ_TIMEOUT) {
s->be->be_counters.srv_aborts++;
s->fe->fe_counters.srv_aborts++;
if (srv)
srv->counters.srv_aborts++;
s->flags |= SN_ERR_SRVTO;
}
- else if (s->rep->flags & CF_WRITE_ERROR) {
+ else if (s->res.flags & CF_WRITE_ERROR) {
s->be->be_counters.cli_aborts++;
s->fe->fe_counters.cli_aborts++;
if (srv)
* Note that we're checking CF_SHUTR_NOW as an indication of a possible
* recent call to channel_abort().
*/
- if (unlikely(!s->req->analysers &&
- !(s->req->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
- (s->req->prod->state >= SI_ST_EST) &&
- (s->req->to_forward != CHN_INFINITE_FORWARD))) {
+ if (unlikely(!s->req.analysers &&
+ !(s->req.flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
+ (s->req.prod->state >= SI_ST_EST) &&
+ (s->req.to_forward != CHN_INFINITE_FORWARD))) {
/* This buffer is freewheeling, there's no analyser
* attached to it. If any data are left in, we'll permit them to
* move.
*/
- channel_auto_read(s->req);
- channel_auto_connect(s->req);
- channel_auto_close(s->req);
- buffer_flush(s->req->buf);
+ channel_auto_read(&s->req);
+ channel_auto_connect(&s->req);
+ channel_auto_close(&s->req);
+ buffer_flush(s->req.buf);
/* We'll let data flow between the producer (if still connected)
* to the consumer (which might possibly not be connected yet).
*/
- if (!(s->req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
- channel_forward(s->req, CHN_INFINITE_FORWARD);
+ if (!(s->req.flags & (CF_SHUTR|CF_SHUTW_NOW)))
+ channel_forward(&s->req, CHN_INFINITE_FORWARD);
}
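"Freewheeling" above means the channel is told to forward everything it will ever receive, so data crosses from producer to consumer without waking any analyser. A stub model of infinite forwarding:

    struct fwd_model { unsigned int i, o, to_forward; };
    #define FWD_INFINITE (~0U)   /* stands in for CHN_INFINITE_FORWARD */

    static void stub_forward(struct fwd_model *c)
    {
        if (c->to_forward == FWD_INFINITE) {
            c->o += c->i;   /* everything received is scheduled out */
            c->i  = 0;      /* and to_forward stays pinned at infinite */
        }
    }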
/* check if it is wise to enable kernel splicing to forward request data */
- if (!(s->req->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
- s->req->to_forward &&
+ if (!(s->req.flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
+ s->req.to_forward &&
(global.tune.options & GTUNE_USE_SPLICE) &&
(objt_conn(s->si[0].end) && __objt_conn(s->si[0].end)->xprt && __objt_conn(s->si[0].end)->xprt->rcv_pipe) &&
(objt_conn(s->si[1].end) && __objt_conn(s->si[1].end)->xprt && __objt_conn(s->si[1].end)->xprt->snd_pipe) &&
(pipes_used < global.maxpipes) &&
(((s->fe->options2|s->be->options2) & PR_O2_SPLIC_REQ) ||
(((s->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
- (s->req->flags & CF_STREAMER_FAST)))) {
- s->req->flags |= CF_KERN_SPLICING;
+ (s->req.flags & CF_STREAMER_FAST)))) {
+ s->req.flags |= CF_KERN_SPLICING;
}
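The splicing test above condenses to a handful of independent conditions; a hypothetical predicate capturing its shape (names are stand-ins, not the real flags):

    enum { XSPL_KERN = 0x1, XSPL_SHUTR = 0x2 };

    static int may_splice(unsigned int flags, unsigned int to_forward,
                          int global_ok, int xprt_ok, int pipes_left,
                          int opt_forced, int opt_auto, int streamer_fast)
    {
        return !(flags & (XSPL_KERN | XSPL_SHUTR)) && to_forward &&
               global_ok && xprt_ok && pipes_left &&
               (opt_forced || (opt_auto && streamer_fast));
    }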
/* reflect what the L7 analysers have seen last */
- rqf_last = s->req->flags;
+ rqf_last = s->req.flags;
/*
 * Now forward all shutdown requests between both sides of the buffer
 */
/* first, let's check if the request buffer needs to shutdown(write), which may
 * happen either because the input is closed or because we want to force a close
 * once the server has begun to respond. If a half-closed timeout is set, we adjust
 * the other side's timeout as well.
 */
- if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
+ if (unlikely((s->req.flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
(CF_AUTO_CLOSE|CF_SHUTR))) {
- channel_shutw_now(s->req);
+ channel_shutw_now(&s->req);
if (tick_isset(s->fe->timeout.clientfin)) {
- s->rep->wto = s->fe->timeout.clientfin;
- s->rep->wex = tick_add(now_ms, s->rep->wto);
+ s->res.wto = s->fe->timeout.clientfin;
+ s->res.wex = tick_add(now_ms, s->res.wto);
}
}
/* shutdown(write) pending */
- if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
- channel_is_empty(s->req))) {
- if (s->req->flags & CF_READ_ERROR)
- s->req->cons->flags |= SI_FL_NOLINGER;
- si_shutw(s->req->cons);
+ if (unlikely((s->req.flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
+ channel_is_empty(&s->req))) {
+ if (s->req.flags & CF_READ_ERROR)
+ s->req.cons->flags |= SI_FL_NOLINGER;
+ si_shutw(s->req.cons);
if (tick_isset(s->be->timeout.serverfin)) {
- s->rep->rto = s->be->timeout.serverfin;
- s->rep->rex = tick_add(now_ms, s->rep->rto);
+ s->res.rto = s->be->timeout.serverfin;
+ s->res.rex = tick_add(now_ms, s->res.rto);
}
}
/* shutdown(write) done on server side, we must stop the client too */
- if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
- !s->req->analysers))
- channel_shutr_now(s->req);
+ if (unlikely((s->req.flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
+ !s->req.analysers))
+ channel_shutr_now(&s->req);
/* shutdown(read) pending */
- if (unlikely((s->req->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
- if (s->req->prod->flags & SI_FL_NOHALF)
- s->req->prod->flags |= SI_FL_NOLINGER;
- si_shutr(s->req->prod);
+ if (unlikely((s->req.flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
+ if (s->req.prod->flags & SI_FL_NOHALF)
+ s->req.prod->flags |= SI_FL_NOLINGER;
+ si_shutr(s->req.prod);
if (tick_isset(s->fe->timeout.clientfin)) {
- s->rep->wto = s->fe->timeout.clientfin;
- s->rep->wex = tick_add(now_ms, s->rep->wto);
+ s->res.wto = s->fe->timeout.clientfin;
+ s->res.wex = tick_add(now_ms, s->res.wto);
}
}
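Each shutdown handled above re-arms the opposite channel with the shorter "fin" timeout when one is configured, so half-closed connections do not linger with the full client/server timeout. A sketch of that hand-off, using plain unsigned ticks as stand-ins for the tick helpers:

    static void arm_halfclose(unsigned int *to, unsigned int *exp,
                              unsigned int now_ms, unsigned int fin_tmo)
    {
        if (fin_tmo) {               /* stands in for tick_isset() */
            *to  = fin_tmo;          /* shrink the timeout */
            *exp = now_ms + *to;     /* stands in for tick_add() */
        }
    }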
* - there are data scheduled for emission in the buffer
* - the CF_AUTO_CONNECT flag is set (active connection)
*/
- if (s->req->cons->state == SI_ST_INI) {
- if (!(s->req->flags & CF_SHUTW)) {
- if ((s->req->flags & CF_AUTO_CONNECT) || !channel_is_empty(s->req)) {
+ if (s->req.cons->state == SI_ST_INI) {
+ if (!(s->req.flags & CF_SHUTW)) {
+ if ((s->req.flags & CF_AUTO_CONNECT) || !channel_is_empty(&s->req)) {
/* If we have an appctx, there is no connect method, so we
* immediately switch to the connected state, otherwise we
* perform a connection request.
*/
- s->req->cons->state = SI_ST_REQ; /* new connection requested */
- s->req->cons->conn_retries = s->be->conn_retries;
+ s->req.cons->state = SI_ST_REQ; /* new connection requested */
+ s->req.cons->conn_retries = s->be->conn_retries;
}
}
else {
- s->req->cons->state = SI_ST_CLO; /* shutw+ini = abort */
- channel_shutw_now(s->req); /* fix buffer flags upon abort */
- channel_shutr_now(s->rep);
+ s->req.cons->state = SI_ST_CLO; /* shutw+ini = abort */
+ channel_shutw_now(&s->req); /* fix buffer flags upon abort */
+ channel_shutr_now(&s->res);
}
}
}
/* Benchmarks have shown that it's optimal to do a full resync now */
- if (s->req->prod->state == SI_ST_DIS || s->req->cons->state == SI_ST_DIS)
+ if (s->req.prod->state == SI_ST_DIS || s->req.cons->state == SI_ST_DIS)
goto resync_stream_interface;
/* otherwise we want to check if we need to resync the req buffer or not */
- if ((s->req->flags ^ rqf_last) & CF_MASK_STATIC)
+ if ((s->req.flags ^ rqf_last) & CF_MASK_STATIC)
goto resync_request;
/* perform output updates to the response buffer */
* Note that we're checking CF_SHUTR_NOW as an indication of a possible
* recent call to channel_abort().
*/
- if (unlikely(!s->rep->analysers &&
- !(s->rep->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
- (s->rep->prod->state >= SI_ST_EST) &&
- (s->rep->to_forward != CHN_INFINITE_FORWARD))) {
+ if (unlikely(!s->res.analysers &&
+ !(s->res.flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
+ (s->res.prod->state >= SI_ST_EST) &&
+ (s->res.to_forward != CHN_INFINITE_FORWARD))) {
/* This buffer is freewheeling, there's no analyser
* attached to it. If any data are left in, we'll permit them to
* move.
*/
- channel_auto_read(s->rep);
- channel_auto_close(s->rep);
- buffer_flush(s->rep->buf);
+ channel_auto_read(&s->res);
+ channel_auto_close(&s->res);
+ buffer_flush(s->res.buf);
/* We'll let data flow between the producer (if still connected)
* to the consumer.
*/
- if (!(s->rep->flags & (CF_SHUTR|CF_SHUTW_NOW)))
- channel_forward(s->rep, CHN_INFINITE_FORWARD);
+ if (!(s->res.flags & (CF_SHUTR|CF_SHUTW_NOW)))
+ channel_forward(&s->res, CHN_INFINITE_FORWARD);
/* if we have no analyser anymore in any direction and have a
* tunnel timeout set, use it now. Note that we must respect
* the half-closed timeouts as well.
*/
- if (!s->req->analysers && s->be->timeout.tunnel) {
- s->req->rto = s->req->wto = s->rep->rto = s->rep->wto =
+ if (!s->req.analysers && s->be->timeout.tunnel) {
+ s->req.rto = s->req.wto = s->res.rto = s->res.wto =
s->be->timeout.tunnel;
- if ((s->req->flags & CF_SHUTR) && tick_isset(s->fe->timeout.clientfin))
- s->rep->wto = s->fe->timeout.clientfin;
- if ((s->req->flags & CF_SHUTW) && tick_isset(s->be->timeout.serverfin))
- s->rep->rto = s->be->timeout.serverfin;
- if ((s->rep->flags & CF_SHUTR) && tick_isset(s->be->timeout.serverfin))
- s->req->wto = s->be->timeout.serverfin;
- if ((s->rep->flags & CF_SHUTW) && tick_isset(s->fe->timeout.clientfin))
- s->req->rto = s->fe->timeout.clientfin;
-
- s->req->rex = tick_add(now_ms, s->req->rto);
- s->req->wex = tick_add(now_ms, s->req->wto);
- s->rep->rex = tick_add(now_ms, s->rep->rto);
- s->rep->wex = tick_add(now_ms, s->rep->wto);
+ if ((s->req.flags & CF_SHUTR) && tick_isset(s->fe->timeout.clientfin))
+ s->res.wto = s->fe->timeout.clientfin;
+ if ((s->req.flags & CF_SHUTW) && tick_isset(s->be->timeout.serverfin))
+ s->res.rto = s->be->timeout.serverfin;
+ if ((s->res.flags & CF_SHUTR) && tick_isset(s->be->timeout.serverfin))
+ s->req.wto = s->be->timeout.serverfin;
+ if ((s->res.flags & CF_SHUTW) && tick_isset(s->fe->timeout.clientfin))
+ s->req.rto = s->fe->timeout.clientfin;
+
+ s->req.rex = tick_add(now_ms, s->req.rto);
+ s->req.wex = tick_add(now_ms, s->req.wto);
+ s->res.rex = tick_add(now_ms, s->res.rto);
+ s->res.wex = tick_add(now_ms, s->res.wto);
}
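The precedence above: once no analyser remains, the tunnel timeout replaces all four directional timeouts, then each already half-closed side is tightened to its "fin" timeout. A condensed sketch of one of those overrides (plain unsigned ticks, client side only):

    struct tmo_model { unsigned int rto, wto; };

    static void apply_tunnel_tmo(struct tmo_model *req, struct tmo_model *res,
                                 unsigned int tunnel,
                                 int req_shutr, unsigned int clientfin)
    {
        req->rto = req->wto = res->rto = res->wto = tunnel;
        if (req_shutr && clientfin)
            res->wto = clientfin;  /* client closed: shorter write timeout */
    }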
}
/* check if it is wise to enable kernel splicing to forward response data */
- if (!(s->rep->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
- s->rep->to_forward &&
+ if (!(s->res.flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
+ s->res.to_forward &&
(global.tune.options & GTUNE_USE_SPLICE) &&
(objt_conn(s->si[0].end) && __objt_conn(s->si[0].end)->xprt && __objt_conn(s->si[0].end)->xprt->snd_pipe) &&
(objt_conn(s->si[1].end) && __objt_conn(s->si[1].end)->xprt && __objt_conn(s->si[1].end)->xprt->rcv_pipe) &&
(pipes_used < global.maxpipes) &&
(((s->fe->options2|s->be->options2) & PR_O2_SPLIC_RTR) ||
(((s->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
- (s->rep->flags & CF_STREAMER_FAST)))) {
- s->rep->flags |= CF_KERN_SPLICING;
+ (s->res.flags & CF_STREAMER_FAST)))) {
+ s->res.flags |= CF_KERN_SPLICING;
}
/* reflect what the L7 analysers have seen last */
- rpf_last = s->rep->flags;
+ rpf_last = s->res.flags;
/*
* Now forward all shutdown requests between both sides of the buffer
*/
/* first, let's check if the response buffer needs to shutdown(write) */
- if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
+ if (unlikely((s->res.flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
(CF_AUTO_CLOSE|CF_SHUTR))) {
- channel_shutw_now(s->rep);
+ channel_shutw_now(&s->res);
if (tick_isset(s->be->timeout.serverfin)) {
- s->req->wto = s->be->timeout.serverfin;
- s->req->wex = tick_add(now_ms, s->req->wto);
+ s->req.wto = s->be->timeout.serverfin;
+ s->req.wex = tick_add(now_ms, s->req.wto);
}
}
/* shutdown(write) pending */
- if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
- channel_is_empty(s->rep))) {
- si_shutw(s->rep->cons);
+ if (unlikely((s->res.flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
+ channel_is_empty(&s->res))) {
+ si_shutw(s->res.cons);
if (tick_isset(s->fe->timeout.clientfin)) {
- s->req->rto = s->fe->timeout.clientfin;
- s->req->rex = tick_add(now_ms, s->req->rto);
+ s->req.rto = s->fe->timeout.clientfin;
+ s->req.rex = tick_add(now_ms, s->req.rto);
}
}
/* shutdown(write) done on the client side, we must stop the server too */
- if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
- !s->rep->analysers)
- channel_shutr_now(s->rep);
+ if (unlikely((s->res.flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
+ !s->res.analysers)
+ channel_shutr_now(&s->res);
/* shutdown(read) pending */
- if (unlikely((s->rep->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
- if (s->rep->prod->flags & SI_FL_NOHALF)
- s->rep->prod->flags |= SI_FL_NOLINGER;
- si_shutr(s->rep->prod);
+ if (unlikely((s->res.flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
+ if (s->res.prod->flags & SI_FL_NOHALF)
+ s->res.prod->flags |= SI_FL_NOLINGER;
+ si_shutr(s->res.prod);
if (tick_isset(s->be->timeout.serverfin)) {
- s->req->wto = s->be->timeout.serverfin;
- s->req->wex = tick_add(now_ms, s->req->wto);
+ s->req.wto = s->be->timeout.serverfin;
+ s->req.wex = tick_add(now_ms, s->req.wto);
}
}
- if (s->req->prod->state == SI_ST_DIS || s->req->cons->state == SI_ST_DIS)
+ if (s->req.prod->state == SI_ST_DIS || s->req.cons->state == SI_ST_DIS)
goto resync_stream_interface;
- if (s->req->flags != rqf_last)
+ if (s->req.flags != rqf_last)
goto resync_request;
- if ((s->rep->flags ^ rpf_last) & CF_MASK_STATIC)
+ if ((s->res.flags ^ rpf_last) & CF_MASK_STATIC)
goto resync_response;
/* we're interested in getting wakeups again */
- s->req->prod->flags &= ~SI_FL_DONT_WAKE;
- s->req->cons->flags &= ~SI_FL_DONT_WAKE;
+ s->req.prod->flags &= ~SI_FL_DONT_WAKE;
+ s->req.cons->flags &= ~SI_FL_DONT_WAKE;
/* This is needed only when debugging is enabled, to indicate
 * client-side or server-side close. Please note that in the unlikely
 * event where both sides would close at once, the sequence is
 * reported on the server side first.
 */
}
}
- if (likely((s->rep->cons->state != SI_ST_CLO) ||
- (s->req->cons->state > SI_ST_INI && s->req->cons->state < SI_ST_CLO))) {
+ if (likely((s->res.cons->state != SI_ST_CLO) ||
+ (s->req.cons->state > SI_ST_INI && s->req.cons->state < SI_ST_CLO))) {
if ((s->fe->options & PR_O_CONTSTATS) && (s->flags & SN_BE_ASSIGNED))
session_process_counters(s);
- if (s->rep->cons->state == SI_ST_EST && obj_type(s->rep->cons->end) != OBJ_TYPE_APPCTX)
- si_update(s->rep->cons);
+ if (s->res.cons->state == SI_ST_EST && obj_type(s->res.cons->end) != OBJ_TYPE_APPCTX)
+ si_update(s->res.cons);
- if (s->req->cons->state == SI_ST_EST && obj_type(s->req->cons->end) != OBJ_TYPE_APPCTX)
- si_update(s->req->cons);
+ if (s->req.cons->state == SI_ST_EST && obj_type(s->req.cons->end) != OBJ_TYPE_APPCTX)
+ si_update(s->req.cons);
- s->req->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
- s->rep->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
+ s->req.flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
+ s->res.flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
s->si[0].prev_state = s->si[0].state;
s->si[1].prev_state = s->si[1].state;
s->si[0].flags &= ~(SI_FL_ERR|SI_FL_EXP);
* request timeout is set and the server has not yet sent a response.
*/
- if ((s->rep->flags & (CF_AUTO_CLOSE|CF_SHUTR)) == 0 &&
- (tick_isset(s->req->wex) || tick_isset(s->rep->rex))) {
- s->req->flags |= CF_READ_NOEXP;
- s->req->rex = TICK_ETERNITY;
+ if ((s->res.flags & (CF_AUTO_CLOSE|CF_SHUTR)) == 0 &&
+ (tick_isset(s->req.wex) || tick_isset(s->res.rex))) {
+ s->req.flags |= CF_READ_NOEXP;
+ s->req.rex = TICK_ETERNITY;
}
/* When any of the stream interfaces is attached to an applet, we have
 * to call its handler from here. The two calls are ORed so that
 * both functions are always called and that we wake up if at
 * least one did something.
 */
- if ((si_applet_call(s->req->cons) | si_applet_call(s->rep->cons)) != 0) {
+ if ((si_applet_call(s->req.cons) | si_applet_call(s->res.cons)) != 0) {
if (task_in_rq(t)) {
t->expire = TICK_ETERNITY;
session_release_buffers(s);
}
update_exp_and_leave:
- t->expire = tick_first(tick_first(s->req->rex, s->req->wex),
- tick_first(s->rep->rex, s->rep->wex));
- if (s->req->analysers)
- t->expire = tick_first(t->expire, s->req->analyse_exp);
+ t->expire = tick_first(tick_first(s->req.rex, s->req.wex),
+ tick_first(s->res.rex, s->res.wex));
+ if (s->req.analysers)
+ t->expire = tick_first(t->expire, s->req.analyse_exp);
if (s->si[0].exp)
t->expire = tick_first(t->expire, s->si[0].exp);
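tick_first() treats TICK_ETERNITY as "no deadline", so the chain above yields the earliest real expiration among all channel and stream-interface timers. A sketch of that convention (the real helper lives in the ticks code and uses its own comparison macros):

    #define TICK_ETERNITY 0

    static unsigned int tick_first_stub(unsigned int t1, unsigned int t2)
    {
        if (t1 == TICK_ETERNITY) return t2;
        if (t2 == TICK_ETERNITY) return t1;
        return (int)(t1 - t2) <= 0 ? t1 : t2;  /* wrap-safe "earliest" */
    }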
fprintf(stderr,
"[%u] queuing with exp=%u req->rex=%u req->wex=%u req->ana_exp=%u"
" rep->rex=%u rep->wex=%u, si[0].exp=%u, si[1].exp=%u, cs=%d, ss=%d\n",
- now_ms, t->expire, s->req->rex, s->req->wex, s->req->analyse_exp,
- s->rep->rex, s->rep->wex, s->si[0].exp, s->si[1].exp, s->si[0].state, s->si[1].state);
+ now_ms, t->expire, s->req.rex, s->req.wex, s->req.analyse_exp,
+ s->res.rex, s->res.wex, s->si[0].exp, s->si[1].exp, s->si[0].state, s->si[1].state);
#endif
#ifdef DEBUG_DEV
/* let's do a final log if we need it */
if (!LIST_ISEMPTY(&s->fe->logformat) && s->logs.logwait &&
!(s->flags & SN_MONITOR) &&
- (!(s->fe->options & PR_O_NULLNOLOG) || s->req->total)) {
+ (!(s->fe->options & PR_O_NULLNOLOG) || s->req.total)) {
s->do_log(s);
}
/* kill a session and set the termination flags to <why> (one of SN_ERR_*) */
void session_shutdown(struct session *session, int why)
{
- if (session->req->flags & (CF_SHUTW|CF_SHUTW_NOW))
+ if (session->req.flags & (CF_SHUTW|CF_SHUTW_NOW))
return;
- channel_shutw_now(session->req);
- channel_shutr_now(session->rep);
+ channel_shutw_now(&session->req);
+ channel_shutr_now(&session->res);
session->task->nice = 1024;
if (!(session->flags & SN_ERR_MASK))
session->flags |= why;
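For reference, a plausible call site: SN_ERR_RESOURCE is the same termination flag used in the sw_failed path earlier in this patch, so aborting a session on allocation failure could look like:

    /* hypothetical caller, aborting on resource shortage */
    session_shutdown(s, SN_ERR_RESOURCE);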