/* fall through */
case PROMEX_ST_DONE:
- /* Don't add TLR because mux-h1 will take care of it */
- res_htx->flags |= HTX_FL_EOM; /* no more data are expected. Only EOM remains to add now */
- if (!htx_add_endof(res_htx, HTX_BLK_EOM)) {
- si_rx_room_blk(si);
- goto out;
- }
- channel_add_input(res, 1);
+ /* no more data are expected. Don't add TLR because mux-h1 will take care of it */
+ res_htx->flags |= HTX_FL_EOM;
appctx->st0 = PROMEX_ST_END;
/* fall through */
a channel. The channel is chosen depending on the sample direction.
internal.htx.has_eom : boolean
- Returns true if the HTX message associated to a channel contains an
- end-of-message block (EOM). Otherwise, it returns false. The channel is
- chosen depending on the sample direction.
+ Returns true if the HTX message associated to a channel contains the
+ end-of-message flag (EOM). Otherwise, it returns false. The channel is chosen
+ depending on the sample direction.
internal.htx.nbblks : integer
Returns the number of blocks present in the HTX message associated to a
-----------------------------------------------
HTX API
Version 1.0
- ( Last update: 2019-06-20 )
+ ( Last update: 2020-12-02 )
-----------------------------------------------
Author : Christopher Faulet
Contact : cfaulet at haproxy dot com
An HTX block can be as well a start-line as a header, a body part or a
trailer. For all these types of block, a payload is attached to the block. It
-can also be a marker, the end-of-headers, end-of-trailers or end-of-message. For
-these blocks, there is no payload but it counts for a byte. It is important to
-not skip it when data are forwarded.
+can also be a marker, the end-of-headers or end-of-trailers. For these blocks,
+there is no payload but it counts for a byte. It is important not to skip it
+when data are forwarded.
As already said, a block is composed of metadata and a payload. Metadata are
stored in the blocks part and are composed of 2 fields :
- 0100 (4) : A data block
- 0101 (5) : A trailer block
- 0110 (6) : The end-of-trailers marker
- - 0111 (7) : The end-of-message marker
- 1111 (15) : An unused block
Other types are unused for now and reserved for future extensions.
- zero or more trailer blocks (optional)
- an end-of-trailers marker (optional but always set if there is at least
one trailer block)
- - an end-of-message marker.
Only one HTTP request at a time can be stored in an HTX message. For HTTP
response, it is more complicated. Only one "final" response can be stored in an
HTX message. It is a response with status-code 101 or greater or equal to
200. But it may be preceded by several 1xx informational responses. Such
-responses are part of the same HTX message, so there is no end-of-message marker
-for them.
+responses are part of the same HTX message.
+
+When the end of the message is reached, a special flag is set on the message
+(HTX_FL_EOM). It means no more data are expected for this message, except
+tunneled data. But tunneled data will never be mixed with message data. Thus,
+once the flag marking the end of the message is set, it is easy to know when
+the message ends.
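
For illustration, here is a minimal sketch of how this flag is meant to be used
(the flag and fields exist in this patch; the snippet itself is only an
example, not part of the API) :

    /* producer side: all message data have been added, mark the end of the
     * message instead of appending an EOM block */
    htx->flags |= HTX_FL_EOM;

    /* consumer side: once the flag is set, no more message data are expected
     * (tunneled data, if any, are never mixed with message data) */
    if (htx->flags & HTX_FL_EOM) {
        /* the message is complete */
    }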
3.1. The start-line
3.4. The end-of markers
-These blocks are used to delimit parts of an HTX message. It exists three
-markers:
+These blocks are used to delimit parts of an HTX message. There are two
+markers :
- end-of-headers (EOH)
- end-of-trailers (EOT)
- - end-of-message (EOM)
-EOH and EOM are always present in an HTX message. EOT is optional.
+EOH is always present in an HTX message. EOT is optional.
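
Since EOM blocks no longer exist, a loop over a message's blocks now stops on
the trailers (TLR/EOT) when only the payload matters. A hedged sketch,
mirroring several hunks of this patch :

    /* compute the payload length of an HTX message, stopping at the
     * trailers when present */
    size_t len = 0;
    struct htx_blk *blk;

    for (blk = htx_get_first_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
        enum htx_blk_type type = htx_get_blk_type(blk);

        if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
            break;
        if (type == HTX_BLK_DATA)
            len += htx_get_blksz(blk);
    }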
4. The HTX API
struct buffer *htxbuf);
int h1_parse_msg_tlrs(struct h1m *h1m, struct htx *dsthtx,
struct buffer *srcbuf, size_t ofs, size_t max);
-int h1_parse_msg_eom(struct h1m *h1m, struct htx *dsthtx, size_t max);
int h1_format_htx_reqline(const struct htx_sl *sl, struct buffer *chk);
int h1_format_htx_stline(const struct htx_sl *sl, struct buffer *chk);
*
* An HTX block is as well a header as a body part or a trailer. For all these
* types of block, a payload is attached to the block. It can also be a mark,
- * like the end-of-headers or end-of-message. For these blocks, there is no
+ * like the end-of-headers or end-of-trailers. For these blocks, there is no
 * payload but it counts for a byte. It is important not to skip it when data are
* forwarded. Metadata of an HTX block are composed of 2 fields :
*
* - 0101 = data
* - 0110 = trailer
* - 0111 = end-of-trailers
- * - 1000 = end-of-message
* ...
* - 1111 = unused
*
HTX_BLK_DATA = 4, /* data block */
HTX_BLK_TLR = 5, /* trailer name/value block */
HTX_BLK_EOT = 6, /* end-of-trailers block */
- HTX_BLK_EOM = 7, /* end-of-message block */
- /* 8 .. 14 unused */
+ /* 7 .. 14 unused */
HTX_BLK_UNUSED = 15, /* unused/removed block */
};
}
/* Returns 1 if no more data are expected for the message <htx>. Otherwise it
- * returns 0. Note that it is illegal to call this with htx == NULL. Note also
- * the EOM block may be missing.
+ * returns 0. Note that it is illegal to call this with htx == NULL. This
+ * function relies on the HTX_FL_EOM flag. It means tunneled data are not
+ * considered here.
*/
static inline int htx_expect_more(const struct htx *htx)
{
return htx_append_msg(htx, htxbuf(msg));
}
+/* Remove all blocks except headers. Trailers will also be removed. */
static inline void htx_skip_msg_payload(struct htx *htx)
{
struct htx_blk *blk = htx_get_first_blk(htx);
while (blk) {
enum htx_blk_type type = htx_get_blk_type(blk);
- blk = ((type > HTX_BLK_EOH && type < HTX_BLK_EOM)
+ blk = ((type > HTX_BLK_EOH)
? htx_remove_blk(htx, blk)
: htx_get_next_blk(htx, blk));
}
case HTX_BLK_DATA: return "HTX_BLK_DATA";
case HTX_BLK_TLR: return "HTX_BLK_TLR";
case HTX_BLK_EOT: return "HTX_BLK_EOT";
- case HTX_BLK_EOM: return "HTX_BLK_EOM";
case HTX_BLK_UNUSED: return "HTX_BLK_UNUSED";
default: return "HTX_BLK_???";
};
if (appctx->st0 == HTX_CACHE_DATA) {
len = first->len - sizeof(*cache_ptr) - appctx->ctx.cache.sent;
if (len) {
- ret = htx_cache_dump_msg(appctx, res_htx, len, HTX_BLK_EOM);
+ ret = htx_cache_dump_msg(appctx, res_htx, len, HTX_BLK_UNUSED);
if (ret < len) {
si_rx_room_blk(si);
goto out;
}
}
- appctx->st0 = HTX_CACHE_END;
+ appctx->st0 = HTX_CACHE_EOM;
}
if (appctx->st0 == HTX_CACHE_EOM) {
- res_htx->flags |= HTX_FL_EOM; /* no more data are expected. Only EOM remains to add now */
- if (!htx_add_endof(res_htx, HTX_BLK_EOM)) {
- si_rx_room_blk(si);
- goto out;
- }
+ /* no more data are expected. */
+ res_htx->flags |= HTX_FL_EOM;
appctx->st0 = HTX_CACHE_END;
}
sl = http_get_stline(htx);
if (sl &&
(sl->flags & (HTX_SL_F_XFER_LEN|HTX_SL_F_CLEN|HTX_SL_F_CHNK)) == HTX_SL_F_XFER_LEN &&
- htx_get_tail_type(htx) == HTX_BLK_EOM) {
+ (htx->flags & HTX_FL_EOM)) {
struct htx_blk * blk;
char *end;
size_t len = 0;
for (blk = htx_get_first_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
enum htx_blk_type type = htx_get_blk_type(blk);
- if (type == HTX_BLK_EOM)
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
break;
if (type == HTX_BLK_DATA)
len += htx_get_blksz(blk);
struct comp_state *st = filter->ctx;
struct htx *htx = htxbuf(&msg->chn->buf);
struct htx_ret htxret = htx_find_offset(htx, offset);
- struct htx_blk *blk;
- int ret, consumed = 0, to_forward = 0;
+ struct htx_blk *blk, *next;
+ int ret, consumed = 0, to_forward = 0, last = 0;
blk = htxret.blk;
offset = htxret.ret;
- for (; blk && len; blk = htx_get_next_blk(htx, blk)) {
+ for (next = NULL; blk && len; blk = next) {
enum htx_blk_type type = htx_get_blk_type(blk);
uint32_t sz = htx_get_blksz(blk);
struct ist v;
- switch (type) {
- case HTX_BLK_UNUSED:
- break;
+ next = htx_get_next_blk(htx, blk);
+ while (next && htx_get_blk_type(next) == HTX_BLK_UNUSED)
+			next = htx_get_next_blk(htx, next);
+
+ if (!(msg->flags & HTTP_MSGF_COMPRESSING))
+ goto consume;
+
+ if (htx_compression_buffer_init(htx, &trash) < 0) {
+ msg->chn->flags |= CF_WAKE_WRITE;
+ goto end;
+ }
+ switch (type) {
case HTX_BLK_DATA:
+ /* it is the last data block */
+ last = ((!next && (htx->flags & HTX_FL_EOM)) || (next && htx_get_blk_type(next) != HTX_BLK_DATA));
v = htx_get_blk_value(htx, blk);
- v.ptr += offset;
- v.len -= offset;
- if (v.len > len)
+ v = istadv(v, offset);
+ if (v.len > len) {
+ last = 0;
v.len = len;
- if (htx_compression_buffer_init(htx, &trash) < 0) {
- msg->chn->flags |= CF_WAKE_WRITE;
- goto end;
}
+
ret = htx_compression_buffer_add_data(st, v.ptr, v.len, &trash);
- if (ret < 0)
- goto error;
- if (htx_compression_buffer_end(st, &trash, 0) < 0)
+ if (ret < 0 || htx_compression_buffer_end(st, &trash, last) < 0)
goto error;
+ BUG_ON(v.len != ret);
+
+ if (ret == sz && !b_data(&trash))
+ next = htx_remove_blk(htx, blk);
+ else
+ blk = htx_replace_blk_value(htx, blk, v, ist2(b_head(&trash), b_data(&trash)));
+
len -= ret;
consumed += ret;
to_forward += b_data(&trash);
- if (ret == sz && !b_data(&trash)) {
- offset = 0;
- blk = htx_remove_blk(htx, blk);
- continue;
- }
- v.len = ret;
- blk = htx_replace_blk_value(htx, blk, v, ist2(b_head(&trash), b_data(&trash)));
+ if (last)
+ msg->flags &= ~HTTP_MSGF_COMPRESSING;
break;
case HTX_BLK_TLR:
case HTX_BLK_EOT:
- case HTX_BLK_EOM:
- if (msg->flags & HTTP_MSGF_COMPRESSING) {
- if (htx_compression_buffer_init(htx, &trash) < 0) {
- msg->chn->flags |= CF_WAKE_WRITE;
- goto end;
- }
- if (htx_compression_buffer_end(st, &trash, 1) < 0)
+ if (htx_compression_buffer_end(st, &trash, 1) < 0)
+ goto error;
+ if (b_data(&trash)) {
+ struct htx_blk *last = htx_add_last_data(htx, ist2(b_head(&trash), b_data(&trash)));
+ if (!last)
goto error;
- if (b_data(&trash)) {
- struct htx_blk *last = htx_add_last_data(htx, ist2(b_head(&trash), b_data(&trash)));
- if (!last)
- goto error;
- blk = htx_get_next_blk(htx, last);
- if (!blk)
- goto error;
- to_forward += b_data(&trash);
- }
- msg->flags &= ~HTTP_MSGF_COMPRESSING;
- /* We let the mux add last empty chunk and empty trailers */
+ blk = htx_get_next_blk(htx, last);
+ if (!blk)
+ goto error;
+ to_forward += b_data(&trash);
}
+ msg->flags &= ~HTTP_MSGF_COMPRESSING;
/* fall through */
default:
+ consume:
sz -= offset;
if (sz > len)
sz = len;
/* Switch messages without any payload to DONE state */
if (((h1m->flags & H1_MF_CLEN) && h1m->body_len == 0) ||
- ((h1m->flags & (H1_MF_XFER_LEN|H1_MF_CLEN|H1_MF_CHNK)) == H1_MF_XFER_LEN))
+ ((h1m->flags & (H1_MF_XFER_LEN|H1_MF_CLEN|H1_MF_CHNK)) == H1_MF_XFER_LEN)) {
h1m->state = H1_MSG_DONE;
+ dsthtx->flags |= HTX_FL_EOM;
+ }
end:
return ret;
goto end;
}
- if (!h1m->curr_len)
+ if (!h1m->curr_len) {
h1m->state = H1_MSG_DONE;
+ (*dsthtx)->flags |= HTX_FL_EOM;
+ }
}
else if (h1m->flags & H1_MF_CHNK) {
/* te:chunked : parse chunks */
* body. Switch the message in DONE state
*/
h1m->state = H1_MSG_DONE;
+ (*dsthtx)->flags |= HTX_FL_EOM;
}
else {
/* no content length, read till SHUTW */
goto error;
h1m->state = H1_MSG_DONE;
+ dsthtx->flags |= HTX_FL_EOM;
end:
return ret;
return 0;
}
-/* Finish HTTP/1 parsing by adding the HTX EOM block. It returns 1 on success or
- * 0 if it couldn't proceed. There is no parsing at this stage, but a parsing
- * error is reported if the message state is not H1_MSG_DONE. */
-int h1_parse_msg_eom(struct h1m *h1m, struct htx *dsthtx, size_t max)
-{
- if (h1m->state != H1_MSG_DONE) {
- h1m->err_state = h1m->state;
- h1m->err_pos = h1m->next;
- dsthtx->flags |= HTX_FL_PARSING_ERROR;
- return 0;
- }
-
- dsthtx->flags |= HTX_FL_EOM; /* no more data are expected. Only EOM remains to add now */
- if (max < sizeof(struct htx_blk) + 1 || !htx_add_endof(dsthtx, HTX_BLK_EOM))
- return 0;
-
- return 1;
-}
-
-
/* Appends the H1 representation of the request line <sl> to the chunk <chk>. It
* returns 1 if data are successfully appended, otherwise it returns 0.
*/
if (*msgf & H2_MSGF_BODY_TUNNEL)
*msgf &= ~(H2_MSGF_BODY|H2_MSGF_BODY_CL);
- if (!(*msgf & H2_MSGF_BODY) || ((*msgf & H2_MSGF_BODY_CL) && *body_len == 0))
+ if (!(*msgf & H2_MSGF_BODY) || ((*msgf & H2_MSGF_BODY_CL) && *body_len == 0) ||
+ (*msgf & H2_MSGF_BODY_TUNNEL)) {
+ /* Request without body or tunnel requested */
sl_flags |= HTX_SL_F_BODYLESS;
+ htx->flags |= HTX_FL_EOM;
+ }
/* update the start line with last detected header info */
sl->flags |= sl_flags;
/* Set bytes used in the HTX message for the headers now */
sl->hdrs_bytes = htx_used_space(htx) - used;
- if (*msgf & H2_MSGF_BODY_TUNNEL) {
- /* Add the EOM for tunnel requests (CONNECT) */
- htx->flags |= HTX_FL_EOM; /* no more message data are expected */
- if (!htx_add_endof(htx, HTX_BLK_EOM))
- goto fail;
- }
-
ret = 1;
return ret;
else
*msgf &= ~H2_MSGF_BODY_TUNNEL;
- if (!(*msgf & H2_MSGF_BODY) || ((*msgf & H2_MSGF_BODY_CL) && *body_len == 0))
+ if (!(*msgf & H2_MSGF_BODY) || ((*msgf & H2_MSGF_BODY_CL) && *body_len == 0) ||
+ (*msgf & H2_MSGF_BODY_TUNNEL)) {
+		/* Response without body or tunnel successfully established */
sl_flags |= HTX_SL_F_BODYLESS;
+ htx->flags |= HTX_FL_EOM;
+ }
/* update the start line with last detected header info */
sl->flags |= sl_flags;
/* Set bytes used in the HTX message for the headers now */
sl->hdrs_bytes = htx_used_space(htx) - used;
- if (*msgf & H2_MSGF_BODY_TUNNEL) {
- /* Tunnel sucessfully established, add the EOM now, all data are part of the tunnel */
- htx->flags |= HTX_FL_EOM; /* no more message data are expected */
- if (!htx_add_endof(htx, HTX_BLK_EOM))
- goto fail;
- }
-
ret = 1;
return ret;
struct htx_blk *blk = htx_get_blk(htx, pos);
enum htx_blk_type type = htx_get_blk_type(blk);
- if (type == HTX_BLK_EOM || type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
break;
if (type == HTX_BLK_DATA)
len += htx_get_blksz(blk);
uint32_t vlen;
char *nl;
- if (type == HTX_BLK_EOM) {
- stop = 1;
- break;
- }
-
vlen = sz;
if (vlen > count) {
if (type != HTX_BLK_DATA)
break;
case HTX_BLK_TLR:
- case HTX_BLK_EOM:
+ case HTX_BLK_EOT:
stop = 1;
break;
struct ist v;
uint32_t vlen;
- if (type == HTX_BLK_EOM) {
- len = 0;
- break;
- }
-
vlen = sz;
if (len > 0 && vlen > len)
vlen = len;
break;
case HTX_BLK_TLR:
- case HTX_BLK_EOM:
+ case HTX_BLK_EOT:
len = 0;
break;
if (!htx_add_endof(htx, HTX_BLK_EOH) ||
- (body_len && !htx_add_data_atonce(htx, ist2(body, body_len))) ||
- !htx_add_endof(htx, HTX_BLK_EOM))
+ (body_len && !htx_add_data_atonce(htx, ist2(body, body_len))))
goto fail;
+ htx->flags |= HTX_FL_EOM;
+
/* Now, forward the response and terminate the transaction */
s->txn->status = code;
htx_to_buf(htx, &s->res.buf);
if (!(ctx->ctx.hlua_apphttp.flags & APPLET_HDR_SENT))
goto error;
- /* Don't add TLR because mux-h1 will take care of it */
- res_htx->flags |= HTX_FL_EOM; /* no more data are expected. Only EOM remains to add now */
- if (!htx_add_endof(res_htx, HTX_BLK_EOM)) {
- si_rx_room_blk(si);
- goto out;
- }
- channel_add_input(res, 1);
+ /* no more data are expected. Don't add TLR because mux-h1 will take care of it */
+ res_htx->flags |= HTX_FL_EOM;
strm->txn->status = ctx->ctx.hlua_apphttp.status;
ctx->ctx.hlua_apphttp.flags |= APPLET_RSP_SENT;
}
* in case we previously disabled it, otherwise we might cause
* the client to delay further data.
*/
- if ((sess->listener->options & LI_O_NOQUICKACK) &&
- (htx_get_tail_type(htx) != HTX_BLK_EOM))
+ if ((sess->listener->options & LI_O_NOQUICKACK) && !(htx->flags & HTX_FL_EOM))
conn_set_quickack(cli_conn, 1);
/*************************************************************
/* Now we're in HTTP_MSG_DATA. We just need to know if all data have
* been received or if the buffer is full.
*/
- if (htx_get_tail_type(htx) > HTX_BLK_DATA ||
+ if ((htx->flags & HTX_FL_EOM) || htx_get_tail_type(htx) > HTX_BLK_DATA ||
channel_htx_full(req, htx, global.tune.maxrewrite))
goto http_end;
* in HTTP_MSG_ENDING state. Then if all data was marked to be
* forwarded, set the state to HTTP_MSG_DONE.
*/
- if (htx_get_tail_type(htx) != HTX_BLK_EOM)
+ if (!(htx->flags & HTX_FL_EOM))
goto missing_data_or_waiting;
msg->msg_state = HTTP_MSG_ENDING;
* in HTTP_MSG_ENDING state. Then if all data was marked to be
* forwarded, set the state to HTTP_MSG_DONE.
*/
- if (htx_get_tail_type(htx) != HTX_BLK_EOM)
+ if (!(htx->flags & HTX_FL_EOM))
goto missing_data_or_waiting;
msg->msg_state = HTTP_MSG_ENDING;
goto fail;
}
- if (!htx_add_endof(htx, HTX_BLK_EOH) || !htx_add_endof(htx, HTX_BLK_EOM))
+ if (!htx_add_endof(htx, HTX_BLK_EOH))
goto fail;
+ htx->flags |= HTX_FL_EOM;
htx_to_buf(htx, &res->buf);
if (!http_forward_proxy_resp(s, 1))
goto fail;
!htx_add_header(htx, ist("Location"), location))
goto fail;
- if (!htx_add_endof(htx, HTX_BLK_EOH) || !htx_add_endof(htx, HTX_BLK_EOM))
+ if (!htx_add_endof(htx, HTX_BLK_EOH))
goto fail;
+ htx->flags |= HTX_FL_EOM;
htx_to_buf(htx, &res->buf);
if (!http_forward_proxy_resp(s, 1))
goto fail;
if (!htx_add_header(htx, ist("content-length"), ist(clen)) ||
(body && b_data(body) && ctype && !htx_add_header(htx, ist("content-type"), ist(ctype))) ||
!htx_add_endof(htx, HTX_BLK_EOH) ||
- (body && b_data(body) && !htx_add_data_atonce(htx, ist2(b_head(body), b_data(body)))) ||
- !htx_add_endof(htx, HTX_BLK_EOM))
+ (body && b_data(body) && !htx_add_data_atonce(htx, ist2(b_head(body), b_data(body)))))
goto fail;
+
+ htx->flags |= HTX_FL_EOM;
}
leave:
struct htx_blk *blk = htx_get_blk(htx, pos);
enum htx_blk_type type = htx_get_blk_type(blk);
- if (type == HTX_BLK_EOM || type == HTX_BLK_TLR || type == HTX_BLK_EOT) {
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT) {
finished = 1;
break;
}
struct htx_blk *blk = htx_get_blk(htx, pos);
enum htx_blk_type type = htx_get_blk_type(blk);
- if (type == HTX_BLK_EOM || type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
break;
if (type == HTX_BLK_DATA)
len += htx_get_blksz(blk);
struct htx_blk *blk = htx_get_blk(htx, pos);
enum htx_blk_type type = htx_get_blk_type(blk);
- if (type == HTX_BLK_EOM || type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
break;
if (type == HTX_BLK_DATA)
len += htx_get_blksz(blk);
struct htx_blk *blk = htx_get_blk(htx, pos);
enum htx_blk_type type = htx_get_blk_type(blk);
- if (type == HTX_BLK_EOM || type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
break;
if (type == HTX_BLK_DATA) {
if (!h1_format_htx_data(htx_get_blk_value(htx, blk), temp, 0))
for (blk = htx_get_first_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
rescan_hdr:
type = htx_get_blk_type(blk);
- if (type == HTX_BLK_EOH || type == HTX_BLK_EOM)
+ if (type == HTX_BLK_EOH)
break;
if (type != HTX_BLK_HDR)
continue;
ret += sent;
}
- if (!htx_add_endof(htx, HTX_BLK_EOM)) {
- memprintf(errmsg, "unable to add EOM into the HTX message");
- goto error;
- }
+ htx->flags |= HTX_FL_EOM;
return 1;
return 1;
}
-/* Returns 1 if the HTX message contains an EOM block. Otherwise it returns
- * 0. Concretely, it only checks the tail. The channel is chosen depending on
- * the sample direction. */
+/* Returns 1 if the HTX message contains the EOM flag. Otherwise it returns 0.
+ * The channel is chosen depending on the sample direction.
+ */
static int
smp_fetch_htx_has_eom(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
{
if (!htx)
return 0;
- smp->data.u.sint = (htx_get_tail_type(htx) == HTX_BLK_EOM);
+ smp->data.u.sint = !!(htx->flags & HTX_FL_EOM);
smp->data.type = SMP_T_BOOL;
smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
return 1;
/* This is the last block in use */
if (htx->head == htx->tail) {
+ uint32_t flags = htx->flags; /* Preserve flags */
+
htx_reset(htx);
+ htx->flags |= flags;
return NULL;
}
}
/* Transfer HTX blocks from <src> to <dst>, stopping on the first block of the
- * type <mark> (typically EOH or EOM) or when <count> bytes were moved
+ * type <mark> (typically EOH or EOT) or when <count> bytes were moved
* (including payload and meta-data). It returns the number of bytes moved and
* the last HTX block inserted in <dst>.
*/
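
A hedged usage sketch for the function documented above (its signature is not
shown in this excerpt; the call below assumes it is htx_xfer_blks(), and the
field names .ret and .blk are inferred from the description) :

    /* move at most <count> bytes of headers from <src> to <dst>, stopping
     * after the end-of-headers marker */
    struct htx_ret xfer = htx_xfer_blks(dst, src, count, HTX_BLK_EOH);

    /* xfer.ret is the number of bytes moved, xfer.blk the last block
     * inserted in <dst> */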
}
next:
blk = htx_remove_blk(src, blk);
- if (type == mark)
+ if (type != HTX_BLK_UNUSED && type == mark)
break;
}
return htx_add_endof(htx, HTX_BLK_EOT);
}
-/* Adds an HTX block of type EOH, EOT, or EOM in <htx>. It returns the new block
- * on success. Otherwise, it returns NULL.
+/* Adds an HTX block of type EOH or EOT in <htx>. It returns the new block on
+ * success. Otherwise, it returns NULL.
*/
struct htx_blk *htx_add_endof(struct htx *htx, enum htx_blk_type type)
{
/* 32 buffers: one for the ring's root, rest for the mbuf itself */
#define FCGI_C_MBUF_CNT 32
+/* Size of a record header (also the size of an empty record) */
+#define FCGI_RECORD_HEADER_SZ 8
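
As a reminder, these 8 bytes map onto the FastCGI record header defined by the
FastCGI specification (version, type, request id, content length, padding
length, reserved). A hedged sketch of the empty STDIN header built in the
hunks below :

    /* empty STDIN record header: version 1, type 5 (FCGI_STDIN), id filled
     * in afterwards, content length 0, no padding */
    char hdr[FCGI_RECORD_HEADER_SZ];

    memcpy(hdr, "\x01\x05\x00\x00\x00\x00\x00\x00", FCGI_RECORD_HEADER_SZ);
    fcgi_set_record_id(hdr, fstrm->id);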
+
/* FCGI connection descriptor */
struct fcgi_conn {
struct connection *conn;
/* FCGI stream flags (32 bits) */
#define FCGI_SF_NONE 0x00000000
#define FCGI_SF_ES_RCVD 0x00000001 /* end-of-stream received (empty STDOUT or END_REQUEST record) */
-#define FCGI_SF_ES_SENT 0x00000002 /* end-of-strem sent (empty STDIN record) */
-#define FCGI_SF_ABRT_SENT 0x00000004 /* abort sent (ABORT_REQUEST record) */
+#define FCGI_SF_ES_SENT 0x00000002 /* end-of-stream sent (empty STDIN record) */
+#define FCGI_SF_EP_SENT 0x00000004 /* end-of-param sent (empty PARAMS record) */
+#define FCGI_SF_ABRT_SENT 0x00000008 /* abort sent (ABORT_REQUEST record) */
/* Stream flags indicating the reason the stream is blocked */
#define FCGI_SF_BLK_MBUSY 0x00000010 /* blocked waiting for mux access (transient) */
#define FCGI_SF_WANT_SHUTW 0x00002000 /* a stream couldn't shutw() (mux full/busy) */
#define FCGI_SF_KILL_CONN 0x00004000 /* kill the whole connection with this stream */
-/* Other flags */
-#define FCGI_SF_H1_PARSING_DONE 0x00010000
/* FCGI stream descriptor */
struct fcgi_strm {
for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
type = htx_get_blk_type(blk);
- if (type == HTX_BLK_EOM || type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
break;
if (type == HTX_BLK_DATA)
len += htx_get_blksz(blk);
while (1) {
outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
- if (outbuf.size >= 8 || !b_space_wraps(mbuf))
+ if (outbuf.size >= FCGI_RECORD_HEADER_SZ || !b_space_wraps(mbuf))
break;
realign_again:
b_slow_realign(mbuf, trash.area, b_data(mbuf));
}
- if (outbuf.size < 8)
+ if (outbuf.size < FCGI_RECORD_HEADER_SZ)
goto full;
/* vsn: 1(FCGI_VERSION), type: (9)FCGI_GET_VALUES, id: 0x0000,
* len: 0x0000 (fill later), padding: 0x00, rsv: 0x00 */
- memcpy(outbuf.area, "\x01\x09\x00\x00\x00\x00\x00\x00", 8);
- outbuf.data = 8;
+ memcpy(outbuf.area, "\x01\x09\x00\x00\x00\x00\x00\x00", FCGI_RECORD_HEADER_SZ);
+ outbuf.data = FCGI_RECORD_HEADER_SZ;
/* Note: Don't send the param FCGI_MAX_CONNS because its value cannot be
* handled by HAProxy.
/* update the record's size now */
TRACE_PROTO("FCGI GET_VALUES record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_GETVAL, fconn->conn, 0, 0, (size_t[]){outbuf.data-8});
- fcgi_set_record_size(outbuf.area, outbuf.data - 8);
+ fcgi_set_record_size(outbuf.area, outbuf.data - FCGI_RECORD_HEADER_SZ);
b_add(mbuf, outbuf.data);
ret = 1;
while (1) {
outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
- if (outbuf.size >= 8 || !b_space_wraps(mbuf))
+ if (outbuf.size >= FCGI_RECORD_HEADER_SZ || !b_space_wraps(mbuf))
break;
realign_again:
b_slow_realign(mbuf, trash.area, b_data(mbuf));
}
- if (outbuf.size < 8)
+ if (outbuf.size < FCGI_RECORD_HEADER_SZ)
goto full;
/* vsn: 1(FCGI_VERSION), type: (1)FCGI_BEGIN_REQUEST, id: fstrm->id,
* len: 0x0008, padding: 0x00, rsv: 0x00 */
- memcpy(outbuf.area, "\x01\x01\x00\x00\x00\x08\x00\x00", 8);
+ memcpy(outbuf.area, "\x01\x01\x00\x00\x00\x08\x00\x00", FCGI_RECORD_HEADER_SZ);
fcgi_set_record_id(outbuf.area, fstrm->id);
- outbuf.data = 8;
+ outbuf.data = FCGI_RECORD_HEADER_SZ;
if (fconn->flags & FCGI_CF_KEEP_CONN) {
TRACE_STATE("keep connection opened", FCGI_EV_TX_RECORD|FCGI_EV_TX_BEGREQ, fconn->conn, fstrm);
while (1) {
outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
- if (outbuf.size >= 8 || !b_space_wraps(mbuf))
+ if (outbuf.size >= FCGI_RECORD_HEADER_SZ || !b_space_wraps(mbuf))
break;
realign_again:
b_slow_realign(mbuf, trash.area, b_data(mbuf));
}
- if (outbuf.size < 8)
+ if (outbuf.size < FCGI_RECORD_HEADER_SZ)
goto full;
/* vsn: 1(FCGI_VERSION), type: rtype, id: fstrm->id,
* len: 0x0000, padding: 0x00, rsv: 0x00 */
- memcpy(outbuf.area, "\x01\x05\x00\x00\x00\x00\x00\x00", 8);
+ memcpy(outbuf.area, "\x01\x05\x00\x00\x00\x00\x00\x00", FCGI_RECORD_HEADER_SZ);
outbuf.area[1] = rtype;
fcgi_set_record_id(outbuf.area, fstrm->id);
- outbuf.data = 8;
+ outbuf.data = FCGI_RECORD_HEADER_SZ;
/* commit the record */
b_add(mbuf, outbuf.data);
TRACE_POINT(FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm);
ret = fcgi_strm_send_empty_record(fconn, fstrm, FCGI_PARAMS);
- if (ret)
+ if (ret) {
+ fstrm->flags |= FCGI_SF_EP_SENT;
TRACE_PROTO("FCGI PARAMS record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, 0, (size_t[]){0});
+ }
return ret;
}
while (1) {
outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
- if (outbuf.size >= 8 || !b_space_wraps(mbuf))
+ if (outbuf.size >= FCGI_RECORD_HEADER_SZ || !b_space_wraps(mbuf))
break;
realign_again:
b_slow_realign(mbuf, trash.area, b_data(mbuf));
}
- if (outbuf.size < 8)
+ if (outbuf.size < FCGI_RECORD_HEADER_SZ)
goto full;
/* vsn: 1(FCGI_VERSION), type: (4)FCGI_PARAMS, id: fstrm->id,
* len: 0x0000 (fill later), padding: 0x00, rsv: 0x00 */
- memcpy(outbuf.area, "\x01\x04\x00\x00\x00\x00\x00\x00", 8);
+ memcpy(outbuf.area, "\x01\x04\x00\x00\x00\x00\x00\x00", FCGI_RECORD_HEADER_SZ);
fcgi_set_record_id(outbuf.area, fstrm->id);
- outbuf.data = 8;
+ outbuf.data = FCGI_RECORD_HEADER_SZ;
blk = htx_get_head_blk(htx);
while (blk) {
if (!fcgi_encode_param(&outbuf, &p)) {
if (b_space_wraps(mbuf))
goto realign_again;
- if (outbuf.data == 8)
+ if (outbuf.data == FCGI_RECORD_HEADER_SZ)
goto full;
goto done;
}
if (!fcgi_encode_param(&outbuf, &p)) {
if (b_space_wraps(mbuf))
goto realign_again;
- if (outbuf.data == 8)
+ if (outbuf.data == FCGI_RECORD_HEADER_SZ)
goto full;
}
TRACE_STATE("add server name header", FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm);
goto error;
/* update the record's size */
- TRACE_PROTO("FCGI PARAMS record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm, 0, (size_t[]){outbuf.data - 8});
- fcgi_set_record_size(outbuf.area, outbuf.data - 8);
+ TRACE_PROTO("FCGI PARAMS record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm, 0, (size_t[]){outbuf.data - FCGI_RECORD_HEADER_SZ});
+ fcgi_set_record_size(outbuf.area, outbuf.data - FCGI_RECORD_HEADER_SZ);
b_add(mbuf, outbuf.data);
end:
struct buffer *mbuf;
struct htx_blk *blk;
enum htx_blk_type type;
- uint32_t size;
+ uint32_t size, extra_bytes;
size_t total = 0;
+ extra_bytes = 0;
+
TRACE_ENTER(FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx, (size_t[]){count});
if (!count)
goto end;
if (unlikely(size == count && htx_nbblks(htx) == 1 && type == HTX_BLK_DATA)) {
void *old_area = mbuf->area;
+ /* Last block of the message: Reserve the size for the empty stdin record */
+ if (htx->flags & HTX_FL_EOM)
+ extra_bytes = FCGI_RECORD_HEADER_SZ;
+
if (b_data(mbuf)) {
/* Too bad there are data left there. We're willing to memcpy/memmove
* up to 1/4 of the buffer, which means that it's OK to copy a large
* and that it's also OK to copy few data without realigning. Otherwise
* we'll pretend the mbuf is full and wait for it to become empty.
*/
- if (size + 8 <= b_room(mbuf) &&
+ if (size + FCGI_RECORD_HEADER_SZ + extra_bytes <= b_room(mbuf) &&
(b_data(mbuf) <= b_size(mbuf) / 4 ||
- (size <= b_size(mbuf) / 4 && size + 8 <= b_contig_space(mbuf))))
+ (size <= b_size(mbuf) / 4 && size + FCGI_RECORD_HEADER_SZ + extra_bytes <= b_contig_space(mbuf))))
goto copy;
goto full;
}
/* map a FCGI record to the HTX block so that we can put the
* record header there.
*/
- *mbuf = b_make(buf->area, buf->size, sizeof(struct htx) + blk->addr - 8, size + 8);
+ *mbuf = b_make(buf->area, buf->size, sizeof(struct htx) + blk->addr - FCGI_RECORD_HEADER_SZ, size + FCGI_RECORD_HEADER_SZ);
outbuf.area = b_head(mbuf);
/* prepend a FCGI record header just before the DATA block */
- memcpy(outbuf.area, "\x01\x05\x00\x00\x00\x00\x00\x00", 8);
+ memcpy(outbuf.area, "\x01\x05\x00\x00\x00\x00\x00\x00", FCGI_RECORD_HEADER_SZ);
fcgi_set_record_id(outbuf.area, fstrm->id);
fcgi_set_record_size(outbuf.area, size);
copy:
while (1) {
outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
- if (outbuf.size >= 8 || !b_space_wraps(mbuf))
+ if (outbuf.size >= FCGI_RECORD_HEADER_SZ + extra_bytes || !b_space_wraps(mbuf))
break;
realign_again:
b_slow_realign(mbuf, trash.area, b_data(mbuf));
}
- if (outbuf.size < 8)
+ if (outbuf.size < FCGI_RECORD_HEADER_SZ + extra_bytes)
goto full;
/* vsn: 1(FCGI_VERSION), type: (5)FCGI_STDIN, id: fstrm->id,
* len: 0x0000 (fill later), padding: 0x00, rsv: 0x00 */
- memcpy(outbuf.area, "\x01\x05\x00\x00\x00\x00\x00\x00", 8);
+ memcpy(outbuf.area, "\x01\x05\x00\x00\x00\x00\x00\x00", FCGI_RECORD_HEADER_SZ);
fcgi_set_record_id(outbuf.area, fstrm->id);
- outbuf.data = 8;
+ outbuf.data = FCGI_RECORD_HEADER_SZ;
blk = htx_get_head_blk(htx);
while (blk && count) {
case HTX_BLK_DATA:
TRACE_PROTO("sending stding data", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx, (size_t[]){size});
v = htx_get_blk_value(htx, blk);
- if (v.len > count)
+
+ if (htx_is_unique_blk(htx, blk) && (htx->flags & HTX_FL_EOM))
+ extra_bytes = FCGI_RECORD_HEADER_SZ; /* Last block of the message */
+
+ if (v.len > count) {
v.len = count;
+ extra_bytes = 0;
+ }
- if (v.len > b_room(&outbuf)) {
+ if (v.len + FCGI_RECORD_HEADER_SZ + extra_bytes > b_room(&outbuf)) {
/* It doesn't fit at once. If it at least fits once split and
* the amount of data to move is low, let's defragment the
* buffer now.
*/
if (b_space_wraps(mbuf) &&
- b_data(&outbuf) + v.len <= b_room(mbuf) &&
+ b_data(&outbuf) + v.len + extra_bytes <= b_room(mbuf) &&
b_data(mbuf) <= MAX_DATA_REALIGN)
goto realign_again;
- v.len = b_room(&outbuf);
+ v.len = b_room(&outbuf) - FCGI_RECORD_HEADER_SZ - extra_bytes;
}
if (!v.len || !chunk_memcat(&outbuf, v.ptr, v.len)) {
- if (outbuf.data == 8)
+ if (outbuf.data == FCGI_RECORD_HEADER_SZ)
goto full;
goto done;
}
}
break;
- case HTX_BLK_EOM:
- goto done;
-
default:
break;
}
done:
/* update the record's size */
- TRACE_PROTO("FCGI STDIN record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, 0, (size_t[]){outbuf.data - 8});
- fcgi_set_record_size(outbuf.area, outbuf.data - 8);
+ TRACE_PROTO("FCGI STDIN record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, 0, (size_t[]){outbuf.data - FCGI_RECORD_HEADER_SZ});
+ fcgi_set_record_size(outbuf.area, outbuf.data - FCGI_RECORD_HEADER_SZ);
b_add(mbuf, outbuf.data);
+	/* Send the empty STDIN record here to finish the message */
+ if (htx_is_empty(htx) && (htx->flags & HTX_FL_EOM)) {
+ TRACE_PROTO("sending FCGI STDIN record", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx);
+ if (!fcgi_strm_send_empty_stdin(fconn, fstrm)) {
+ /* bytes already reserved for this record. It should not fail */
+ htx->flags |= HTX_FL_PROCESSING_ERROR;
+ TRACE_PROTO("processing error", FCGI_EV_TX_RECORD|FCGI_EV_STRM_ERR, fconn->conn, fstrm);
+ fcgi_strm_error(fstrm);
+ }
+ }
+
end:
TRACE_LEAVE(FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx, (size_t[]){total});
return total;
* it's empty. Thus we cheat and pretend we already
* have a few bytes there.
*/
- max = buf_room_for_htx_data(buf) + (fconn->state == FCGI_CS_RECORD_H ? 8 : 0);
- buf->head = sizeof(struct htx) - (fconn->state == FCGI_CS_RECORD_H ? 8 : 0);
+ max = buf_room_for_htx_data(buf) + (fconn->state == FCGI_CS_RECORD_H ? FCGI_RECORD_HEADER_SZ : 0);
+ buf->head = sizeof(struct htx) - (fconn->state == FCGI_CS_RECORD_H ? FCGI_RECORD_HEADER_SZ : 0);
}
else
max = buf_room_for_htx_data(buf);
return ret;
}
-static size_t fcgi_strm_add_eom(struct fcgi_strm *fstrm, struct h1m *h1m, struct htx *htx,
- struct buffer *buf, size_t *ofs, size_t max)
-{
- int ret;
-
- TRACE_ENTER(FCGI_EV_RSP_DATA|FCGI_EV_RSP_EOM, fstrm->fconn->conn, fstrm, 0, (size_t[]){max});
- ret = h1_parse_msg_eom(h1m, htx, max);
- if (!ret) {
- TRACE_DEVEL("leaving on missing data or error", FCGI_EV_RSP_DATA|FCGI_EV_RSP_EOM, fstrm->fconn->conn, fstrm);
- if (htx->flags & HTX_FL_PARSING_ERROR) {
- TRACE_USER("rejected H1 response", FCGI_EV_RSP_DATA|FCGI_EV_RSP_EOM|FCGI_EV_FSTRM_ERR, fstrm->fconn->conn, fstrm);
- fcgi_strm_error(fstrm);
- fcgi_strm_capture_bad_message(fstrm->fconn, fstrm, h1m, buf);
- }
- goto end;
- }
- fstrm->flags |= FCGI_SF_H1_PARSING_DONE;
- end:
- TRACE_LEAVE(FCGI_EV_RSP_DATA|FCGI_EV_RSP_EOM, fstrm->fconn->conn, fstrm, 0, (size_t[]){ret});
- return ret;
-}
-
static size_t fcgi_strm_parse_response(struct fcgi_strm *fstrm, struct buffer *buf, size_t count)
{
struct fcgi_conn *fconn = fstrm->fconn;
if (!(h1m->flags & H1_MF_XFER_LEN) && fstrm->state != FCGI_SS_ERROR &&
(fstrm->flags & FCGI_SF_ES_RCVD) && b_data(&fstrm->rxbuf) == total) {
TRACE_DEVEL("end of data", FCGI_EV_RSP_DATA, fconn->conn, fstrm);
- if (!(h1m->flags & H1_MF_VER_11))
- fstrm->flags |= FCGI_SF_H1_PARSING_DONE;
+ htx->flags |= HTX_FL_EOM;
h1m->state = H1_MSG_DONE;
TRACE_USER("H1 response fully rcvd", FCGI_EV_RSP_DATA|FCGI_EV_RSP_EOM, fconn->conn, fstrm, htx);
}
TRACE_PROTO("rcvd H1 response trailers", FCGI_EV_RSP_DATA|FCGI_EV_RSP_TLRS, fconn->conn, fstrm, htx);
}
else if (h1m->state == H1_MSG_DONE) {
- if (!(fstrm->flags & FCGI_SF_H1_PARSING_DONE)) {
- if (!fcgi_strm_add_eom(fstrm, h1m, htx, &fstrm->rxbuf, &total, count))
- break;
-
- TRACE_USER("H1 response fully rcvd", FCGI_EV_RSP_DATA|FCGI_EV_RSP_EOM, fconn->conn, fstrm, htx);
- }
-
+ TRACE_USER("H1 response fully rcvd", FCGI_EV_RSP_DATA|FCGI_EV_RSP_EOM, fconn->conn, fstrm, htx);
if (b_data(&fstrm->rxbuf) > total) {
htx->flags |= HTX_FL_PARSING_ERROR;
TRACE_PROTO("too much data, parsing error", FCGI_EV_RSP_DATA, fconn->conn, fstrm);
else
TRACE_STATE("fstrm rxbuf not allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK, fconn->conn, fstrm);
- if (b_data(&fstrm->rxbuf) || (fstrm->h1m.state == H1_MSG_DONE && !(fstrm->flags & FCGI_SF_H1_PARSING_DONE)))
+ if (b_data(&fstrm->rxbuf))
cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
else {
cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
- if (fstrm->state == FCGI_SS_ERROR || (fstrm->flags & FCGI_SF_H1_PARSING_DONE)) {
+ if (fstrm->state == FCGI_SS_ERROR || (fstrm->h1m.state == H1_MSG_DONE)) {
cs->flags |= CS_FL_EOI;
if (!(fstrm->h1m.flags & (H1_MF_VER_11|H1_MF_XFER_LEN)))
cs->flags |= CS_FL_EOS;
* full. Otherwise, the request is invalid.
*/
sl = http_get_stline(htx);
- if (!sl || (!(sl->flags & HTX_SL_F_CLEN) && (htx_get_tail_type(htx) != HTX_BLK_EOM))) {
+ if (!sl || (!(sl->flags & HTX_SL_F_CLEN) && !(htx->flags & HTX_FL_EOM))) {
htx->flags |= HTX_FL_PARSING_ERROR;
fcgi_strm_error(fstrm);
goto done;
break;
case HTX_BLK_EOH:
- TRACE_PROTO("sending FCGI PARAMS record", FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm, htx);
- ret = fcgi_strm_send_empty_params(fconn, fstrm);
- if (!ret)
- goto done;
+ if (!(fstrm->flags & FCGI_SF_EP_SENT)) {
+ TRACE_PROTO("sending FCGI PARAMS record", FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm, htx);
+ ret = fcgi_strm_send_empty_params(fconn, fstrm);
+ if (!ret)
+ goto done;
+ }
+ if (htx_is_unique_blk(htx, blk) && (htx->flags & HTX_FL_EOM)) {
+ TRACE_PROTO("sending FCGI STDIN record", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx);
+ ret = fcgi_strm_send_empty_stdin(fconn, fstrm);
+ if (!ret)
+ goto done;
+ }
goto remove_blk;
case HTX_BLK_DATA:
}
break;
- case HTX_BLK_EOM:
- TRACE_PROTO("sending FCGI STDIN record", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx);
- ret = fcgi_strm_send_empty_stdin(fconn, fstrm);
- if (!ret)
- goto done;
- goto remove_blk;
-
default:
remove_blk:
htx_remove_blk(htx, blk);
#define H1S_F_WANT_MSK 0x00000070
#define H1S_F_NOT_FIRST 0x00000080 /* The H1 stream is not the first one */
-#define H1S_F_PARSING_DONE 0x00000200 /* Set when incoming message parsing is finished (EOM added) */
-
+/* 0x00000200 unused */
#define H1S_F_NOT_IMPL_ERROR 0x00000400 /* Set when a feature is not implemented during the message parsing */
#define H1S_F_PARSING_ERROR 0x00000800 /* Set when an error occurred during the message parsing */
#define H1S_F_PROCESSING_ERROR 0x00001000 /* Set when an error occurred during the message xfer */
const struct h1m *h1m;
h1m = ((h1s->h1c->flags & H1C_F_IS_BACK) ? &h1s->res : &h1s->req);
- if (h1m->state == H1_MSG_DONE)
- return !(h1s->flags & H1S_F_PARSING_DONE);
-
- return b_data(&h1s->h1c->ibuf);
+ return ((h1m->state == H1_MSG_DONE) ? 0 : b_data(&h1s->h1c->ibuf));
}
/* Creates a new conn-stream and the associate stream. <input> is used as input
TRACE_STATE("h1s on error, set error on h1c", H1_EV_H1C_ERR, h1c->conn, h1s);
}
- if (!(h1c->flags & (H1C_F_ST_ERROR|H1C_F_ST_SHUTDOWN)) && /* No error/shutdown on h1c */
+ if (!(h1c->flags & (H1C_F_ST_ERROR|H1C_F_ST_SHUTDOWN)) && /* No error/shutdown on h1c */
!(h1c->conn->flags & (CO_FL_ERROR|CO_FL_SOCK_RD_SH|CO_FL_SOCK_WR_SH)) && /* No error/shutdown on conn */
- (h1s->flags & (H1S_F_WANT_KAL|H1S_F_PARSING_DONE)) == (H1S_F_WANT_KAL|H1S_F_PARSING_DONE) && /* K/A possible */
+ (h1s->flags & H1S_F_WANT_KAL) && /* K/A possible */
h1s->req.state == H1_MSG_DONE && h1s->res.state == H1_MSG_DONE) { /* req/res in DONE state */
h1c->flags |= (H1C_F_ST_IDLE|H1C_F_WAIT_NEXT_REQ);
TRACE_STATE("set idle mode on h1c, waiting for the next request", H1_EV_H1C_ERR, h1c->conn, h1s);
h1s->res.flags &= ~(H1_MF_XFER_LEN|H1_MF_CLEN|H1_MF_CHNK);
TRACE_STATE("switch H1 stream in tunnel mode", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s);
- if (h1c->flags & H1C_F_IS_BACK)
- h1s->flags &= ~H1S_F_PARSING_DONE;
if (h1c->flags & H1C_F_WAIT_OUTPUT) {
h1c->flags &= ~H1C_F_WAIT_OUTPUT;
return ret;
}
-/*
- * Add the EOM in the HTX message. It returns 1 on success or 0 if it couldn't
- * proceed. This functions is responsible to update the parser state <h1m>.
- */
-static size_t h1_process_eom(struct h1s *h1s, struct h1m *h1m, struct htx *htx,
- struct buffer *buf, size_t *ofs, size_t max)
-{
- int ret;
-
- TRACE_ENTER(H1_EV_RX_DATA|H1_EV_RX_EOI, h1s->h1c->conn, h1s, 0, (size_t[]){max});
- ret = h1_parse_msg_eom(h1m, htx, max);
- if (!ret) {
- TRACE_DEVEL("leaving on missing data or error", H1_EV_RX_DATA|H1_EV_RX_EOI, h1s->h1c->conn, h1s);
- if (htx->flags & HTX_FL_PARSING_ERROR) {
- h1s->flags |= H1S_F_PARSING_ERROR;
- TRACE_USER("parsing error, reject H1 message", H1_EV_RX_DATA|H1_EV_RX_EOI|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
- h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
- }
- goto end;
- }
-
- h1s->flags |= H1S_F_PARSING_DONE;
- end:
- TRACE_LEAVE(H1_EV_RX_DATA|H1_EV_RX_EOI, h1s->h1c->conn, h1s, 0, (size_t[]){ret});
- return ret;
-}
-
/*
* Process incoming data. It parses data and transfer them from h1c->ibuf into
* <buf>. It returns the number of bytes parsed and transferred if > 0, or 0 if
H1_EV_RX_DATA|H1_EV_RX_TLRS, h1c->conn, h1s, htx, (size_t[]){ret});
}
else if (h1m->state == H1_MSG_DONE) {
- if (!(h1s->flags & H1S_F_PARSING_DONE)) {
- if (!h1_process_eom(h1s, h1m, htx, &h1c->ibuf, &total, count))
- break;
-
- TRACE_USER((!(h1m->flags & H1_MF_RESP) ? "H1 request fully rcvd" : "H1 response fully rcvd"),
- H1_EV_RX_DATA|H1_EV_RX_EOI, h1c->conn, h1s, htx);
- }
+ TRACE_USER((!(h1m->flags & H1_MF_RESP) ? "H1 request fully rcvd" : "H1 response fully rcvd"),
+ H1_EV_RX_DATA|H1_EV_RX_EOI, h1c->conn, h1s, htx);
if ((h1m->flags & H1_MF_RESP) &&
((h1s->meth == HTTP_METH_CONNECT && h1s->status >= 200 && h1s->status < 300) || h1s->status == 101))
* requests, wait the response to do so or not depending on the status
* code.
*/
- if ((h1s->flags & H1S_F_PARSING_DONE) && (h1s->meth != HTTP_METH_CONNECT) && !(h1m->flags & H1_MF_CONN_UPG))
+ if ((h1m->state == H1_MSG_DONE) && (h1s->meth != HTTP_METH_CONNECT) && !(h1m->flags & H1_MF_CONN_UPG))
h1s->cs->flags |= CS_FL_EOI;
if (h1s_data_pending(h1s) && !htx_is_empty(htx))
struct htx_blk *blk;
struct buffer tmp;
size_t total = 0;
+ int last_data = 0;
if (!count)
goto end;
void *old_area = h1c->obuf.area;
TRACE_PROTO("sending message data (zero-copy)", H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, chn_htx, (size_t[]){count});
+ if (h1m->state == H1_MSG_DATA && chn_htx->flags & HTX_FL_EOM) {
+ TRACE_DEVEL("last message block", H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s);
+ last_data = 1;
+ }
+
h1c->obuf.area = buf->area;
h1c->obuf.head = sizeof(struct htx) + blk->addr;
h1c->obuf.data = count;
htx_reset(chn_htx);
/* The message is chunked. We need to emit the chunk
- * size. We have at least the size of the struct htx to
- * write the chunk envelope. It should be enough.
+		 * size and, if needed, the last chunk. We have at least
+ * the size of the struct htx to write the chunk
+ * envelope. It should be enough.
*/
if (h1m->flags & H1_MF_CHNK) {
h1_emit_chunk_size(&h1c->obuf, count);
h1_emit_chunk_crlf(&h1c->obuf);
+ if (last_data) {
+ /* Emit the last chunk too at the buffer's end */
+ b_putblk(&h1c->obuf, "0\r\n\r\n", 5);
+ }
}
total += count;
else
TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "H1 request tunneled data xferred" : "H1 response tunneled data xferred"),
H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, 0, (size_t[]){count});
+
+ if (last_data) {
+ h1m->state = H1_MSG_DONE;
+ if (h1s->h1c->flags & H1C_F_WAIT_OUTPUT) {
+ h1s->h1c->flags &= ~H1C_F_WAIT_OUTPUT;
+ h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+ TRACE_STATE("Re-enable read on h1c", H1_EV_TX_DATA|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn, h1s);
+ }
+
+ TRACE_USER((!(h1m->flags & H1_MF_RESP) ? "H1 request fully xferred" : "H1 response fully xferred"),
+ H1_EV_TX_DATA, h1c->conn, h1s);
+ }
goto out;
}
tmp.area = h1c->obuf.area + h1c->obuf.head;
h1s->flags |= H1S_F_HAVE_SRV_NAME;
}
- if (!chunk_memcat(&tmp, "\r\n", 2))
- goto full;
-
TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "H1 request headers xferred" : "H1 response headers xferred"),
H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s);
if (!(h1m->flags & H1_MF_RESP) && h1s->meth == HTTP_METH_CONNECT) {
- /* Must have a EOM before tunnel data */
- h1m->state = H1_MSG_DONE;
+ if (!chunk_memcat(&tmp, "\r\n", 2))
+ goto full;
+ goto done;
}
else if ((h1m->flags & H1_MF_RESP) &&
((h1s->meth == HTTP_METH_CONNECT && h1s->status >= 200 && h1s->status < 300) || h1s->status == 101)) {
- /* Must have a EOM before tunnel data */
- h1m->state = H1_MSG_DONE;
+ if (!chunk_memcat(&tmp, "\r\n", 2))
+ goto full;
+ goto done;
}
else if ((h1m->flags & H1_MF_RESP) &&
h1s->status < 200 && (h1s->status == 100 || h1s->status >= 102)) {
+ if (!chunk_memcat(&tmp, "\r\n", 2))
+ goto full;
h1m_init_res(&h1s->res);
h1m->flags |= (H1_MF_NO_PHDR|H1_MF_CLEAN_CONN_HDR);
h1s->flags &= ~H1S_F_HAVE_O_CONN;
TRACE_STATE("1xx response xferred", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s);
}
else if ((h1m->flags & H1_MF_RESP) && h1s->meth == HTTP_METH_HEAD) {
- h1m->state = H1_MSG_DONE;
+ if (!chunk_memcat(&tmp, "\r\n", 2))
+ goto full;
TRACE_STATE("HEAD response processed", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s);
+ goto done;
}
- else
+ else {
+ /* EOM flag is set and it is the last block */
+ if (htx_is_unique_blk(chn_htx, blk) && (chn_htx->flags & HTX_FL_EOM)) {
+ if ((h1m->flags & H1_MF_CHNK) && !chunk_memcat(&tmp, "\r\n0\r\n\r\n", 7))
+ goto full;
+ else if (!chunk_memcat(&tmp, "\r\n", 2))
+ goto full;
+ goto done;
+ }
+ else if (!chunk_memcat(&tmp, "\r\n", 2))
+ goto full;
h1m->state = H1_MSG_DATA;
+ }
break;
case H1_MSG_DATA:
case H1_MSG_TUNNEL:
- if (type == HTX_BLK_EOM) {
- /* Chunked message without explicit trailers */
- if (h1m->flags & H1_MF_CHNK) {
- if (!chunk_memcat(&tmp, "0\r\n\r\n", 5))
- goto full;
- }
- goto done;
- }
- else if (type == HTX_BLK_EOT || type == HTX_BLK_TLR) {
+ if (type == HTX_BLK_EOT || type == HTX_BLK_TLR) {
/* If the message is not chunked, never
* add the last chunk. */
if ((h1m->flags & H1_MF_CHNK) && !chunk_memcat(&tmp, "0\r\n", 3))
TRACE_PROTO("sending message data", H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, chn_htx, (size_t[]){sz});
+ /* It is the last block of this message. After this one,
+ * only tunneled data may be forwarded. */
+ if (h1m->state == H1_MSG_DATA && htx_is_unique_blk(chn_htx, blk) && (chn_htx->flags & HTX_FL_EOM)) {
+ TRACE_DEVEL("last message block", H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s);
+ last_data = 1;
+ }
if (vlen > count) {
/* Get the maximum amount of data we can xferred */
vlen = count;
+ last_data = 0;
}
chklen = 0;
(chklen < 4096) ? 3 : (chklen < 65536) ? 4 :
(chklen < 1048576) ? 5 : 8);
chklen += 4; /* 2 x CRLF */
+
+ /* If it is the end of the chunked message (without EOT), reserve the
+ * last chunk size */
+ if (last_data)
+ chklen += 5;
}
if (vlen + chklen > b_room(&tmp)) {
if (chklen >= b_room(&tmp))
goto full;
vlen = b_room(&tmp) - chklen;
+ last_data = 0;
}
v = htx_get_blk_value(chn_htx, blk);
v.len = vlen;
if (!h1_format_htx_data(v, &tmp, !!(h1m->flags & H1_MF_CHNK)))
goto full;
+ /* Space already reserved, so it must succeed */
+ if ((h1m->flags & H1_MF_CHNK) && last_data && !chunk_memcat(&tmp, "0\r\n\r\n", 5))
+ goto error;
+
if (h1m->state == H1_MSG_DATA)
TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "H1 request payload data xferred" : "H1 response payload data xferred"),
H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, 0, (size_t[]){v.len});
else
TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "H1 request tunneled data xferred" : "H1 response tunneled data xferred"),
H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, 0, (size_t[]){v.len});
+ if (last_data)
+ goto done;
break;
case H1_MSG_TRAILERS:
- if (type == HTX_BLK_EOM)
- goto done;
- else if (type != HTX_BLK_TLR && type != HTX_BLK_EOT)
+ if (type != HTX_BLK_TLR && type != HTX_BLK_EOT)
goto error;
trailers:
h1m->state = H1_MSG_TRAILERS;
+
/* If the message is not chunked, ignore
* trailers. It may happen with H2 messages. */
if (!(h1m->flags & H1_MF_CHNK))
goto full;
TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "H1 request trailers xferred" : "H1 response trailers xferred"),
H1_EV_TX_DATA|H1_EV_TX_TLRS, h1c->conn, h1s);
+ goto done;
}
else { // HTX_BLK_TLR
n = htx_get_blk_name(chn_htx, blk);
break;
case H1_MSG_DONE:
- if (type != HTX_BLK_EOM)
- goto error;
+ TRACE_STATE("unexpected data xferred in done state", H1_EV_TX_DATA|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+ goto error; /* For now return an error */
+
done:
h1m->state = H1_MSG_DONE;
if (!(h1m->flags & H1_MF_RESP) && h1s->meth == HTTP_METH_CONNECT) {
if (!(msgf & H2_MSGF_RSP_1XX))
*flags |= H2_SF_HEADERS_RCVD;
- if (htx_get_tail_type(htx) != HTX_BLK_EOM && (h2c->dff & H2_F_HEADERS_END_STREAM)) {
- /* Mark the end of message using EOM */
- htx->flags |= HTX_FL_EOM; /* no more data are expected. Only EOM remains to add now */
- if (!htx_add_endof(htx, HTX_BLK_EOM)) {
- TRACE_STATE("failed to append HTX EOM block into rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR, h2c->conn);
- goto fail;
- }
+ if (h2c->dff & H2_F_HEADERS_END_STREAM) {
+ /* no more data are expected for this message */
+ htx->flags |= HTX_FL_EOM;
}
/* success */
	 * was aborted. Otherwise (request path + tunnel aborted), the
* EOM was already reported.
*/
- if ((h2c->flags & H2_CF_IS_BACK) || !(h2s->flags & H2_SF_TUNNEL_ABRT)) {
- htx->flags |= HTX_FL_EOM; /* no more data are expected. Only EOM remains to add now */
- if (!htx_add_endof(htx, HTX_BLK_EOM)) {
- TRACE_STATE("h2s rxbuf is full, failed to add EOM", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
- h2c->flags |= H2_CF_DEM_SFULL;
- goto fail;
- }
- }
+ if ((h2c->flags & H2_CF_IS_BACK) || !(h2s->flags & H2_SF_TUNNEL_ABRT))
+ htx->flags |= HTX_FL_EOM;
}
h2c->rcvd_c += h2c->dpl;
struct http_hdr list[global.tune.max_http_hdr];
struct h2c *h2c = h2s->h2c;
struct htx_blk *blk;
- struct htx_blk *blk_end;
struct buffer outbuf;
struct buffer *mbuf;
struct htx_sl *sl;
int es_now = 0;
int ret = 0;
int hdr;
- int idx;
TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
return 0;
}
- /* determine the first block which must not be deleted, blk_end may
- * be NULL if all blocks have to be deleted.
- */
- idx = htx_get_head(htx);
- blk_end = NULL;
- while (idx != -1) {
- type = htx_get_blk_type(htx_get_blk(htx, idx));
- idx = htx_get_next(htx, idx);
- if (type == HTX_BLK_EOH) {
- if (idx != -1)
- blk_end = htx_get_blk(htx, idx);
- break;
- }
- }
-
- /* get the start line, we do have one */
- blk = htx_get_head_blk(htx);
- BUG_ON(!blk || htx_get_blk_type(blk) != HTX_BLK_RES_SL);
- ALREADY_CHECKED(blk);
- sl = htx_get_blk_ptr(htx, blk);
- h2s->status = sl->info.res.status;
- if (h2s->status < 100 || h2s->status > 999) {
- TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
- goto fail;
- }
- else if (h2s->status == 101) {
- /* 101 responses are not supported in H2, so return a error (RFC7540#8.1.1) */
- TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
- goto fail;
- }
- else if ((h2s->flags & H2_SF_BODY_TUNNEL) && h2s->status >= 300) {
- /* Abort the tunnel attempt */
- h2s->flags &= ~H2_SF_BODY_TUNNEL;
- h2s->flags |= H2_SF_TUNNEL_ABRT;
- }
-
- /* and the rest of the headers, that we dump starting at header 0 */
+ /* get the start line (we do have one) and the rest of the headers,
+ * that we dump starting at header 0 */
+ sl = NULL;
hdr = 0;
-
- idx = htx_get_head(htx); // returns the SL that we skip
- while ((idx = htx_get_next(htx, idx)) != -1) {
- blk = htx_get_blk(htx, idx);
+ for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
type = htx_get_blk_type(blk);
if (type == HTX_BLK_UNUSED)
continue;
- if (type != HTX_BLK_HDR)
+ if (type == HTX_BLK_EOH)
break;
- if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
- TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ if (type == HTX_BLK_HDR) {
+ if (!sl) {
+ TRACE_ERROR("no start-line", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+ if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
+ TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+
+ list[hdr].n = htx_get_blk_name(htx, blk);
+ list[hdr].v = htx_get_blk_value(htx, blk);
+ hdr++;
+ }
+ else if (type == HTX_BLK_RES_SL) {
+ if (sl) {
+ TRACE_PROTO("multiple start-lines", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+ sl = htx_get_blk_ptr(htx, blk);
+ h2s->status = sl->info.res.status;
+ if (h2s->status < 100 || h2s->status > 999) {
+ TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+ else if (h2s->status == 101) {
+				/* 101 responses are not supported in H2, so return an error (RFC7540#8.1.1) */
+ TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+ else if ((h2s->flags & H2_SF_BODY_TUNNEL) && h2s->status >= 300) {
+ /* Abort the tunnel attempt */
+ h2s->flags &= ~H2_SF_BODY_TUNNEL;
+ h2s->flags |= H2_SF_TUNNEL_ABRT;
+ }
+ }
+ else {
+ TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
goto fail;
}
-
- list[hdr].n = htx_get_blk_name(htx, blk);
- list[hdr].v = htx_get_blk_value(htx, blk);
- hdr++;
}
/* marker for end of headers */
list[hdr].n = ist("");
- if (h2s->status == 204 || h2s->status == 304) {
- /* no contents, claim c-len is present and set to zero */
- es_now = 1;
- }
-
mbuf = br_tail(h2c->mbuf);
retry:
if (!h2_get_buf(h2c, mbuf)) {
}
}
- /* we may need to add END_STREAM except for 1xx responses.
- * FIXME: we should also set it when we know for sure that the
- * content-length is zero as well as on 204/304
+ /* remove all header blocks including the EOH and compute the
+ * corresponding size.
*/
- if ((h2s->flags & H2_SF_BODY_TUNNEL) && h2s->status >= 200 && h2s->status < 300) {
- /* Don't set EOM if a tunnel is successfully established
- * (2xx responses to a connect). In this case, the EOM must be found
- */
- if (!blk_end || htx_get_blk_type(blk_end) != HTX_BLK_EOM)
- goto fail;
+ ret = 0;
+ blk = htx_get_head_blk(htx);
+ while (blk) {
+ type = htx_get_blk_type(blk);
+ ret += htx_get_blksz(blk);
+ blk = htx_remove_blk(htx, blk);
+ /* The removed block is the EOH */
+ if (type == HTX_BLK_EOH)
+ break;
}
- else if (blk_end && htx_get_blk_type(blk_end) == HTX_BLK_EOM && h2s->status >= 200)
- es_now = 1;
- if (!h2s->cs || h2s->cs->flags & CS_FL_SHW)
+ if (!h2s->cs || h2s->cs->flags & CS_FL_SHW) {
+ /* Response already closed: add END_STREAM */
+ es_now = 1;
+ }
+ else if ((h2s->flags & H2_SF_BODY_TUNNEL) && h2s->status >= 200 && h2s->status < 300) {
+ /* Don't set ES if a tunnel is successfully established (2xx responses to a connect). */
+ }
+ else if (h2s->status == 204 || h2s->status == 304) {
+ /* no contents, claim c-len is present and set to zero */
es_now = 1;
+ }
+ else if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx) && h2s->status >= 200) {
+ /* EOM+empty: we may need to add END_STREAM except for 1xx
+ * responses.
+ */
+ es_now = 1;
+ }
if (es_now)
outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
}
/* OK we could properly deliver the response */
-
- /* remove all header blocks including the EOH and compute the
- * corresponding size.
- *
- * FIXME: We should remove everything when es_now is set.
- */
- ret = 0;
- idx = htx_get_head(htx);
- blk = htx_get_blk(htx, idx);
- while (blk != blk_end) {
- ret += htx_get_blksz(blk);
- blk = htx_remove_blk(htx, blk);
- }
-
- if (blk_end && htx_get_blk_type(blk_end) == HTX_BLK_EOM) {
- ret += htx_get_blksz(blk_end);
- htx_remove_blk(htx, blk_end);
- }
end:
TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
return ret;
struct http_hdr list[global.tune.max_http_hdr];
struct h2c *h2c = h2s->h2c;
struct htx_blk *blk;
- struct htx_blk *blk_end;
struct buffer outbuf;
struct buffer *mbuf;
struct htx_sl *sl;
int es_now = 0;
int ret = 0;
int hdr;
- int idx;
TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
return 0;
}
- /* determine the first block which must not be deleted, blk_end may
- * be NULL if all blocks have to be deleted.
- */
- idx = htx_get_head(htx);
- blk_end = NULL;
- while (idx != -1) {
- type = htx_get_blk_type(htx_get_blk(htx, idx));
- idx = htx_get_next(htx, idx);
- if (type == HTX_BLK_EOH) {
- if (idx != -1)
- blk_end = htx_get_blk(htx, idx);
- break;
- }
- }
-
- /* get the start line, we do have one */
- blk = htx_get_head_blk(htx);
- BUG_ON(!blk || htx_get_blk_type(blk) != HTX_BLK_REQ_SL);
- ALREADY_CHECKED(blk);
- sl = htx_get_blk_ptr(htx, blk);
- meth = htx_sl_req_meth(sl);
- uri = htx_sl_req_uri(sl);
- if (unlikely(uri.len == 0)) {
- TRACE_ERROR("no URI in HTX request", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
- goto fail;
- }
-
- /* and the rest of the headers, that we dump starting at header 0 */
+ /* get the start line (we do have one) and the rest of the headers,
+ * that we dump starting at header 0 */
+ sl = NULL;
hdr = 0;
-
- idx = htx_get_head(htx); // returns the SL that we skip
- while ((idx = htx_get_next(htx, idx)) != -1) {
- blk = htx_get_blk(htx, idx);
+ for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
type = htx_get_blk_type(blk);
if (type == HTX_BLK_UNUSED)
continue;
- if (type != HTX_BLK_HDR)
+ if (type == HTX_BLK_EOH)
break;
- if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
- TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
- goto fail;
+ if (type == HTX_BLK_REQ_SL) {
+ if (sl) {
+ TRACE_ERROR("multiple start-lines", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+ sl = htx_get_blk_ptr(htx, blk);
+ meth = htx_sl_req_meth(sl);
+ uri = htx_sl_req_uri(sl);
+ if (unlikely(uri.len == 0)) {
+ TRACE_ERROR("no URI in HTX request", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
}
+ else if (type == HTX_BLK_HDR) {
+ if (!sl) {
+ TRACE_ERROR("no start-line", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+ if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
+ TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
- list[hdr].n = htx_get_blk_name(htx, blk);
- list[hdr].v = htx_get_blk_value(htx, blk);
+ list[hdr].n = htx_get_blk_name(htx, blk);
+ list[hdr].v = htx_get_blk_value(htx, blk);
- /* Skip header if same name is used to add the server name */
- if ((h2c->flags & H2_CF_IS_BACK) && h2c->proxy->server_id_hdr_name &&
- isteq(list[hdr].n, ist2(h2c->proxy->server_id_hdr_name, h2c->proxy->server_id_hdr_len)))
- continue;
+ /* Skip header if same name is used to add the server name */
+ if ((h2c->flags & H2_CF_IS_BACK) && h2c->proxy->server_id_hdr_name &&
+ isteq(list[hdr].n, ist2(h2c->proxy->server_id_hdr_name, h2c->proxy->server_id_hdr_len)))
+ continue;
- hdr++;
+ hdr++;
+ }
+ else {
+ TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
}
/* Now add the server name to a header (if requested) */
goto realign_again;
goto full;
}
+ h2s->flags |= H2_SF_BODY_TUNNEL;
} else {
/* other methods need a :scheme. If an authority is known from
* the request line, it must be sent, otherwise only host is
}
}
- /* we may need to add END_STREAM if we have no body :
- * - request already closed, or :
- * - no transfer-encoding, and :
- * - no content-length or content-length:0
- * except for CONNECT requests.
+ /* remove all header blocks including the EOH and compute the
+ * corresponding size.
*/
- if (likely(sl->info.req.meth != HTTP_METH_CONNECT)) {
- if (blk_end && htx_get_blk_type(blk_end) == HTX_BLK_EOM)
- es_now = 1;
- if (sl->flags & HTX_SL_F_BODYLESS)
- es_now = 1;
- }
- else {
- /* For CONNECT requests, the EOM must be found and eaten without setting the ES */
- if (!blk_end || htx_get_blk_type(blk_end) != HTX_BLK_EOM)
- goto fail;
- h2s->flags |= H2_SF_BODY_TUNNEL;
+ ret = 0;
+ blk = htx_get_head_blk(htx);
+ while (blk) {
+ type = htx_get_blk_type(blk);
+ ret += htx_get_blksz(blk);
+ blk = htx_remove_blk(htx, blk);
+ /* The removed block is the EOH */
+ if (type == HTX_BLK_EOH)
+ break;
}
- if (!h2s->cs || h2s->cs->flags & CS_FL_SHW)
+ if (!h2s->cs || h2s->cs->flags & CS_FL_SHW) {
+ /* Request already closed: add END_STREAM */
es_now = 1;
+ }
+ if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
+ /* EOM+empty: we may need to add END_STREAM (except for CONNECT
+		 * requests)
+ */
+ if (!(h2s->flags & H2_SF_BODY_TUNNEL))
+ es_now = 1;
+ }
if (es_now)
outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
h2s->st = H2_SS_HLOC;
}
- /* remove all header blocks including the EOH and compute the
- * corresponding size.
- *
- * FIXME: We should remove everything when es_now is set.
- */
- ret = 0;
- idx = htx_get_head(htx);
- blk = htx_get_blk(htx, idx);
- while (blk != blk_end) {
- ret += htx_get_blksz(blk);
- blk = htx_remove_blk(htx, blk);
- }
-
- if (blk_end && htx_get_blk_type(blk_end) == HTX_BLK_EOM) {
- ret += htx_get_blksz(blk_end);
- htx_remove_blk(htx, blk_end);
- }
-
end:
return ret;
full:
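
The end of the request is now detected here without any dedicated block: once every
header block up to and including the EOH has been removed, END_STREAM may be added when
the message carries HTX_FL_EOM and nothing remains in it, unless the stream is a CONNECT
tunnel. The same test reappears in the DATA path further down. A minimal standalone
sketch of that decision (the helper name is illustrative and not part of the patch):

    /* Illustrative helper only: mirrors the END_STREAM decision used in the
     * hunks above and below. HTX_FL_EOM plus an empty message means no more
     * data will follow, except on tunneled (CONNECT) streams where payload
     * is still expected.
     */
    static inline int h2s_eom_reached(const struct htx *htx, const struct h2s *h2s)
    {
            return (htx->flags & HTX_FL_EOM) && htx_is_empty(htx) &&
                   !(h2s->flags & H2_SF_BODY_TUNNEL);
    }
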
* present in <buf>, for stream <h2s>. Returns the number of bytes sent. The
* caller must check the stream's status to detect any error which might have
* happened subsequently to a successful send. Returns the number of data bytes
- * consumed, or zero if nothing done. Note that EOM count for 1 byte.
+ * consumed, or zero if nothing done.
*/
static size_t h2s_make_data(struct h2s *h2s, struct buffer *buf, size_t count)
{
int fsize; /* h2 frame size */
struct htx_blk *blk;
enum htx_blk_type type;
- int idx;
int trunc_out; /* non-zero if truncated on out buf */
TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
htx = htx_from_buf(buf);
- /* We only come here with HTX_BLK_DATA blocks. However, while looping,
- * we can meet an HTX_BLK_EOM block that we'll leave to the caller to
- * handle.
- */
+ /* We only come here with HTX_BLK_DATA blocks */
new_frame:
if (!count || htx_is_empty(htx))
goto end;
- idx = htx_get_head(htx);
- blk = htx_get_blk(htx, idx);
- type = htx_get_blk_type(blk); // DATA or EOM
- bsize = htx_get_blksz(blk);
- fsize = bsize;
- trunc_out = 0;
-
- if (type == HTX_BLK_EOM) {
- if (h2s->flags & H2_SF_ES_SENT) {
- /* ES already sent */
- htx_remove_blk(htx, blk);
- total++; // EOM counts as one byte
- count--;
- goto end;
- }
- }
- else if (type != HTX_BLK_DATA)
- goto end;
- else if ((h2c->flags & H2_CF_IS_BACK) &&
+ if ((h2c->flags & H2_CF_IS_BACK) &&
(h2s->flags & (H2_SF_HEADERS_RCVD|H2_SF_BODY_TUNNEL)) == H2_SF_BODY_TUNNEL) {
/* The response HEADERS frame not received yet. Thus the tunnel
* is not fully established yet. In this situation, we block
goto end;
}
+ blk = htx_get_head_blk(htx);
+ type = htx_get_blk_type(blk);
+ bsize = htx_get_blksz(blk);
+ fsize = bsize;
+ trunc_out = 0;
+ if (type != HTX_BLK_DATA)
+ goto end;
+
mbuf = br_tail(h2c->mbuf);
retry:
if (!h2_get_buf(h2c, mbuf)) {
* unblocked on window opening. Note: we don't implement padding.
*/
- /* EOM is presented with bsize==1 but would lead to the emission of an
- * empty frame, thus we force it to zero here.
- */
- if (type == HTX_BLK_EOM)
- bsize = fsize = 0;
-
if (!fsize)
goto send_empty;
/* update the frame's size */
h2_set_frame_size(outbuf.area, fsize);
- /* FIXME: for now we only set the ES flag on empty DATA frames, once
- * meeting EOM. We should optimize this later.
- */
- if (type == HTX_BLK_EOM) {
- total++; // EOM counts as one byte
- count--;
-
- /* EOM+empty: we may need to add END_STREAM (except for tunneled
- * message)
- */
- if (!(h2s->flags & H2_SF_BODY_TUNNEL))
- es_now = 1;
- }
-
- if (es_now)
- outbuf.area[4] |= H2_F_DATA_END_STREAM;
-
- /* commit the H2 response */
- b_add(mbuf, fsize + 9);
-
- /* consume incoming HTX block, including EOM */
+ /* consume incoming HTX block */
total += fsize;
if (fsize == bsize) {
htx_remove_blk(htx, blk);
- if (fsize) {
- TRACE_DEVEL("more data available, trying to send another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
- goto new_frame;
+ if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
+ /* EOM+empty: we may need to add END_STREAM (except for tunneled
+			 * messages)
+ */
+ if (!(h2s->flags & H2_SF_BODY_TUNNEL))
+ es_now = 1;
}
- } else {
+ }
+ else {
/* we've truncated this block */
htx_cut_data_blk(htx, blk, fsize);
- if (trunc_out)
- goto new_frame;
}
+ if (es_now)
+ outbuf.area[4] |= H2_F_DATA_END_STREAM;
+
+ /* commit the H2 response */
+ b_add(mbuf, fsize + 9);
+
if (es_now) {
if (h2s->st == H2_SS_OPEN)
h2s->st = H2_SS_HLOC;
h2s->flags |= H2_SF_ES_SENT;
TRACE_PROTO("ES flag set on outgoing frame", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
}
+ else if (fsize) {
+ if (fsize == bsize) {
+ TRACE_DEVEL("more data may be available, trying to send another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ goto new_frame;
+ }
+ else if (trunc_out) {
+ /* we've truncated this block */
+ goto new_frame;
+ }
+ }
end:
TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
* which might have happened subsequently to a successful send. The htx blocks
* are automatically removed from the message. The htx message is assumed to be
* valid since produced from the internal code. Processing stops when meeting
- * the EOM, which is *not* removed. All trailers are processed at once and sent
- * as a single frame. The ES flag is always set.
+ * the EOT, which *is* removed. All trailers are processed at once and sent as a
+ * single frame. The ES flag is always set.
*/
static size_t h2s_make_trailers(struct h2s *h2s, struct htx *htx)
{
struct http_hdr list[global.tune.max_http_hdr];
struct h2c *h2c = h2s->h2c;
struct htx_blk *blk;
- struct htx_blk *blk_end;
struct buffer outbuf;
struct buffer *mbuf;
enum htx_blk_type type;
goto end;
}
- /* determine the first block which must not be deleted, blk_end may
- * be NULL if all blocks have to be deleted. also get trailers.
- */
- idx = htx_get_head(htx);
- blk_end = NULL;
-
+ /* get trailers. */
hdr = 0;
- while (idx != -1) {
- blk = htx_get_blk(htx, idx);
+ for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
type = htx_get_blk_type(blk);
- idx = htx_get_next(htx, idx);
+
if (type == HTX_BLK_UNUSED)
continue;
- if (type == HTX_BLK_EOT) {
- if (idx != -1)
- blk_end = blk;
- break;
- }
- if (type != HTX_BLK_TLR)
+ if (type == HTX_BLK_EOT)
break;
+ if (type == HTX_BLK_TLR) {
+ if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
+				TRACE_ERROR("too many trailers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
- if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
- TRACE_ERROR("too many trailers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ list[hdr].n = htx_get_blk_name(htx, blk);
+ list[hdr].v = htx_get_blk_value(htx, blk);
+ hdr++;
+ }
+ else {
+ TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
goto fail;
}
-
- list[hdr].n = htx_get_blk_name(htx, blk);
- list[hdr].v = htx_get_blk_value(htx, blk);
- hdr++;
}
/* marker for end of trailers */
done:
/* remove all header blocks till the end and compute the corresponding size. */
ret = 0;
- idx = htx_get_head(htx);
- blk = htx_get_blk(htx, idx);
- while (blk != blk_end) {
+ blk = htx_get_head_blk(htx);
+ while (blk) {
+ type = htx_get_blk_type(blk);
ret += htx_get_blksz(blk);
blk = htx_remove_blk(htx, blk);
- }
-
- if (blk_end && htx_get_blk_type(blk_end) == HTX_BLK_EOM) {
- ret += htx_get_blksz(blk_end);
- htx_remove_blk(htx, blk_end);
+ /* The removed block is the EOT */
+ if (type == HTX_BLK_EOT)
+ break;
}
end:
goto end;
}
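
Both the request-headers path and the trailers path above now consume blocks with the
same pattern: walk from the head of the HTX message, accumulate block sizes, and stop
once the end marker (EOH or EOT respectively) has itself been removed, so that its size
is counted in the returned total. A condensed sketch of that pattern (the helper name
and the <stop> parameter are illustrative only):

    /* Illustrative only: remove blocks from the head of <htx> up to and
     * including the first block of type <stop> (HTX_BLK_EOH or HTX_BLK_EOT),
     * and return the number of bytes they represented.
     */
    static size_t htx_drain_until(struct htx *htx, enum htx_blk_type stop)
    {
            struct htx_blk *blk = htx_get_head_blk(htx);
            size_t ret = 0;

            while (blk) {
                    enum htx_blk_type type = htx_get_blk_type(blk);

                    ret += htx_get_blksz(blk);
                    blk = htx_remove_blk(htx, blk);
                    if (type == stop)
                            break;
            }
            return ret;
    }
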
- htx_xfer_blks(buf_htx, h2s_htx, count, HTX_BLK_EOM);
+ htx_xfer_blks(buf_htx, h2s_htx, count, HTX_BLK_UNUSED);
if (h2s_htx->flags & HTX_FL_PARSING_ERROR) {
buf_htx->flags |= HTX_FL_PARSING_ERROR;
break;
case HTX_BLK_DATA:
- case HTX_BLK_EOM:
- /* all these cause the emission of a DATA frame (possibly empty).
- * This EOM necessarily is one before trailers, as the EOM following
- * trailers would have been consumed by the trailers parser.
- */
+		/* this causes the emission of a DATA frame (possibly empty) */
ret = h2s_make_data(h2s, buf, count);
if (ret > 0) {
htx = htx_from_buf(buf);
case HTX_BLK_TLR:
case HTX_BLK_EOT:
- /* This is the first trailers block, all the subsequent ones AND
- * the EOM will be swallowed by the parser.
- */
+		/* This is the first trailers block, all the subsequent ones will be processed at once */
ret = h2s_make_trailers(h2s, htx);
if (ret > 0) {
total += ret;
while (blk) {
enum htx_blk_type type = htx_get_blk_type(blk);
- if (type == HTX_BLK_EOM || type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
break;
if (type == HTX_BLK_DATA) {
struct ist v = htx_get_blk_value(htx, blk);
}
if (appctx->st0 == STAT_HTTP_DONE) {
- /* Don't add TLR because mux-h1 will take care of it */
- res_htx->flags |= HTX_FL_EOM; /* no more data are expected. Only EOM remains to add now */
- if (!htx_add_endof(res_htx, HTX_BLK_EOM)) {
- si_rx_room_blk(si);
- goto out;
- }
- channel_add_input(&s->res, 1);
+ /* no more data are expected. Don't add TLR because mux-h1 will take care of it */
+ res_htx->flags |= HTX_FL_EOM;
appctx->st0 = STAT_HTTP_END;
}
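
On the producing side (stats above, the check request below), terminating a message no
longer requires reserving room for a final block: setting the flag on the HTX message is
enough. A minimal sketch of that producer pattern, using only the HTX calls visible in
these hunks (the helper name is illustrative):

    /* Illustrative only: append a last body chunk, if any, and mark the end
     * of the message. No end-of-message block is added any more; the
     * HTX_FL_EOM flag alone tells consumers that no more data (other than
     * tunneled data) will follow.
     */
    static int append_final_chunk(struct htx *htx, const struct ist data)
    {
            if (istlen(data) && !htx_add_data_atonce(htx, data))
                    return 0; /* not enough room in the HTX message */
            htx->flags |= HTX_FL_EOM; /* no more data are expected */
            return 1;
    }
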
(istlen(body) && !htx_add_data_atonce(htx, body)))
goto error_htx;
- htx->flags |= HTX_FL_EOM; /* no more data are expected. Only EOM remains to add now */
- if (!htx_add_endof(htx, HTX_BLK_EOM))
- goto error_htx;
-
+ /* no more data are expected */
+ htx->flags |= HTX_FL_EOM;
htx_to_buf(htx, &check->bo);
break;
}
struct ist desc = IST_NULL;
int i, match, inverse;
- last_read |= (!htx_free_data_space(htx) || (htx_get_tail_type(htx) == HTX_BLK_EOM));
+ last_read |= (!htx_free_data_space(htx) || (htx->flags & HTX_FL_EOM));
if (htx->flags & HTX_FL_PARSING_ERROR) {
status = HCHK_STATUS_L7RSP;
for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
enum htx_blk_type type = htx_get_blk_type(blk);
- if (type == HTX_BLK_EOM || type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
break;
if (type == HTX_BLK_DATA) {
if (!chunk_istcat(&trash, htx_get_blk_value(htx, blk)))