CURLE_WRITE_ERROR : CURLE_OK;
if(result)
failf(data, "hyperstream: couldn't pass blank header");
+ /* Hyper does chunked decoding itself. If it was added during
+ * response header processing, remove it again. */
+ Curl_cwriter_remove_by_name(data, "chunked");
}
return result;
}
struct dynbuf request_data;
size_t nsent;
size_t headerlines;
+ struct Curl_chunker ch;
enum keeponval {
KEEPON_DONE,
KEEPON_CONNECT,
Curl_dyn_init(&ts->rcvbuf, DYN_PROXY_CONNECT_HEADERS);
Curl_dyn_init(&ts->request_data, DYN_HTTP_REQUEST);
+ Curl_httpchunk_init(data, &ts->ch, TRUE);
*pts = ts;
connkeep(cf->conn, "HTTP proxy CONNECT");
{
if(ts->tunnel_state == new_state)
return;
- /* leaving this one */
- switch(ts->tunnel_state) {
- case H1_TUNNEL_CONNECT:
- data->req.ignorebody = FALSE;
- break;
- default:
- break;
- }
/* entering this one */
switch(new_state) {
case H1_TUNNEL_INIT:
h1_tunnel_go_state(cf, ts, H1_TUNNEL_FAILED, data);
Curl_dyn_free(&ts->rcvbuf);
Curl_dyn_free(&ts->request_data);
+ Curl_httpchunk_free(data, &ts->ch);
free(ts);
cf->ctx = NULL;
}
STRCONST("chunked"))) {
infof(data, "CONNECT responded chunked");
ts->chunked_encoding = TRUE;
- /* init our chunky engine */
- Curl_httpchunk_init(data);
+ /* reset our chunky engine */
+ Curl_httpchunk_reset(data, &ts->ch, TRUE);
}
}
else if(Curl_compareheader(header,
struct SingleRequest *k = &data->req;
curl_socket_t tunnelsocket = Curl_conn_cf_get_socket(cf, data);
char *linep;
- size_t perline;
+ size_t line_len;
int error, writetype;
#define SELECT_OK 0
break;
}
}
- else {
+ else if(ts->chunked_encoding) {
/* chunked-encoded body, so we need to do the chunked dance
properly to know when the end of the body is reached */
- CHUNKcode r;
- CURLcode extra;
size_t consumed = 0;
/* now parse the chunked piece of data so that we can
properly tell when the stream ends */
- r = Curl_httpchunk_read(data, &byte, 1, &consumed, &extra);
- if(r == CHUNKE_STOP) {
+ result = Curl_httpchunk_read(data, &ts->ch, &byte, 1, &consumed);
+ if(result)
+ return result;
+ if(Curl_httpchunk_is_done(data, &ts->ch)) {
/* we're done reading chunks! */
infof(data, "chunk reading DONE");
ts->keepon = KEEPON_DONE;
ts->headerlines++;
linep = Curl_dyn_ptr(&ts->rcvbuf);
- perline = Curl_dyn_len(&ts->rcvbuf); /* amount of bytes in this line */
+ line_len = Curl_dyn_len(&ts->rcvbuf); /* amount of bytes in this line */
/* output debug if that is requested */
- Curl_debug(data, CURLINFO_HEADER_IN, linep, perline);
+ Curl_debug(data, CURLINFO_HEADER_IN, linep, line_len);
/* send the header to the callback */
writetype = CLIENTWRITE_HEADER | CLIENTWRITE_CONNECT |
(ts->headerlines == 1 ? CLIENTWRITE_STATUS : 0);
- result = Curl_client_write(data, writetype, linep, perline);
+ result = Curl_client_write(data, writetype, linep, line_len);
if(result)
return result;
- result = Curl_bump_headersize(data, perline, TRUE);
+ result = Curl_bump_headersize(data, line_len, TRUE);
if(result)
return result;
" bytes of response-body", ts->cl);
}
else if(ts->chunked_encoding) {
- CHUNKcode r;
- CURLcode extra;
- size_t consumed = 0;
-
infof(data, "Ignore chunked response-body");
-
- /* We set ignorebody true here since the chunked decoder
- function will acknowledge that. Pay attention so that this is
- cleared again when this function returns! */
- k->ignorebody = TRUE;
-
- if(linep[1] == '\n')
- /* this can only be a LF if the letter at index 0 was a CR */
- linep++;
-
- /* now parse the chunked piece of data so that we can properly
- tell when the stream ends */
- r = Curl_httpchunk_read(data, linep + 1, 1, &consumed, &extra);
- if(r == CHUNKE_STOP) {
- /* we're done reading chunks! */
- infof(data, "chunk reading DONE");
- ts->keepon = KEEPON_DONE;
- }
}
else {
/* without content-length or chunked encoding, we
*done = (result == CURLE_OK) && tunnel_is_established(cf->ctx);
if(*done) {
cf->connected = TRUE;
+ /* Restore `data->req` fields that may have been touched */
+ data->req.header = TRUE; /* assume header */
+ data->req.bytecount = 0;
+ data->req.ignorebody = FALSE;
+ Curl_client_cleanup(data);
+ Curl_pgrsSetUploadCounter(data, 0);
+ Curl_pgrsSetDownloadCounter(data, 0);
+
tunnel_free(cf, data);
}
return result;
};
-/* supported content encodings table. */
-static const struct Curl_cwtype * const encodings[] = {
+/* supported general content decoders. */
+static const struct Curl_cwtype * const general_unencoders[] = {
&identity_encoding,
#ifdef HAVE_LIBZ
&deflate_encoding,
NULL
};
+/* supported content decoders only for transfer encodings */
+static const struct Curl_cwtype * const transfer_unencoders[] = {
+#ifndef CURL_DISABLE_HTTP
+ &Curl_httpchunk_unencoder,
+#endif
+ NULL
+};
/* Provide a list of comma-separated names of supported encodings.
*/
DEBUGASSERT(blen);
buf[0] = 0;
- for(cep = encodings; *cep; cep++) {
+ for(cep = general_unencoders; *cep; cep++) {
ce = *cep;
if(!strcasecompare(ce->name, CONTENT_ENCODING_DEFAULT))
len += strlen(ce->name) + 2;
}
else if(blen > len) {
char *p = buf;
- for(cep = encodings; *cep; cep++) {
+ for(cep = general_unencoders; *cep; cep++) {
ce = *cep;
if(!strcasecompare(ce->name, CONTENT_ENCODING_DEFAULT)) {
strcpy(p, ce->name);
};
/* Find the content encoding by name. */
-static const struct Curl_cwtype *find_encoding(const char *name,
- size_t len)
+static const struct Curl_cwtype *find_unencode_writer(const char *name,
+ size_t len,
+ Curl_cwriter_phase phase)
{
const struct Curl_cwtype * const *cep;
- for(cep = encodings; *cep; cep++) {
+ if(phase == CURL_CW_TRANSFER_DECODE) {
+ for(cep = transfer_unencoders; *cep; cep++) {
+ const struct Curl_cwtype *ce = *cep;
+ if((strncasecompare(name, ce->name, len) && !ce->name[len]) ||
+ (ce->alias && strncasecompare(name, ce->alias, len)
+ && !ce->alias[len]))
+ return ce;
+ }
+ }
+ /* look among the general decoders */
+ for(cep = general_unencoders; *cep; cep++) {
const struct Curl_cwtype *ce = *cep;
if((strncasecompare(name, ce->name, len) && !ce->name[len]) ||
(ce->alias && strncasecompare(name, ce->alias, len) && !ce->alias[len]))
CURLcode Curl_build_unencoding_stack(struct Curl_easy *data,
const char *enclist, int is_transfer)
{
- struct SingleRequest *k = &data->req;
Curl_cwriter_phase phase = is_transfer?
CURL_CW_TRANSFER_DECODE:CURL_CW_CONTENT_DECODE;
CURLcode result;
if(!ISSPACE(*enclist))
namelen = enclist - name + 1;
- /* Special case: chunked encoding is handled at the reader level. */
- if(is_transfer && namelen == 7 && strncasecompare(name, "chunked", 7)) {
- k->chunk = TRUE; /* chunks coming our way. */
- Curl_httpchunk_init(data); /* init our chunky engine. */
- }
- else if(namelen) {
+ if(namelen) {
const struct Curl_cwtype *cwt;
struct Curl_cwriter *writer;
- if((is_transfer && !data->set.http_transfer_encoding) ||
+ /* if we skip the decoding in this phase, do not look further.
+ * The exception is the "chunked" transfer-encoding, which must always
+ * be handled */
+ if((is_transfer && !data->set.http_transfer_encoding &&
+ (namelen != 7 || !strncasecompare(name, "chunked", 7))) ||
(!is_transfer && data->set.http_ce_skip)) {
/* not requested, ignore */
return CURLE_OK;
return CURLE_BAD_CONTENT_ENCODING;
}
- cwt = find_encoding(name, namelen);
+ cwt = find_unencode_writer(name, namelen, phase);
if(!cwt)
cwt = &error_writer; /* Defer error at use. */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
rtmp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_RTMP, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
rtmp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_RTMPT, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
rtmp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_RTMP, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
rtmp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_RTMPT, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
rtmp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_RTMPS, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
rtmp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_RTMPS, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_DICT, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
file_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
0, /* defport */
ftp_domore_getsock, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ftp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_FTP, /* defport */
ftp_domore_getsock, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ftp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_FTPS, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_GOPHER, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_GOPHER, /* defport */
static CURLcode http_setup_conn(struct Curl_easy *data,
struct connectdata *conn);
+static CURLcode http_write_resp(struct Curl_easy *data,
+ const char *buf, size_t blen,
+ bool is_eos,
+ bool *done);
+
#ifdef USE_WEBSOCKETS
static CURLcode ws_setup_conn(struct Curl_easy *data,
struct connectdata *conn);
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
- ZERO_NULL, /* readwrite */
+ http_write_resp, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_HTTP, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
Curl_ws_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ http_write_resp, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_HTTP, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
- ZERO_NULL, /* readwrite */
+ http_write_resp, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_HTTPS, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
Curl_ws_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ http_write_resp, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_HTTPS, /* defport */
{
struct SingleRequest *k = &data->req;
+ *done = FALSE;
if(data->req.newurl) {
if(conn->bits.close) {
/* Abort after the headers if "follow Location" is set
/*
* Read any HTTP header lines from the server and pass them to the client app.
*/
-CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
- struct connectdata *conn,
- const char *buf, size_t blen,
- size_t *pconsumed)
+static CURLcode http_rw_headers(struct Curl_easy *data,
+ const char *buf, size_t blen,
+ size_t *pconsumed)
{
- CURLcode result;
+ struct connectdata *conn = data->conn;
+ CURLcode result = CURLE_OK;
struct SingleRequest *k = &data->req;
char *headp;
char *end_ptr;
+ bool leftover_body = FALSE;
/* header line within buffer loop */
*pconsumed = 0;
if(st == STATUS_BAD) {
/* this is not the beginning of a protocol first header line */
k->header = FALSE;
- k->badheader = TRUE;
streamclose(conn, "bad HTTP: No end-of-message indicator");
if(!data->set.http09_allowed) {
failf(data, "Received HTTP/0.9 when not allowed");
return CURLE_UNSUPPORTED_PROTOCOL;
}
+ leftover_body = TRUE;
goto out;
}
}
return CURLE_UNSUPPORTED_PROTOCOL;
}
k->header = FALSE;
- if(blen)
- /* since there's more, this is a partial bad header */
- k->badheader = TRUE;
- else {
- /* this was all we read so it's all a bad header */
- k->badheader = TRUE;
- return CURLE_OK;
- }
- break;
+ leftover_body = TRUE;
+ goto out;
}
}
headp = Curl_dyn_ptr(&data->state.headerb);
if((0x0a == *headp) || (0x0d == *headp)) {
size_t headerlen;
+ bool switch_to_h2 = FALSE;
/* Zero-length header line means end of headers! */
if('\r' == *headp)
/* we'll get more headers (HTTP/2 response) */
k->header = TRUE;
k->headerline = 0; /* restart the header line counter */
-
- /* switch to http2 now. The bytes after response headers
- are also processed here, otherwise they are lost. */
- result = Curl_http2_upgrade(data, conn, FIRSTSOCKET, buf, blen);
- if(result)
- return result;
- *pconsumed += blen;
- blen = 0;
+ switch_to_h2 = TRUE;
}
#ifdef USE_WEBSOCKETS
else if(k->upgr101 == UPGR101_WS) {
*/
if(data->req.no_body)
k->download_done = TRUE;
-#ifndef CURL_DISABLE_RTSP
- else if((conn->handler->protocol & CURLPROTO_RTSP) &&
- (data->set.rtspreq == RTSPREQ_DESCRIBE) &&
- (k->size <= -1))
- /* Respect section 4.4 of rfc2326: If the Content-Length header is
- absent, a length 0 must be assumed. It will prevent libcurl from
- hanging on DESCRIBE request that got refused for whatever
- reason */
- k->download_done = TRUE;
-#endif
/* If max download size is *zero* (nothing) we already have
nothing and can safely return ok now! But for HTTP/2, we'd
/* We continue reading headers, reset the line-based header */
Curl_dyn_reset(&data->state.headerb);
+ if(switch_to_h2) {
+ /* Having handled the headers, we can do the HTTP/2 switch.
+ * Any remaining `buf` bytes are already HTTP/2 and passed to
+ * be processed. */
+ result = Curl_http2_upgrade(data, conn, FIRSTSOCKET, buf, blen);
+ if(result)
+ return result;
+ *pconsumed += blen;
+ blen = 0;
+ }
+
continue;
}
there might be a non-header part left in the end of the read
buffer. */
out:
+ if(!k->header && !leftover_body) {
+ Curl_dyn_free(&data->state.headerb);
+ }
return CURLE_OK;
}
+/*
+ * HTTP protocol `write_resp` implementation. Will parse headers
+ * when not done yet and otherwise return without consuming data.
+ */
+CURLcode Curl_http_write_resp_hds(struct Curl_easy *data,
+ const char *buf, size_t blen,
+ size_t *pconsumed,
+ bool *done)
+{
+ *done = FALSE;
+ if(!data->req.header) {
+ *pconsumed = 0;
+ return CURLE_OK;
+ }
+ else {
+ CURLcode result;
+
+ result = http_rw_headers(data, buf, blen, pconsumed);
+ if(!result && !data->req.header) {
+ /* we have successfully finished parsing the HEADERs */
+ result = Curl_http_firstwrite(data, data->conn, done);
+
+ if(!data->req.no_body && Curl_dyn_len(&data->state.headerb)) {
+ /* leftover from parsing something that turned out not
+ * to be a header, only happens if we allow for
+ * HTTP/0.9 like responses */
+ result = Curl_client_write(data, CLIENTWRITE_BODY,
+ Curl_dyn_ptr(&data->state.headerb),
+ Curl_dyn_len(&data->state.headerb));
+ }
+ Curl_dyn_free(&data->state.headerb);
+ }
+ return result;
+ }
+}
+
+static CURLcode http_write_resp(struct Curl_easy *data,
+ const char *buf, size_t blen,
+ bool is_eos,
+ bool *done)
+{
+ CURLcode result;
+ size_t consumed;
+ int flags;
+
+ *done = FALSE;
+ result = Curl_http_write_resp_hds(data, buf, blen, &consumed, done);
+ if(result || *done)
+ goto out;
+
+ DEBUGASSERT(consumed <= blen);
+ blen -= consumed;
+ buf += consumed;
+ /* either all was consumed in header parsing, or we have data left
+ * and are done with headers, e.g. it is BODY data */
+ DEBUGASSERT(!blen || !data->req.header);
+ if(!data->req.header && (blen || is_eos)) {
+ /* BODY data after header been parsed, write and consume */
+ flags = CLIENTWRITE_BODY;
+ if(is_eos)
+ flags |= CLIENTWRITE_EOS;
+ result = Curl_client_write(data, flags, (char *)buf, blen);
+ }
+out:
+ return result;
+}
/* Decode HTTP status code string. */
CURLcode Curl_http_decode_status(int *pstatus, const char *s, size_t len)
CURLcode Curl_http_size(struct Curl_easy *data);
-CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
- struct connectdata *conn,
- const char *buf, size_t blen,
- size_t *pconsumed);
+CURLcode Curl_http_write_resp_hds(struct Curl_easy *data,
+ const char *buf, size_t blen,
+ size_t *pconsumed,
+ bool *done);
/**
* Curl_http_output_auth() setups the authentication headers for the
*/
-void Curl_httpchunk_init(struct Curl_easy *data)
+void Curl_httpchunk_init(struct Curl_easy *data, struct Curl_chunker *ch,
+ bool ignore_body)
{
- struct connectdata *conn = data->conn;
- struct Curl_chunker *chunk = &conn->chunk;
- chunk->hexindex = 0; /* start at 0 */
- chunk->state = CHUNK_HEX; /* we get hex first! */
- Curl_dyn_init(&conn->trailer, DYN_H1_TRAILER);
+ (void)data;
+ ch->hexindex = 0; /* start at 0 */
+ ch->state = CHUNK_HEX; /* we get hex first! */
+ ch->last_code = CHUNKE_OK;
+ Curl_dyn_init(&ch->trailer, DYN_H1_TRAILER);
+ ch->ignore_body = ignore_body;
}
-/*
- * chunk_read() returns a OK for normal operations, or a positive return code
- * for errors. STOP means this sequence of chunks is complete. The 'wrote'
- * argument is set to tell the caller how many bytes we actually passed to the
- * client (for byte-counting and whatever).
- *
- * The states and the state-machine is further explained in the header file.
- *
- * This function always uses ASCII hex values to accommodate non-ASCII hosts.
- * For example, 0x0d and 0x0a are used instead of '\r' and '\n'.
- */
-CHUNKcode Curl_httpchunk_read(struct Curl_easy *data,
- char *buf,
- size_t blen,
- size_t *pconsumed,
- CURLcode *extrap)
+void Curl_httpchunk_reset(struct Curl_easy *data, struct Curl_chunker *ch,
+ bool ignore_body)
+{
+ (void)data;
+ ch->hexindex = 0; /* start at 0 */
+ ch->state = CHUNK_HEX; /* we get hex first! */
+ ch->last_code = CHUNKE_OK;
+ Curl_dyn_reset(&ch->trailer);
+ ch->ignore_body = ignore_body;
+}
+
+void Curl_httpchunk_free(struct Curl_easy *data, struct Curl_chunker *ch)
+{
+ (void)data;
+ Curl_dyn_free(&ch->trailer);
+}
+
+bool Curl_httpchunk_is_done(struct Curl_easy *data, struct Curl_chunker *ch)
+{
+ (void)data;
+ return ch->state == CHUNK_DONE;
+}
+
+static CURLcode httpchunk_readwrite(struct Curl_easy *data,
+ struct Curl_chunker *ch,
+ struct Curl_cwriter *cw_next,
+ const char *buf, size_t blen,
+ size_t *pconsumed)
{
CURLcode result = CURLE_OK;
- struct connectdata *conn = data->conn;
- struct Curl_chunker *ch = &conn->chunk;
- struct SingleRequest *k = &data->req;
size_t piece;
*pconsumed = 0; /* nothing's written yet */
+ /* first check terminal states that will not progress anywhere */
+ if(ch->state == CHUNK_DONE)
+ return CURLE_OK;
+ if(ch->state == CHUNK_FAILED)
+ return CURLE_RECV_ERROR;
/* the original data is written to the client, but we go on with the
chunk read process, to properly calculate the content length */
- if(data->set.http_te_skip && !k->ignorebody) {
- result = Curl_client_write(data, CLIENTWRITE_BODY, buf, blen);
+ if(data->set.http_te_skip && !ch->ignore_body) {
+ if(cw_next)
+ result = Curl_cwriter_write(data, cw_next, CLIENTWRITE_BODY, buf, blen);
+ else
+ result = Curl_client_write(data, CLIENTWRITE_BODY, (char *)buf, blen);
if(result) {
- *extrap = result;
- return CHUNKE_PASSTHRU_ERROR;
+ ch->state = CHUNK_FAILED;
+ ch->last_code = CHUNKE_PASSTHRU_ERROR;
+ return result;
}
}
switch(ch->state) {
case CHUNK_HEX:
if(ISXDIGIT(*buf)) {
- if(ch->hexindex < CHUNK_MAXNUM_LEN) {
- ch->hexbuffer[ch->hexindex] = *buf;
- buf++;
- blen--;
- ch->hexindex++;
- }
- else {
- return CHUNKE_TOO_LONG_HEX; /* longer hex than we support */
+ if(ch->hexindex >= CHUNK_MAXNUM_LEN) {
+ failf(data, "chunk hex-length longer than %d", CHUNK_MAXNUM_LEN);
+ ch->state = CHUNK_FAILED;
+ ch->last_code = CHUNKE_TOO_LONG_HEX; /* longer than we support */
+ return CURLE_RECV_ERROR;
}
+ ch->hexbuffer[ch->hexindex++] = *buf;
+ buf++;
+ blen--;
}
else {
char *endptr;
- if(0 == ch->hexindex)
+ if(0 == ch->hexindex) {
/* This is illegal data, we received junk where we expected
a hexadecimal digit. */
- return CHUNKE_ILLEGAL_HEX;
+ failf(data, "chunk hex-length char not a hex digit: 0x%x", *buf);
+ ch->state = CHUNK_FAILED;
+ ch->last_code = CHUNKE_ILLEGAL_HEX;
+ return CURLE_RECV_ERROR;
+ }
/* blen and buf are unmodified */
ch->hexbuffer[ch->hexindex] = 0;
-
- if(curlx_strtoofft(ch->hexbuffer, &endptr, 16, &ch->datasize))
- return CHUNKE_ILLEGAL_HEX;
+ if(curlx_strtoofft(ch->hexbuffer, &endptr, 16, &ch->datasize)) {
+ failf(data, "chunk hex-length not valid: '%s'", ch->hexbuffer);
+ ch->state = CHUNK_FAILED;
+ ch->last_code = CHUNKE_ILLEGAL_HEX;
+ return CURLE_RECV_ERROR;
+ }
ch->state = CHUNK_LF; /* now wait for the CRLF */
}
break;
piece = curlx_sotouz(ch->datasize);
/* Write the data portion available */
- if(!data->set.http_te_skip && !k->ignorebody) {
- result = Curl_client_write(data, CLIENTWRITE_BODY, buf, piece);
-
+ if(!data->set.http_te_skip && !ch->ignore_body) {
+ if(cw_next)
+ result = Curl_cwriter_write(data, cw_next, CLIENTWRITE_BODY,
+ buf, piece);
+ else
+ result = Curl_client_write(data, CLIENTWRITE_BODY,
+ (char *)buf, piece);
if(result) {
- *extrap = result;
- return CHUNKE_PASSTHRU_ERROR;
+ ch->state = CHUNK_FAILED;
+ ch->last_code = CHUNKE_PASSTHRU_ERROR;
+ return result;
}
}
case CHUNK_POSTLF:
if(*buf == 0x0a) {
/* The last one before we go back to hex state and start all over. */
- Curl_httpchunk_init(data); /* sets state back to CHUNK_HEX */
+ Curl_httpchunk_reset(data, ch, ch->ignore_body);
+ }
+ else if(*buf != 0x0d) {
+ ch->state = CHUNK_FAILED;
+ ch->last_code = CHUNKE_BAD_CHUNK;
+ return CURLE_RECV_ERROR;
}
- else if(*buf != 0x0d)
- return CHUNKE_BAD_CHUNK;
buf++;
blen--;
break;
case CHUNK_TRAILER:
if((*buf == 0x0d) || (*buf == 0x0a)) {
- char *tr = Curl_dyn_ptr(&conn->trailer);
+ char *tr = Curl_dyn_ptr(&ch->trailer);
/* this is the end of a trailer, but if the trailer was zero bytes
there was no trailer and we move on */
if(tr) {
size_t trlen;
- result = Curl_dyn_addn(&conn->trailer, (char *)STRCONST("\x0d\x0a"));
- if(result)
- return CHUNKE_OUT_OF_MEMORY;
-
- tr = Curl_dyn_ptr(&conn->trailer);
- trlen = Curl_dyn_len(&conn->trailer);
+ result = Curl_dyn_addn(&ch->trailer, (char *)STRCONST("\x0d\x0a"));
+ if(result) {
+ ch->state = CHUNK_FAILED;
+ ch->last_code = CHUNKE_OUT_OF_MEMORY;
+ return result;
+ }
+ tr = Curl_dyn_ptr(&ch->trailer);
+ trlen = Curl_dyn_len(&ch->trailer);
if(!data->set.http_te_skip) {
- result = Curl_client_write(data,
- CLIENTWRITE_HEADER|CLIENTWRITE_TRAILER,
- tr, trlen);
+ if(cw_next)
+ result = Curl_cwriter_write(data, cw_next,
+ CLIENTWRITE_HEADER|
+ CLIENTWRITE_TRAILER,
+ tr, trlen);
+ else
+ result = Curl_client_write(data,
+ CLIENTWRITE_HEADER|
+ CLIENTWRITE_TRAILER,
+ tr, trlen);
if(result) {
- *extrap = result;
- return CHUNKE_PASSTHRU_ERROR;
+ ch->state = CHUNK_FAILED;
+ ch->last_code = CHUNKE_PASSTHRU_ERROR;
+ return result;
}
}
- Curl_dyn_reset(&conn->trailer);
+ Curl_dyn_reset(&ch->trailer);
ch->state = CHUNK_TRAILER_CR;
if(*buf == 0x0a)
/* already on the LF */
}
}
else {
- result = Curl_dyn_addn(&conn->trailer, buf, 1);
- if(result)
- return CHUNKE_OUT_OF_MEMORY;
+ result = Curl_dyn_addn(&ch->trailer, buf, 1);
+ if(result) {
+ ch->state = CHUNK_FAILED;
+ ch->last_code = CHUNKE_OUT_OF_MEMORY;
+ return result;
+ }
}
buf++;
blen--;
buf++;
blen--;
}
- else
- return CHUNKE_BAD_CHUNK;
+ else {
+ ch->state = CHUNK_FAILED;
+ ch->last_code = CHUNKE_BAD_CHUNK;
+ return CURLE_RECV_ERROR;
+ }
break;
case CHUNK_TRAILER_POSTCR:
case CHUNK_STOP:
if(*buf == 0x0a) {
blen--;
-
/* Record the length of any data left in the end of the buffer
even if there's no more chunks to read */
ch->datasize = blen;
-
- return CHUNKE_STOP; /* return stop */
+ ch->state = CHUNK_DONE;
+ return CURLE_OK;
}
- else
- return CHUNKE_BAD_CHUNK;
+ else {
+ ch->state = CHUNK_FAILED;
+ ch->last_code = CHUNKE_BAD_CHUNK;
+ return CURLE_RECV_ERROR;
+ }
+ case CHUNK_DONE:
+ return CURLE_OK;
+
+ case CHUNK_FAILED:
+ return CURLE_RECV_ERROR;
}
+
}
- return CHUNKE_OK;
+ return CURLE_OK;
}
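For reference, a minimal example of the input this state machine walks through; the HTTP/1.1 chunked transfer coding (RFC 9112, section 7.1) is a sequence of hex-sized chunks, a zero-sized chunk, optional trailer fields and a closing CRLF. The array name is illustrative only, the states and flags named in the comments are the ones used above:

static const char example_chunked_body[] =
  "4\r\n"           /* chunk size in hex: CHUNK_HEX, then CHUNK_LF */
  "Wiki\r\n"        /* 4 bytes of payload, written to the client */
  "5\r\n"
  "pedia\r\n"
  "0\r\n"           /* zero-sized chunk announces the end of the body */
  "Trailer: v\r\n"  /* optional trailer, forwarded with CLIENTWRITE_TRAILER */
  "\r\n";           /* closing CRLF, the chunker reaches CHUNK_DONE */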
-const char *Curl_chunked_strerror(CHUNKcode code)
+static const char *Curl_chunked_strerror(CHUNKcode code)
{
switch(code) {
default:
case CHUNKE_BAD_CHUNK:
return "Malformed encoding found";
case CHUNKE_PASSTHRU_ERROR:
- DEBUGASSERT(0); /* never used */
- return "";
+ return "Error writing data to client";
case CHUNKE_BAD_ENCODING:
return "Bad content-encoding found";
case CHUNKE_OUT_OF_MEMORY:
}
}
+CURLcode Curl_httpchunk_read(struct Curl_easy *data,
+ struct Curl_chunker *ch,
+ char *buf, size_t blen,
+ size_t *pconsumed)
+{
+ return httpchunk_readwrite(data, ch, NULL, buf, blen, pconsumed);
+}
+
+struct chunked_writer {
+ struct Curl_cwriter super;
+ struct Curl_chunker ch;
+};
+
+static CURLcode cw_chunked_init(struct Curl_easy *data,
+ struct Curl_cwriter *writer)
+{
+ struct chunked_writer *ctx = (struct chunked_writer *)writer;
+
+ data->req.chunk = TRUE; /* chunks coming our way. */
+ Curl_httpchunk_init(data, &ctx->ch, FALSE);
+ return CURLE_OK;
+}
+
+static void cw_chunked_close(struct Curl_easy *data,
+ struct Curl_cwriter *writer)
+{
+ struct chunked_writer *ctx = (struct chunked_writer *)writer;
+ Curl_httpchunk_free(data, &ctx->ch);
+}
+
+static CURLcode cw_chunked_write(struct Curl_easy *data,
+ struct Curl_cwriter *writer, int type,
+ const char *buf, size_t blen)
+{
+ struct chunked_writer *ctx = (struct chunked_writer *)writer;
+ CURLcode result;
+ size_t consumed;
+
+ if(!(type & CLIENTWRITE_BODY))
+ return Curl_cwriter_write(data, writer->next, type, buf, blen);
+
+ consumed = 0;
+ result = httpchunk_readwrite(data, &ctx->ch, writer->next, buf, blen,
+ &consumed);
+
+ if(result) {
+ if(CHUNKE_PASSTHRU_ERROR == ctx->ch.last_code) {
+ failf(data, "Failed reading the chunked-encoded stream");
+ }
+ else {
+ failf(data, "%s in chunked-encoding",
+ Curl_chunked_strerror(ctx->ch.last_code));
+ }
+ return result;
+ }
+
+ blen -= consumed;
+ if(CHUNK_DONE == ctx->ch.state) {
+ /* chunks read successfully, download is complete */
+ data->req.download_done = TRUE;
+ if(blen) {
+ infof(data, "Leftovers after chunking: %zu bytes", blen);
+ }
+ }
+ else if((type & CLIENTWRITE_EOS) && !data->req.no_body) {
+ failf(data, "transfer closed with outstanding read data remaining");
+ return CURLE_PARTIAL_FILE;
+ }
+
+ return CURLE_OK;
+}
+
+/* HTTP chunked Transfer-Encoding decoder */
+const struct Curl_cwtype Curl_httpchunk_unencoder = {
+ "chunked",
+ NULL,
+ cw_chunked_init,
+ cw_chunked_write,
+ cw_chunked_close,
+ sizeof(struct chunked_writer)
+};
+
#endif /* CURL_DISABLE_HTTP */
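With chunked decoding now implemented as a regular client writer, attaching it to a transfer is the usual writer-stack setup. A minimal sketch of what Curl_build_unencoding_stack() effectively does for "Transfer-Encoding: chunked"; it assumes the Curl_cwriter_create() helper declared in sendf.h, and the function name itself is illustrative:

static CURLcode add_chunked_unencoder(struct Curl_easy *data)
{
  struct Curl_cwriter *writer;
  CURLcode result;

  /* instantiate the "chunked" decoder in the transfer-decode phase;
   * cw_chunked_init() sets data->req.chunk and initializes the
   * embedded Curl_chunker */
  result = Curl_cwriter_create(&writer, data, &Curl_httpchunk_unencoder,
                               CURL_CW_TRANSFER_DECODE);
  if(result)
    return result;
  /* push the writer onto the client writer stack */
  result = Curl_cwriter_add(data, writer);
  if(result)
    Curl_cwriter_free(data, writer);
  return result;
}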
*
***************************************************************************/
+#ifndef CURL_DISABLE_HTTP
+
+#include "dynbuf.h"
+
struct connectdata;
/*
signalled If this is an empty trailer CHUNKE_STOP will be signalled.
Otherwise the trailer will be broadcasted via Curl_client_write() and the
next state will be CHUNK_TRAILER */
- CHUNK_TRAILER_POSTCR
+ CHUNK_TRAILER_POSTCR,
+
+ /* Successfully de-chunked everything */
+ CHUNK_DONE,
+
+ /* Failed on seeing a bad or not correctly terminated chunk */
+ CHUNK_FAILED
} ChunkyState;
typedef enum {
- CHUNKE_STOP = -1,
CHUNKE_OK = 0,
CHUNKE_TOO_LONG_HEX = 1,
CHUNKE_ILLEGAL_HEX,
CHUNKE_BAD_CHUNK,
CHUNKE_BAD_ENCODING,
CHUNKE_OUT_OF_MEMORY,
- CHUNKE_PASSTHRU_ERROR, /* Curl_httpchunk_read() returns a CURLcode to use */
- CHUNKE_LAST
+ CHUNKE_PASSTHRU_ERROR /* Curl_httpchunk_read() returns a CURLcode to use */
} CHUNKcode;
-const char *Curl_chunked_strerror(CHUNKcode code);
-
struct Curl_chunker {
curl_off_t datasize;
ChunkyState state;
+ CHUNKcode last_code;
+ struct dynbuf trailer; /* for chunked-encoded trailer */
unsigned char hexindex;
- char hexbuffer[ CHUNK_MAXNUM_LEN + 1]; /* +1 for null-terminator */
+ char hexbuffer[CHUNK_MAXNUM_LEN + 1]; /* +1 for null-terminator */
+ BIT(ignore_body); /* never write response body data */
};
/* The following functions are defined in http_chunks.c */
-void Curl_httpchunk_init(struct Curl_easy *data);
-CHUNKcode Curl_httpchunk_read(struct Curl_easy *data, char *buf,
- size_t blen, size_t *pconsumed,
- CURLcode *passthru);
+void Curl_httpchunk_init(struct Curl_easy *data, struct Curl_chunker *ch,
+ bool ignore_body);
+void Curl_httpchunk_free(struct Curl_easy *data, struct Curl_chunker *ch);
+void Curl_httpchunk_reset(struct Curl_easy *data, struct Curl_chunker *ch,
+ bool ignore_body);
+
+/*
+ * Read BODY bytes in HTTP/1.1 chunked encoding from `buf` and return
+ * the amount of bytes consumed. The actual response bytes and trailer
+ * headers are written out to the client.
+ * On success, this will consume all bytes up to the end of the response,
+ * i.e. until the last chunk has been processed.
+ * @param data the transfer involved
+ * @param ch the chunker instance keeping state across calls
+ * @param buf the response data
+ * @param blen amount of bytes in `buf`
+ * @param pconsumed on successful return, the number of bytes in `buf`
+ * consumed
+ *
+ * This function always uses ASCII hex values to accommodate non-ASCII hosts.
+ * For example, 0x0d and 0x0a are used instead of '\r' and '\n'.
+ */
+CURLcode Curl_httpchunk_read(struct Curl_easy *data, struct Curl_chunker *ch,
+ char *buf, size_t blen, size_t *pconsumed);
+
+/**
+ * @return TRUE iff chunked decoding has finished successfully.
+ */
+bool Curl_httpchunk_is_done(struct Curl_easy *data, struct Curl_chunker *ch);
+
+extern const struct Curl_cwtype Curl_httpchunk_unencoder;
+
+#endif /* !CURL_DISABLE_HTTP */
#endif /* HEADER_CURL_HTTP_CHUNKS_H */
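Callers that do not go through the writer stack, like the CONNECT tunnel code earlier in this change, own a struct Curl_chunker themselves and drive it directly. A minimal usage sketch with the API above; the function name and buffer handling are illustrative:

static CURLcode consume_chunked(struct Curl_easy *data,
                                struct Curl_chunker *ch,
                                char *buf, size_t blen)
{
  size_t consumed = 0;
  CURLcode result;

  /* `ch` was set up once with Curl_httpchunk_init(data, ch, FALSE) and
   * is released with Curl_httpchunk_free(data, ch) when the caller is done */
  result = Curl_httpchunk_read(data, ch, buf, blen, &consumed);
  if(result)
    return result;    /* e.g. CURLE_RECV_ERROR on a malformed chunk */
  if(Curl_httpchunk_is_done(data, ch))
    /* terminating chunk seen; any bytes past `consumed` belong to
     * whatever follows the chunked body */
    infof(data, "chunked body complete, %zu bytes consumed", consumed);
  return CURLE_OK;
}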
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
imap_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_IMAP, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
imap_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_IMAPS, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_LDAP, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_LDAPS, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_MQTT, /* defport */
}
/* read/write data if it is ready to do so */
- result = Curl_readwrite(data->conn, data, &done);
+ result = Curl_readwrite(data, &done);
if(done || (result == CURLE_RECV_ERROR)) {
/* If CURLE_RECV_ERROR happens early enough, we assume it was a race
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
oldap_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_LDAP, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
oldap_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_LDAPS, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
pop3_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_POP3, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
pop3_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_POP3S, /* defport */
struct connectdata *conn, curl_socket_t *socks);
/*
- * Parse and write out any available RTP data.
+ * Parse and write out an RTSP response.
* @param data the transfer
- * @param conn the connection
* @param buf data read from connection
* @param blen amount of data in buf
- * @param consumed out, number of blen consumed
+ * @param is_eos TRUE iff this is the last write
- * @param readmore out, TRUE iff complete buf was consumed and more data
- * is needed
+ * @param done out, TRUE iff the response has been handled completely
*/
-static CURLcode rtsp_rtp_readwrite(struct Curl_easy *data,
- struct connectdata *conn,
- const char *buf,
- size_t blen,
- size_t *pconsumed,
- bool *readmore);
+static CURLcode rtsp_rtp_write_resp(struct Curl_easy *data,
+ const char *buf,
+ size_t blen,
+ bool is_eos,
+ bool *done);
static CURLcode rtsp_setup_connection(struct Curl_easy *data,
struct connectdata *conn);
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
rtsp_disconnect, /* disconnect */
- rtsp_rtp_readwrite, /* readwrite */
+ rtsp_rtp_write_resp, /* write_resp */
rtsp_conncheck, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_RTSP, /* defport */
return result;
}
+/**
+ * write any BODY bytes missing to the client, ignore the rest.
+ */
+static CURLcode rtp_write_body_junk(struct Curl_easy *data,
+ const char *buf,
+ size_t blen)
+{
+ struct rtsp_conn *rtspc = &(data->conn->proto.rtspc);
+ curl_off_t body_remain;
+ bool in_body;
+
+ in_body = (data->req.headerline && !rtspc->in_header) &&
+ (data->req.size >= 0) &&
+ (data->req.bytecount < data->req.size);
+ body_remain = in_body? (data->req.size - data->req.bytecount) : 0;
+ DEBUGASSERT(body_remain >= 0);
+ if(body_remain) {
+ if((curl_off_t)blen > body_remain)
+ blen = (size_t)body_remain;
+ return Curl_client_write(data, CLIENTWRITE_BODY, (char *)buf, blen);
+ }
+ return CURLE_OK;
+}
+
static CURLcode rtsp_filter_rtp(struct Curl_easy *data,
- struct connectdata *conn,
const char *buf,
size_t blen,
- bool in_body,
size_t *pconsumed)
{
- struct rtsp_conn *rtspc = &(conn->proto.rtspc);
+ struct rtsp_conn *rtspc = &(data->conn->proto.rtspc);
CURLcode result = CURLE_OK;
+ size_t skip_len = 0;
*pconsumed = 0;
while(blen) {
+ bool in_body = (data->req.headerline && !rtspc->in_header) &&
+ (data->req.size >= 0) &&
+ (data->req.bytecount < data->req.size);
switch(rtspc->state) {
case RTP_PARSE_SKIP: {
DEBUGASSERT(Curl_dyn_len(&rtspc->buf) == 0);
- if(in_body && buf[0] != '$') {
- /* in BODY and no valid start, do not consume and return */
- goto out;
- }
while(blen && buf[0] != '$') {
if(!in_body && buf[0] == 'R' &&
data->set.rtspreq != RTSPREQ_RECEIVE) {
goto out;
}
}
- /* junk, consume without buffering */
+ /* junk/BODY, consume without buffering */
*pconsumed += 1;
++buf;
--blen;
+ ++skip_len;
}
if(blen && buf[0] == '$') {
/* possible start of an RTP message, buffer */
+ if(skip_len) {
+ /* end of junk/BODY bytes, flush */
+ result = rtp_write_body_junk(data,
+ (char *)(buf - skip_len), skip_len);
+ skip_len = 0;
+ if(result)
+ goto out;
+ }
if(Curl_dyn_addn(&rtspc->buf, buf, 1)) {
result = CURLE_OUT_OF_MEMORY;
goto out;
if(!(data->state.rtp_channel_mask[idx] & (1 << off))) {
/* invalid channel number, junk or BODY data */
rtspc->state = RTP_PARSE_SKIP;
- if(in_body) {
- /* we do not consume this byte, it is BODY data */
- DEBUGF(infof(data, "RTSP: invalid RTP channel %d in BODY, "
- "treating as BODY data", idx));
- if(*pconsumed == 0) {
- /* We did not consume the initial '$' in our buffer, but had
- * it from an earlier call. We cannot un-consume it and have
- * to write it directly as BODY data */
- result = Curl_client_write(data, CLIENTWRITE_BODY,
- Curl_dyn_ptr(&rtspc->buf), 1);
- Curl_dyn_free(&rtspc->buf);
- if(result)
- goto out;
- }
- else {
- /* un-consume the '$' and leave */
- Curl_dyn_free(&rtspc->buf);
- *pconsumed -= 1;
- --buf;
- ++blen;
+ DEBUGASSERT(skip_len == 0);
+ /* we do not consume this byte, it is BODY data */
+ DEBUGF(infof(data, "RTSP: invalid RTP channel %d, skipping", idx));
+ if(*pconsumed == 0) {
+ /* We did not consume the initial '$' in our buffer, but had
+ * it from an earlier call. We cannot un-consume it and have
+ * to write it directly as BODY data */
+ result = rtp_write_body_junk(data, Curl_dyn_ptr(&rtspc->buf), 1);
+ if(result)
goto out;
- }
}
else {
- /* not BODY, forget the junk '$'. Do not consume this byte,
- * it might be a start */
- infof(data, "RTSP: invalid RTP channel %d, skipping", idx);
- Curl_dyn_free(&rtspc->buf);
+ /* count the '$' as skip and continue */
+ skip_len = 1;
}
+ Curl_dyn_free(&rtspc->buf);
break;
}
/* a valid channel, so we expect this to be a real RTP message */
}
}
out:
+ if(!result && skip_len)
+ result = rtp_write_body_junk(data, (char *)(buf - skip_len), skip_len);
return result;
}
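For context, the interleaved frames that rtsp_filter_rtp() above extracts follow RFC 2326, section 10.12: a '$' magic byte, a channel id, a 16-bit big-endian payload length and then the RTP/RTCP payload. A small decoding sketch; the struct and helper names are illustrative and not used by the code:

struct rtsp_interleaved_hdr {
  unsigned char magic;    /* always '$' (0x24); anything else is BODY/junk */
  unsigned char channel;  /* checked against data->state.rtp_channel_mask */
  unsigned char len[2];   /* payload length, network byte order */
};

static size_t rtsp_frame_payload_len(const unsigned char *hdr)
{
  /* bytes 2 and 3 hold the payload length in network (big endian) order */
  return ((size_t)hdr[2] << 8) | (size_t)hdr[3];
}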
-static CURLcode rtsp_rtp_readwrite(struct Curl_easy *data,
- struct connectdata *conn,
- const char *buf,
- size_t blen,
- size_t *pconsumed,
- bool *readmore)
+static CURLcode rtsp_rtp_write_resp(struct Curl_easy *data,
+ const char *buf,
+ size_t blen,
+ bool is_eos,
+ bool *done)
{
- struct rtsp_conn *rtspc = &(conn->proto.rtspc);
+ struct rtsp_conn *rtspc = &(data->conn->proto.rtspc);
CURLcode result = CURLE_OK;
size_t consumed = 0;
- bool in_body;
if(!data->req.header)
rtspc->in_header = FALSE;
- in_body = (data->req.headerline && !rtspc->in_header) &&
- (data->req.size >= 0) &&
- (data->req.bytecount < data->req.size);
-
- *readmore = FALSE;
- *pconsumed = 0;
+ *done = FALSE;
if(!blen) {
goto out;
}
+ DEBUGF(infof(data, "rtsp_rtp_write_resp(len=%zu, in_header=%d, eos=%d)",
+ blen, rtspc->in_header, is_eos));
+
/* If header parsing is not ongoing, extract RTP messages */
if(!rtspc->in_header) {
- result = rtsp_filter_rtp(data, conn, buf, blen, in_body, &consumed);
+ result = rtsp_filter_rtp(data, buf, blen, &consumed);
if(result)
goto out;
- *pconsumed += consumed;
buf += consumed;
blen -= consumed;
+ /* either we consumed all or are at the start of header parsing */
+ DEBUGASSERT(blen == 0 || data->req.header);
}
/* we want to parse headers, do so */
if(data->req.header && blen) {
rtspc->in_header = TRUE;
- result = Curl_http_readwrite_headers(data, conn, buf, blen,
- &consumed);
+ result = Curl_http_write_resp_hds(data, buf, blen, &consumed, done);
if(result)
goto out;
- *pconsumed += consumed;
buf += consumed;
blen -= consumed;
rtspc->in_header = FALSE;
if(!rtspc->in_header) {
- /* If header parsing is done and data left, extract RTP messages */
- in_body = (data->req.headerline && !rtspc->in_header) &&
- (data->req.size >= 0) &&
- (data->req.bytecount < data->req.size);
- result = rtsp_filter_rtp(data, conn, buf, blen, in_body, &consumed);
+ /* If header parsing is done, extract interleaved RTP messages */
+ if((data->set.rtspreq == RTSPREQ_DESCRIBE) && (data->req.size <= -1)) {
+ /* Respect section 4.4 of rfc2326: If the Content-Length header is
+ absent, a length 0 must be assumed. It will prevent libcurl from
+ hanging on DESCRIBE request that got refused for whatever
+ reason */
+ data->req.download_done = TRUE;
+ }
+ result = rtsp_filter_rtp(data, buf, blen, &consumed);
if(result)
goto out;
- *pconsumed += consumed;
+ blen -= consumed;
}
}
if(rtspc->state != RTP_PARSE_SKIP)
- *readmore = TRUE;
+ *done = FALSE;
+ /* we MUST have consumed all bytes */
+ DEBUGF(infof(data, "rtsp_rtp_write_resp(len=%zu, in_header=%d, done=%d)",
+ blen, rtspc->in_header, *done));
+ DEBUGASSERT(blen == 0);
+ if(!result && is_eos) {
+ result = Curl_client_write(data, CLIENTWRITE_BODY|CLIENTWRITE_EOS,
+ (char *)buf, 0);
+ }
out:
- if(!*readmore && data->set.rtspreq == RTSPREQ_RECEIVE) {
+ if((data->set.rtspreq == RTSPREQ_RECEIVE) &&
+ (rtspc->state == RTP_PARSE_SKIP)) {
/* In special mode RECEIVE, we just process one chunk of network
* data, so we stop the transfer here, if we have no incomplete
* RTP message pending. */
- data->req.keepon &= ~KEEP_RECV;
+ data->req.download_done = TRUE;
}
return result;
}
#endif
/* it is one of those, at least */
DEBUGASSERT(type & (CLIENTWRITE_BODY|CLIENTWRITE_HEADER|CLIENTWRITE_INFO));
- /* BODY is only BODY */
- DEBUGASSERT(!(type & CLIENTWRITE_BODY) || (type == CLIENTWRITE_BODY));
- /* INFO is only INFO */
- DEBUGASSERT(!(type & CLIENTWRITE_INFO) || (type == CLIENTWRITE_INFO));
+ /* BODY is only BODY (with optional EOS) */
+ DEBUGASSERT(!(type & CLIENTWRITE_BODY) ||
+ ((type & ~(CLIENTWRITE_BODY|CLIENTWRITE_EOS)) == 0));
+ /* INFO is only INFO (with optional EOS) */
+ DEBUGASSERT(!(type & CLIENTWRITE_INFO) ||
+ ((type & ~(CLIENTWRITE_INFO|CLIENTWRITE_EOS)) == 0));
if(!data->req.writer_stack) {
result = do_init_stack(data);
struct Curl_cwriter *writer, int type,
const char *buf, size_t nbytes)
{
- if(!nbytes)
- return CURLE_OK;
if(!writer)
return CURLE_WRITE_ERROR;
return writer->cwt->do_write(data, writer, type, buf, nbytes);
{
CURLcode result;
size_t nwrite, excess_len = 0;
- const char *excess_data = NULL;
if(!(type & CLIENTWRITE_BODY)) {
if((type & CLIENTWRITE_CONNECT) && data->set.suppress_connect_headers)
return Curl_cwriter_write(data, writer->next, type, buf, nbytes);
}
+ if(!data->req.bytecount) {
+ Curl_pgrsTime(data, TIMER_STARTTRANSFER);
+ if(data->req.exp100 > EXP100_SEND_DATA)
+ /* set time stamp to compare with when waiting for the 100 */
+ data->req.start100 = Curl_now();
+ }
+
+ /* Here, we deal with REAL BODY bytes. All filtering and transfer
+ * encodings have been applied and only the true content, e.g. BODY,
+ * bytes are passed here.
+ * This allows us to check sizes, update stats, etc. independent
+ * from the protocol in play. */
+
+ if(data->req.no_body && nbytes > 0) {
+ /* BODY arrives although we want none, bail out */
+ streamclose(data->conn, "ignoring body");
+ DEBUGF(infof(data, "did not want a BODY, but seeing %zu bytes",
+ nbytes));
+ data->req.download_done = TRUE;
+ return CURLE_WEIRD_SERVER_REPLY;
+ }
+
+ /* Determine if we see any bytes in excess to what is allowed.
+ * We write the allowed bytes and handle excess further below.
+ * This gives deterministic BODY writes on varying buffer receive
+ * lengths. */
nwrite = nbytes;
if(-1 != data->req.maxdownload) {
size_t wmax = get_max_body_write_len(data, data->req.maxdownload);
if(nwrite > wmax) {
excess_len = nbytes - wmax;
nwrite = wmax;
- excess_data = buf + nwrite;
}
if(nwrite == wmax) {
}
}
+ /* Error on too large filesize is handled below, after writing
+ * the permitted bytes */
if(data->set.max_filesize) {
size_t wmax = get_max_body_write_len(data, data->set.max_filesize);
if(nwrite > wmax) {
}
}
+ /* Update stats, write and report progress */
data->req.bytecount += nwrite;
++data->req.bodywrites;
if(!data->req.ignorebody && nwrite) {
return result;
if(excess_len) {
- if(data->conn->handler->readwrite) {
- /* RTSP hack moved from transfer loop to here */
- bool readmore = FALSE; /* indicates data is incomplete, need more */
- size_t consumed = 0;
- result = data->conn->handler->readwrite(data, data->conn,
- excess_data, excess_len,
- &consumed, &readmore);
- if(result)
- return result;
- DEBUGASSERT(consumed <= excess_len);
- excess_len -= consumed;
- if(readmore) {
- data->req.download_done = FALSE;
- data->req.keepon |= KEEP_RECV; /* we're not done reading */
- }
- }
- if(excess_len && !data->req.ignorebody) {
+ if(!data->req.ignorebody) {
infof(data,
"Excess found writing body:"
" excess = %zu"
return CURLE_OK;
}
+void Curl_cwriter_remove_by_name(struct Curl_easy *data,
+ const char *name)
+{
+ struct Curl_cwriter **anchor = &data->req.writer_stack;
+
+ while(*anchor) {
+ if(!strcmp(name, (*anchor)->cwt->name)) {
+ struct Curl_cwriter *w = (*anchor);
+ *anchor = w->next;
+ Curl_cwriter_free(data, w);
+ continue;
+ }
+ anchor = &((*anchor)->next);
+ }
+}
/*
* Internal read-from-socket function. This is meant to deal with plain
#define CLIENTWRITE_CONNECT (1<<4) /* a CONNECT related HEADER */
#define CLIENTWRITE_1XX (1<<5) /* a 1xx response related HEADER */
#define CLIENTWRITE_TRAILER (1<<6) /* a trailer HEADER */
+#define CLIENTWRITE_EOS (1<<7) /* End Of transfer download Stream */
/**
* Write `len` bytes at `prt` to the client. `type` indicates what
CURLcode Curl_cwriter_add(struct Curl_easy *data,
struct Curl_cwriter *writer);
+void Curl_cwriter_remove_by_name(struct Curl_easy *data,
+ const char *name);
+
/**
* Convenience method for calling `writer->do_write()` that
* checks for NULL writer.
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
smb_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_SMB, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
smb_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_SMBS, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
smtp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_SMTP, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
smtp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_SMTPS, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_TELNET, /* defport */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
tftp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_TFTP, /* defport */
return TRUE;
}
+/**
+ * Receive raw response data for the transfer.
+ * @param data the transfer
+ * @param buf buffer to keep response data received
+ * @param blen length of `buf`
+ * @param eos_reliable if EOS detection in underlying connection is reliable
+ * @param err error code in case of -1 return
+ * @return number of bytes read or -1 for error
+ */
+static ssize_t Curl_xfer_recv_resp(struct Curl_easy *data,
+ char *buf, size_t blen,
+ bool eos_reliable,
+ CURLcode *err)
+{
+ ssize_t nread;
+
+ DEBUGASSERT(blen > 0);
+ /* If we are reading BODY data and the connection does NOT handle EOF
+ * and we know the size of the BODY data, limit the read amount */
+ if(!eos_reliable && !data->req.header && data->req.size != -1) {
+ curl_off_t totalleft = data->req.size - data->req.bytecount;
+ if(totalleft <= 0)
+ blen = 0;
+ else if(totalleft < (curl_off_t)blen)
+ blen = (size_t)totalleft;
+ }
+
+ if(!blen) {
+ /* want nothing - continue as if read nothing. */
+ DEBUGF(infof(data, "readwrite_data: we're done"));
+ *err = CURLE_OK;
+ return 0;
+ }
+
+ *err = Curl_read(data, data->conn->sockfd, buf, blen, &nread);
+ if(*err)
+ return -1;
+ DEBUGASSERT(nread >= 0);
+ *err = CURLE_OK;
+ return nread;
+}
+
/*
* Go ahead and do a read if we have a readable socket or if
* the stream was rewound (in which case we have data in a
* buffer)
*/
static CURLcode readwrite_data(struct Curl_easy *data,
- struct connectdata *conn,
struct SingleRequest *k,
int *didwhat, bool *done)
{
+ struct connectdata *conn = data->conn;
CURLcode result = CURLE_OK;
char *buf;
size_t blen;
- size_t consumed;
int maxloops = 10;
curl_off_t total_received = 0;
- bool data_eof_handled = FALSE;
+ bool is_multiplex = FALSE;
DEBUGASSERT(data->state.buffer);
*done = FALSE;
/* This is where we loop until we have read everything there is to
read or we get a CURLE_AGAIN */
do {
- bool is_empty_data = FALSE;
- size_t bytestoread = data->set.buffer_size;
-
- /* For HTTP/2 and HTTP/3, read data without caring about the content
- length. This is safe because body in HTTP/2 is always segmented
- thanks to its framing layer. Meanwhile, we have to call Curl_read
- to ensure that http2_handle_stream_close is called when we read all
- incoming bytes for a particular stream. */
- bool is_http3 = Curl_conn_is_http3(data, conn, FIRSTSOCKET);
- data_eof_handled = is_http3 || Curl_conn_is_http2(data, conn, FIRSTSOCKET);
-
- if(data->set.max_recv_speed) {
- /* Limit the amount we read here, break on reaching it */
- curl_off_t net_limit = data->set.max_recv_speed - total_received;
- if(net_limit <= 0)
- break;
- if((size_t)net_limit < bytestoread)
- bytestoread = (size_t)net_limit;
+ bool is_eos = FALSE;
+ size_t bytestoread;
+ ssize_t nread;
+
+ if(!is_multiplex) {
+ /* Multiplexed connections have inherent handling of EOF and we do not
+ * have to carefully restrict the amount we try to read.
+ * A connection only ever changes to multiplexed, never back. */
+ is_multiplex = Curl_conn_is_multiplex(conn, FIRSTSOCKET);
}
- /* Each loop iteration starts with a fresh buffer and handles
- * all data read into it. */
buf = data->state.buffer;
blen = 0;
+ bytestoread = data->set.buffer_size;
- /* If we are reading BODY data and the connection does NOT handle EOF
- * and we know the size of the BODY data, limit the read amount */
- if(!k->header && !data_eof_handled && k->size != -1) {
- curl_off_t totalleft = k->size - k->bytecount;
- if(totalleft <= 0)
- bytestoread = 0;
- else if(totalleft < (curl_off_t)bytestoread)
- bytestoread = (size_t)totalleft;
+ /* Observe any imposed speed limit */
+ if(bytestoread && data->set.max_recv_speed) {
+ curl_off_t net_limit = data->set.max_recv_speed - total_received;
+ if(net_limit <= 0)
+ break;
+ if((size_t)net_limit < bytestoread)
+ bytestoread = (size_t)net_limit;
}
- if(bytestoread) {
- /* receive data from the network! */
- ssize_t nread; /* number of bytes read */
- result = Curl_read(data, conn->sockfd, buf, bytestoread, &nread);
+ nread = Curl_xfer_recv_resp(data, buf, bytestoread,
+ is_multiplex, &result);
+ if(nread < 0) {
if(CURLE_AGAIN == result) {
result = CURLE_OK;
break; /* get out of loop */
}
- else if(result)
- goto out;
- DEBUGASSERT(nread >= 0);
- blen = (size_t)nread;
- }
- else {
- /* read nothing but since we wanted nothing we consider this an OK
- situation to proceed from */
- DEBUGF(infof(data, "readwrite_data: we're done"));
- }
-
- if(!k->bytecount) {
- Curl_pgrsTime(data, TIMER_STARTTRANSFER);
- if(k->exp100 > EXP100_SEND_DATA)
- /* set time stamp to compare with when waiting for the 100 */
- k->start100 = Curl_now();
+ goto out; /* real error */
}
+ /* We only get a 0-length read on EndOfStream */
+ blen = (size_t)nread;
+ is_eos = (blen == 0);
*didwhat |= KEEP_RECV;
- /* indicates data of zero size, i.e. empty file */
- is_empty_data = ((blen == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
-
- if(0 < blen || is_empty_data) {
- /* data->state.buffer is allocated 1 byte larger than
- * data->set.buffer_size admits. *wink* */
- /* TODO: we should really not rely on this being 0-terminated, since
- * the actual data read might contain 0s. */
- buf[blen] = 0;
- }
if(!blen) {
/* if we receive 0 or less here, either the data transfer is done or the
server closed the connection and we bail out from this! */
- if(data_eof_handled)
+ if(is_multiplex)
DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
else
DEBUGF(infof(data, "nread <= 0, server closed connection, bailing"));
- k->keepon = 0; /* stop sending as well */
- if(!is_empty_data)
- break;
- }
-
- if(conn->handler->readwrite) {
- bool readmore = FALSE; /* indicates data is incomplete, need more */
- consumed = 0;
- result = conn->handler->readwrite(data, conn, buf, blen,
- &consumed, &readmore);
- if(result)
- goto out;
- if(readmore)
- break;
- buf += consumed;
- blen -= consumed;
- if(k->download_done) {
- /* We've stopped dealing with input, get out of the do-while loop */
- if(blen > 0) {
- infof(data,
- "Excess found:"
- " excess = %zu"
- " url = %s (zero-length body)",
- blen, data->state.up.path);
- }
-
- /* we make sure that this socket isn't read more now */
- k->keepon &= ~KEEP_RECV;
+ if(k->eos_written) { /* already did write this to client, leave */
+ k->keepon = 0; /* stop sending as well */
break;
}
}
+ total_received += blen;
-#ifndef CURL_DISABLE_HTTP
- /* Since this is a two-state thing, we check if we are parsing
- headers at the moment or not. */
- if(k->header) {
- consumed = 0;
- result = Curl_http_readwrite_headers(data, conn, buf, blen, &consumed);
- if(result)
- goto out;
- buf += consumed;
- blen -= consumed;
-
- if(conn->handler->readwrite &&
- (k->maxdownload <= 0 && blen > 0)) {
- bool readmore = FALSE; /* indicates data is incomplete, need more */
- consumed = 0;
- result = conn->handler->readwrite(data, conn, buf, blen,
- &consumed, &readmore);
- if(result)
- goto out;
- if(readmore)
- break;
- buf += consumed;
- blen -= consumed;
- }
-
- if(k->download_done) {
- /* We've stopped dealing with input, get out of the do-while loop */
- if(blen > 0) {
- infof(data,
- "Excess found:"
- " excess = %zu"
- " url = %s (zero-length body)",
- blen, data->state.up.path);
- }
-
- /* we make sure that this socket isn't read more now */
- k->keepon &= ~KEEP_RECV;
- break;
- }
- }
-#endif /* CURL_DISABLE_HTTP */
-
-
- /* This is not an 'else if' since it may be a rest from the header
- parsing, where the beginning of the buffer is headers and the end
- is non-headers. */
- if(!k->header && (blen > 0 || is_empty_data)) {
-
- if(data->req.no_body && blen > 0) {
- /* data arrives although we want none, bail out */
- streamclose(conn, "ignoring body");
- DEBUGF(infof(data, "did not want a BODY, but seeing %zu bytes",
- blen));
- *done = TRUE;
- result = CURLE_WEIRD_SERVER_REPLY;
- goto out;
- }
-
-#ifndef CURL_DISABLE_HTTP
- if(0 == k->bodywrites && !is_empty_data) {
- /* These checks are only made the first time we are about to
- write a piece of the body */
- if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
- /* HTTP-only checks */
- result = Curl_http_firstwrite(data, conn, done);
- if(result || *done)
- goto out;
- }
- } /* this is the first time we write a body part */
-#endif /* CURL_DISABLE_HTTP */
-
-#ifndef CURL_DISABLE_HTTP
- if(k->chunk) {
- /*
- * Here comes a chunked transfer flying and we need to decode this
- * properly. While the name says read, this function both reads
- * and writes away the data.
- */
- CURLcode extra;
- CHUNKcode res;
-
- consumed = 0;
- res = Curl_httpchunk_read(data, buf, blen, &consumed, &extra);
-
- if(CHUNKE_OK < res) {
- if(CHUNKE_PASSTHRU_ERROR == res) {
- failf(data, "Failed reading the chunked-encoded stream");
- result = extra;
- goto out;
- }
- failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
- result = CURLE_RECV_ERROR;
- goto out;
- }
-
- buf += consumed;
- blen -= consumed;
- if(CHUNKE_STOP == res) {
- /* we're done reading chunks! */
- k->keepon &= ~KEEP_RECV; /* read no more */
- /* chunks read successfully, download is complete */
- k->download_done = TRUE;
-
- /* N number of bytes at the end of the str buffer that weren't
- written to the client. */
- if(conn->chunk.datasize) {
- infof(data, "Leftovers after chunking: % "
- CURL_FORMAT_CURL_OFF_T "u bytes",
- conn->chunk.datasize);
- }
- }
- /* If it returned OK, we just keep going */
- }
-#endif /* CURL_DISABLE_HTTP */
-
- total_received += blen;
-
- if(!k->chunk && (blen || k->badheader || is_empty_data)) {
- /* If this is chunky transfer, it was already written */
-
- if(k->badheader) {
- /* we parsed a piece of data wrongly assuming it was a header
- and now we output it as body instead */
- size_t headlen = Curl_dyn_len(&data->state.headerb);
-
- /* Don't let excess data pollute body writes */
- if(k->maxdownload != -1 && (curl_off_t)headlen > k->maxdownload)
- headlen = (size_t)k->maxdownload;
-
- result = Curl_client_write(data, CLIENTWRITE_BODY,
- Curl_dyn_ptr(&data->state.headerb),
- headlen);
- if(result)
- goto out;
- }
-
- if(blen) {
-#ifndef CURL_DISABLE_POP3
- if(conn->handler->protocol & PROTO_FAMILY_POP3) {
- result = k->ignorebody? CURLE_OK :
- Curl_pop3_write(data, buf, blen);
- }
- else
-#endif /* CURL_DISABLE_POP3 */
- result = Curl_client_write(data, CLIENTWRITE_BODY, buf, blen);
- }
- k->badheader = FALSE; /* taken care of now */
-
- if(result)
- goto out;
- }
-
- if(k->download_done && !is_http3) {
- /* HTTP/3 over QUIC should keep reading until QUIC connection
- is closed. In contrast to HTTP/2 which can stop reading
- from TCP connection, HTTP/3 over QUIC needs ACK from server
- to ensure stream closure. It should keep reading. */
- k->keepon &= ~KEEP_RECV; /* we're done reading */
- }
- } /* if(!header and data to read) */
+ result = Curl_xfer_write_resp(data, buf, blen, is_eos, done);
+ if(result || *done)
+ goto out;
- if(is_empty_data) {
- /* if we received nothing, the server closed the connection and we
- are done */
- k->keepon &= ~KEEP_RECV;
- k->download_done = TRUE;
+ /* if we are done, we stop receiving. On multiplexed connections,
+ * we should still read the EOS, which may arrive as meta data after
+ * the bytes. Not taking it in might lead to an RST of the stream. */
+ if((!is_multiplex && data->req.download_done) || is_eos) {
+ data->req.keepon &= ~KEEP_RECV;
}
-
- if((k->keepon & KEEP_RECV_PAUSE) || !(k->keepon & KEEP_RECV)) {
- /* this is a paused or stopped transfer */
+ /* if we are PAUSEd or stopped receiving, leave the loop */
+ if((k->keepon & KEEP_RECV_PAUSE) || !(k->keepon & KEEP_RECV))
break;
- }
} while(maxloops-- && data_pending(data));
}
if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
- (conn->bits.close || data_eof_handled)) {
+ (conn->bits.close || is_multiplex)) {
/* When we've read the entire thing and the close bit is set, the server
may now close the connection. If there's now any kind of sending going
on from our side, we need to stop that immediately. */
* Curl_readwrite() is the low-level function to be called when data is to
* be read and written to/from the connection.
*/
-CURLcode Curl_readwrite(struct connectdata *conn,
- struct Curl_easy *data,
+CURLcode Curl_readwrite(struct Curl_easy *data,
bool *done)
{
+ struct connectdata *conn = data->conn;
struct SingleRequest *k = &data->req;
CURLcode result;
struct curltime now;
the stream was rewound (in which case we have data in a
buffer) */
if((k->keepon & KEEP_RECV) && (select_bits & CURL_CSELECT_IN)) {
- result = readwrite_data(data, conn, k, &didwhat, done);
+ result = readwrite_data(data, k, &didwhat, done);
if(result || *done)
goto out;
}
result = CURLE_PARTIAL_FILE;
goto out;
}
- if(!(data->req.no_body) && k->chunk &&
- (conn->chunk.state != CHUNK_STOP)) {
- /*
- * In chunked mode, return an error if the connection is closed prior to
- * the empty (terminating) chunk is read.
- *
- * The condition above used to check for
- * conn->proto.http->chunk.datasize != 0 which is true after reading
- * *any* chunk, not just the empty chunk.
- *
- */
- failf(data, "transfer closed with outstanding read data remaining");
- result = CURLE_PARTIAL_FILE;
- goto out;
- }
if(Curl_pgrsUpdate(data)) {
result = CURLE_ABORTED_BY_CALLBACK;
goto out;
} /* if(k->getheader || !data->req.no_body) */
}
+
+CURLcode Curl_xfer_write_resp(struct Curl_easy *data,
+ char *buf, size_t blen,
+ bool is_eos, bool *done)
+{
+ CURLcode result = CURLE_OK;
+
+ if(data->conn->handler->write_resp) {
+ /* protocol handlers offering this function take full responsibility
+ * for writing all received download data to the client. */
+ result = data->conn->handler->write_resp(data, buf, blen, is_eos, done);
+ }
+ else {
+ /* No special handling by protocol handler, write all received data
+ * as BODY to the client. */
+ if(blen || is_eos) {
+ int cwtype = CLIENTWRITE_BODY;
+ if(is_eos)
+ cwtype |= CLIENTWRITE_EOS;
+
+#ifndef CURL_DISABLE_POP3
+ if(blen && data->conn->handler->protocol & PROTO_FAMILY_POP3) {
+ result = data->req.ignorebody? CURLE_OK :
+ Curl_pop3_write(data, buf, blen);
+ }
+ else
+#endif /* CURL_DISABLE_POP3 */
+ result = Curl_client_write(data, cwtype, buf, blen);
+ }
+ }
+
+ if(!result && is_eos) {
+ /* If we wrote the EOS, we are definitely done */
+ data->req.eos_written = TRUE;
+ data->req.download_done = TRUE;
+ }
+ return result;
+}
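
One subtlety the `blen || is_eos` condition above relies on: an EOS may arrive without any payload bytes, for example when the server closes the connection after the last body byte was already delivered. A minimal sketch of such a call, illustrative only and not part of this change (the helper name is made up):

/* Sketch only: signal end-of-response without payload. Client writers
 * still receive the EOS and can flush any buffered, decoded data. */
static CURLcode signal_response_end(struct Curl_easy *data)
{
  bool done = FALSE;
  CURLcode result = Curl_xfer_write_resp(data, (char *)"", 0, TRUE, &done);
  /* on success, Curl_xfer_write_resp() has set data->req.eos_written and
   * data->req.download_done */
  return result;
}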
CURLcode Curl_follow(struct Curl_easy *data, char *newurl,
followtype type);
-CURLcode Curl_readwrite(struct connectdata *conn,
- struct Curl_easy *data, bool *done);
+CURLcode Curl_readwrite(struct Curl_easy *data, bool *done);
+int Curl_single_getsock(struct Curl_easy *data,
+ struct connectdata *conn, curl_socket_t *socks);
CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
size_t *nreadp);
CURLcode Curl_retry_request(struct Curl_easy *data, char **url);
CURLcode Curl_done_sending(struct Curl_easy *data,
struct SingleRequest *k);
+/**
+ * Write the transfer raw response bytes, as received from the connection.
+ * Will handle all passed bytes or return an error. By default, this will
+ * write the bytes as BODY to the client. Protocols may provide a
+ * "write_resp" callback in their handler to add specific treatment. E.g.
+ * HTTP parses response headers and passes them differently to the client.
+ * @param data the transfer
+ * @param buf the raw response bytes
+ * @param blen the amount of bytes in `buf`
+ * @param is_eos TRUE iff the connection indicates these to be the last
+ * bytes of the response
+ * @param done on return, TRUE iff the response is complete
+ */
+CURLcode Curl_xfer_write_resp(struct Curl_easy *data,
+ char *buf, size_t blen,
+ bool is_eos, bool *done);
+
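To make the calling convention concrete, here is a hedged sketch of a delivery helper as a receiving side might write it; the function name and the origin of `nread`/`seen_eos` are hypothetical, only the Curl_xfer_write_resp() contract documented above is relied upon:

/* Sketch only: hand freshly received bytes to the transfer. The call
 * either handles all `nread` bytes or fails; there is no partial
 * consumption for the caller to track. */
static CURLcode deliver_received(struct Curl_easy *data,
                                 char *buf, size_t nread, bool seen_eos)
{
  bool done = FALSE;
  CURLcode result = Curl_xfer_write_resp(data, buf, nread, seen_eos, &done);
  if(!result && done) {
    /* the protocol handler regards the response as complete */
  }
  return result;
}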
/* This sets up a forthcoming transfer */
void
Curl_setup_transfer (struct Curl_easy *data,
Curl_safefree(conn->sasl_authzid);
Curl_safefree(conn->options);
Curl_safefree(conn->oauth_bearer);
-#ifndef CURL_DISABLE_HTTP
- Curl_dyn_free(&conn->trailer);
-#endif
Curl_safefree(conn->host.rawalloc); /* host name buffer */
Curl_safefree(conn->conn_to_host.rawalloc); /* host name buffer */
Curl_safefree(conn->hostname_resolve);
enum expect100 exp100; /* expect 100 continue state */
enum upgrade101 upgr101; /* 101 upgrade state */
- /* Content unencoding stack. See sec 3.5, RFC2616. */
+ /* Client Writer stack, handles transfer- and content-encodings, protocol
+ * checks, pausing by client callbacks. */
struct Curl_cwriter *writer_stack;
time_t timeofdoc;
long bodywrites;
#ifndef CURL_DISABLE_COOKIES
unsigned char setcookies;
#endif
- unsigned char writer_stack_depth; /* Unencoding stack depth. */
BIT(header); /* incoming data has HTTP header */
- BIT(badheader); /* header parsing found sth not a header */
BIT(content_range); /* set TRUE if Content-Range: was found */
BIT(download_done); /* set to TRUE when download is complete */
+ BIT(eos_written); /* iff EOS has been written to client */
BIT(upload_done); /* set to TRUE when doing chunked transfer-encoding
upload and we're uploading the last chunk */
BIT(ignorebody); /* we read a response-body but we ignore it! */
bool dead_connection);
/* If used, this function gets called from transfer.c:readwrite_data() to
- allow the protocol to do extra reads/writes */
- CURLcode (*readwrite)(struct Curl_easy *data, struct connectdata *conn,
- const char *buf, size_t blen,
- size_t *pconsumed, bool *readmore);
+ allow the protocol to do extra handling when writing the response to
+ the client. */
+ CURLcode (*write_resp)(struct Curl_easy *data, const char *buf, size_t blen,
+ bool is_eos, bool *done);
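
For illustration, a hedged sketch of what a handler implementing this callback could look like; the protocol name is invented and the body simply mirrors the default path of Curl_xfer_write_resp(), where a real handler would parse its own framing or metadata before writing:

/* Sketch only: forward everything as BODY and report completion when the
 * connection signals EOS. */
static CURLcode myproto_write_resp(struct Curl_easy *data,
                                   const char *buf, size_t blen,
                                   bool is_eos, bool *done)
{
  CURLcode result = CURLE_OK;
  if(blen || is_eos) {
    int cwtype = CLIENTWRITE_BODY;
    if(is_eos)
      cwtype |= CLIENTWRITE_EOS;
    /* cast kept for trees where Curl_client_write() takes a non-const buf */
    result = Curl_client_write(data, cwtype, (char *)buf, blen);
  }
  if(!result && is_eos)
    *done = TRUE; /* these were the last bytes of the response */
  return result;
}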
/* This function can perform various checks on the connection. See
CONNCHECK_* for more information about the checks that can be performed,
struct connectdata {
struct Curl_llist_element bundle_node; /* conncache */
- /* chunk is for HTTP chunked encoding, but is in the general connectdata
- struct only because we can do just about any protocol through an HTTP
- proxy and an HTTP proxy may in fact respond using chunked encoding */
- struct Curl_chunker chunk;
-
curl_closesocket_callback fclosesocket; /* function closing the socket(s) */
void *closesocket_client;
struct negotiatedata proxyneg; /* state data for proxy Negotiate auth */
#endif
-#ifndef CURL_DISABLE_HTTP
- /* for chunked-encoded trailer */
- struct dynbuf trailer;
-#endif
-
union {
#ifndef CURL_DISABLE_FTP
struct ftp_conn ftpc;
ZERO_NULL, /* domore_getsock */
myssh_getsock, /* perform_getsock */
scp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_SSH, /* defport */
ZERO_NULL, /* domore_getsock */
myssh_getsock, /* perform_getsock */
sftp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_SSH, /* defport */
ZERO_NULL, /* domore_getsock */
ssh_getsock, /* perform_getsock */
scp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ssh_attach, /* attach */
PORT_SSH, /* defport */
ZERO_NULL, /* domore_getsock */
ssh_getsock, /* perform_getsock */
sftp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ssh_attach, /* attach */
PORT_SSH, /* defport */
ZERO_NULL, /* domore_getsock */
wssh_getsock, /* perform_getsock */
wscp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_SSH, /* defport */
ZERO_NULL, /* domore_getsock */
wssh_getsock, /* perform_getsock */
wsftp_disconnect, /* disconnect */
- ZERO_NULL, /* readwrite */
+ ZERO_NULL, /* write_resp */
ZERO_NULL, /* connection_check */
ZERO_NULL, /* attach connection */
PORT_SSH, /* defport */
my %wl = (
'curlx_uztoso' => 'cmdline tool use',
+ 'Curl_xfer_write_resp' => 'internal api',
);
my %api = (
test_setopt(curl, CURLOPT_READDATA, sdpf);
test_setopt(curl, CURLOPT_UPLOAD, 1L);
test_setopt(curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t) file_info.st_size);
+ test_setopt(curl, CURLOPT_VERBOSE, 1L);
/* Do the ANNOUNCE */
res = curl_easy_perform(curl);