Curl_safefree(data->req.newurl);
}
#endif
- if(data->state.expect100header) {
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
+ if(Curl_http_exp100_is_selected(data)) {
if(data->req.httpcode < 400) {
- k->exp100 = EXP100_SEND_DATA;
- if(data->hyp.exp100_waker) {
- hyper_waker_wake(data->hyp.exp100_waker);
- data->hyp.exp100_waker = NULL;
+ Curl_http_exp100_got100(data);
+ if(data->hyp.send_body_waker) {
+ hyper_waker_wake(data->hyp.send_body_waker);
+ data->hyp.send_body_waker = NULL;
}
}
else { /* >= 4xx */
- k->exp100 = EXP100_FAILED;
+ Curl_req_abort_sending(data);
}
}
if(data->state.hconnect && (data->req.httpcode/100 != 2) &&
struct SingleRequest *k = &data->req;
(void)conn;
- if(k->exp100 > EXP100_SEND_DATA) {
- struct curltime now = Curl_now();
- timediff_t ms = Curl_timediff(now, k->start100);
- if(ms >= data->set.expect_100_timeout) {
- /* we've waited long enough, continue anyway */
- k->exp100 = EXP100_SEND_DATA;
- k->keepon |= KEEP_SEND;
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
- infof(data, "Done waiting for 100-continue after %ldms", (long)ms);
- if(data->hyp.exp100_waker) {
- hyper_waker_wake(data->hyp.exp100_waker);
- data->hyp.exp100_waker = NULL;
- }
- }
+ if(data->hyp.send_body_waker) {
+ hyper_waker_wake(data->hyp.send_body_waker);
+ data->hyp.send_body_waker = NULL;
}
if(select_res & CURL_CSELECT_IN) {
reasonp = hyper_response_reason_phrase(resp);
reason_len = hyper_response_reason_phrase_len(resp);
- if(http_status == 417 && data->state.expect100header) {
+ if(http_status == 417 && Curl_http_exp100_is_selected(data)) {
infof(data, "Got 417 while waiting for a 100");
data->state.disableexpect = TRUE;
data->req.newurl = strdup(data->state.url);
int rc = HYPER_POLL_ERROR;
(void)ctx;
- if(data->req.exp100 > EXP100_SEND_DATA) {
- if(data->req.exp100 == EXP100_FAILED)
- return HYPER_POLL_ERROR;
-
- /* still waiting confirmation */
- if(data->hyp.exp100_waker)
- hyper_waker_free(data->hyp.exp100_waker);
- data->hyp.exp100_waker = hyper_context_waker(ctx);
- return HYPER_POLL_PENDING;
- }
-
result = Curl_multi_xfer_ulbuf_borrow(data, &xfer_ulbuf, &xfer_ulblen);
if(result)
goto out;
goto error;
}
}
- else {
- if(!data->state.disableexpect) {
- data->state.expect100header = TRUE;
- }
- }
if(hyper_request_set_method(req, (uint8_t *)method, strlen(method))) {
failf(data, "error setting method");
if((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD)) {
/* HTTP GET/HEAD download */
Curl_pgrsSetUploadSize(data, 0); /* nothing */
- Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, -1);
}
+
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, FIRSTSOCKET);
conn->datastream = Curl_hyper_stream;
- if(data->state.expect100header)
- /* Timeout count starts now since with Hyper we don't know exactly when
- the full request has been sent. */
- data->req.start100 = Curl_now();
/* clear userpwd and proxyuserpwd to avoid reusing old credentials
* from reused connections */
hyper_waker_free(h->write_waker);
h->write_waker = NULL;
}
- if(h->exp100_waker) {
- hyper_waker_free(h->exp100_waker);
- h->exp100_waker = NULL;
- }
if(h->send_body_waker) {
hyper_waker_free(h->send_body_waker);
h->send_body_waker = NULL;
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
cr_hyper_unpause,
+ Curl_creader_def_done,
sizeof(struct Curl_creader)
};
hyper_waker *write_waker;
hyper_waker *read_waker;
const hyper_executor *exec;
- hyper_waker *exp100_waker;
hyper_waker *send_body_waker;
struct hyp_io_ctx io_ctx;
};
*/
static bool http_should_fail(struct Curl_easy *data);
+static bool http_exp100_is_waiting(struct Curl_easy *data);
+static CURLcode http_exp100_add_reader(struct Curl_easy *data);
+static void http_exp100_send_anyway(struct Curl_easy *data);
/*
* HTTP handler interface.
}
#endif
-/* check and possibly add an Expect: header */
-static CURLcode expect100(struct Curl_easy *data, struct dynbuf *req)
-{
- CURLcode result = CURLE_OK;
- if(!data->state.disableexpect &&
- Curl_use_http_1_1plus(data, data->conn) &&
- (data->conn->httpversion < 20)) {
- /* if not doing HTTP 1.0 or version 2, or disabled explicitly, we add an
- Expect: 100-continue to the headers which actually speeds up post
- operations (as there is one packet coming back from the web server) */
- const char *ptr = Curl_checkheaders(data, STRCONST("Expect"));
- if(ptr) {
- data->state.expect100header =
- Curl_compareheader(ptr, STRCONST("Expect:"), STRCONST("100-continue"));
- }
- else {
- result = Curl_dyn_addn(req, STRCONST("Expect: 100-continue\r\n"));
- if(!result)
- data->state.expect100header = TRUE;
- }
- }
-
- return result;
-}
-
enum proxy_use {
HEADER_SERVER, /* direct to server */
HEADER_PROXY, /* regular request to proxy */
return result;
}
-static CURLcode addexpect(struct Curl_easy *data, struct dynbuf *r)
+static CURLcode addexpect(struct Curl_easy *data, struct dynbuf *r,
+ bool *announced_exp100)
{
- data->state.expect100header = FALSE;
+ CURLcode result;
+ char *ptr;
+
+ *announced_exp100 = FALSE;
/* Avoid Expect: 100-continue if Upgrade: is used */
- if(data->req.upgr101 == UPGR101_INIT) {
- /* For really small puts we don't use Expect: headers at all, and for
- the somewhat bigger ones we allow the app to disable it. Just make
- sure that the expect100header is always set to the preferred value
- here. */
- char *ptr = Curl_checkheaders(data, STRCONST("Expect"));
- if(ptr) {
- data->state.expect100header =
- Curl_compareheader(ptr, STRCONST("Expect:"),
- STRCONST("100-continue"));
+ if(data->req.upgr101 != UPGR101_INIT)
+ return CURLE_OK;
+
+ /* For really small puts we do not use Expect: headers at all, and for
+ the somewhat bigger ones we allow the app to disable it. Just make
+ sure that *announced_exp100 always ends up reflecting whether the
+ header was (or will be) announced. */
+ ptr = Curl_checkheaders(data, STRCONST("Expect"));
+ if(ptr) {
+ *announced_exp100 =
+ Curl_compareheader(ptr, STRCONST("Expect:"), STRCONST("100-continue"));
+ }
+ else if(!data->state.disableexpect &&
+ Curl_use_http_1_1plus(data, data->conn) &&
+ (data->conn->httpversion < 20)) {
+ /* if we are not doing HTTP 1.0 or HTTP/2 (or later), and Expect: has
+ not been disabled explicitly, add an Expect: 100-continue header. This
+ can actually speed up POST operations, as the server replies with a
+ single small packet before the body has to be sent */
+ curl_off_t client_len = Curl_creader_client_length(data);
+ if(client_len > EXPECT_100_THRESHOLD || client_len < 0) {
+ result = Curl_dyn_addn(r, STRCONST("Expect: 100-continue\r\n"));
+ if(result)
+ return result;
+ *announced_exp100 = TRUE;
}
- else {
- curl_off_t client_len = Curl_creader_client_length(data);
- if(client_len > EXPECT_100_THRESHOLD || client_len < 0)
- return expect100(data, r);
- }
}
return CURLE_OK;
}
{
CURLcode result = CURLE_OK;
curl_off_t req_clen;
+ bool announced_exp100 = FALSE;
DEBUGASSERT(data->conn);
#ifndef USE_HYPER
goto out;
}
}
- result = addexpect(data, r);
+ result = addexpect(data, r, &announced_exp100);
if(result)
goto out;
break;
/* end of headers */
result = Curl_dyn_addn(r, STRCONST("\r\n"));
Curl_pgrsSetUploadSize(data, req_clen);
+ if(announced_exp100)
+ result = http_exp100_add_reader(data);
out:
if(!result) {
k->headerline = 0; /* restart the header line counter */
/* if we did wait for this do enable write now! */
- if(k->exp100 > EXP100_SEND_DATA) {
- k->exp100 = EXP100_SEND_DATA;
- k->keepon |= KEEP_SEND;
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
- }
+ Curl_http_exp100_got100(data);
break;
case 101:
if(conn->httpversion == 11) {
* request body has been sent we stop sending and mark the
* connection for closure after we've read the entire response.
*/
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
if(!Curl_req_done_sending(data)) {
- if((k->httpcode == 417) && data->state.expect100header) {
+ if((k->httpcode == 417) && Curl_http_exp100_is_selected(data)) {
/* 417 Expectation Failed - try again without the Expect
header */
- if(!k->writebytecount &&
- k->exp100 == EXP100_AWAITING_CONTINUE) {
+ if(!k->writebytecount && http_exp100_is_waiting(data)) {
infof(data, "Got HTTP failure 417 while waiting for a 100");
}
else {
}
else if(data->set.http_keep_sending_on_error) {
infof(data, "HTTP error before end of send, keep sending");
- if(k->exp100 > EXP100_SEND_DATA) {
- k->exp100 = EXP100_SEND_DATA;
- k->keepon |= KEEP_SEND;
- }
+ http_exp100_send_anyway(data);
}
else {
infof(data, "HTTP error before end of send, stop sending");
result = Curl_req_abort_sending(data);
if(result)
return result;
- if(data->state.expect100header)
- k->exp100 = EXP100_FAILED;
}
}
break;
}
}
+struct cr_exp100_ctx {
+ struct Curl_creader super;
+ struct curltime start; /* time started waiting */
+ enum expect100 state;
+};
+
+/* Expect: 100-continue client reader, blocking uploads */
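+/* The reader runs a small state machine over `enum expect100`:
+ * - the first cr_exp100_read() switches EXP100_SENDING_REQUEST to
+ *   EXP100_AWAITING_CONTINUE and starts the expect_100_timeout,
+ * - http_exp100_continue() moves to EXP100_SEND_DATA when a 100 arrives,
+ *   the timeout expires or sending is forced to continue,
+ * - cr_exp100_done() marks EXP100_FAILED when the upload ended prematurely.
+ * Reads only pass through to the underlying reader in EXP100_SEND_DATA. */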
+
+static void http_exp100_continue(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_exp100_ctx *ctx = reader->ctx;
+ if(ctx->state > EXP100_SEND_DATA) {
+ ctx->state = EXP100_SEND_DATA;
+ data->req.keepon |= KEEP_SEND;
+ data->req.keepon &= ~KEEP_SEND_TIMED;
+ Curl_expire_done(data, EXPIRE_100_TIMEOUT);
+ }
+}
+
+static CURLcode cr_exp100_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen,
+ size_t *nread, bool *eos)
+{
+ struct cr_exp100_ctx *ctx = reader->ctx;
+ timediff_t ms;
+
+ switch(ctx->state) {
+ case EXP100_SENDING_REQUEST:
+ /* We are now waiting for a reply from the server or
+ * a timeout on our side */
+ DEBUGF(infof(data, "cr_exp100_read, start AWAITING_CONTINUE"));
+ ctx->state = EXP100_AWAITING_CONTINUE;
+ ctx->start = Curl_now();
+ Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
+ data->req.keepon &= ~KEEP_SEND;
+ data->req.keepon |= KEEP_SEND_TIMED;
+ *nread = 0;
+ *eos = FALSE;
+ return CURLE_OK;
+ case EXP100_FAILED:
+ DEBUGF(infof(data, "cr_exp100_read, expectation failed, error"));
+ *nread = 0;
+ *eos = FALSE;
+ return CURLE_READ_ERROR;
+ case EXP100_AWAITING_CONTINUE:
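+ /* RFC 2616, section 8.2.3: the client SHOULD NOT wait for an
+ indefinite period before sending the request body, hence the
+ expect_100_timeout check below */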
+ ms = Curl_timediff(Curl_now(), ctx->start);
+ if(ms < data->set.expect_100_timeout) {
+ DEBUGF(infof(data, "cr_exp100_read, AWAITING_CONTINUE, not expired"));
+ data->req.keepon &= ~KEEP_SEND;
+ data->req.keepon |= KEEP_SEND_TIMED;
+ *nread = 0;
+ *eos = FALSE;
+ return CURLE_OK;
+ }
+ /* we've waited long enough, continue anyway */
+ http_exp100_continue(data, reader);
+ infof(data, "Done waiting for 100-continue");
+ FALLTHROUGH();
+ default:
+ DEBUGF(infof(data, "cr_exp100_read, pass through"));
+ return Curl_creader_read(data, reader->next, buf, blen, nread, eos);
+ }
+}
+
+static void cr_exp100_done(struct Curl_easy *data,
+ struct Curl_creader *reader, int premature)
+{
+ struct cr_exp100_ctx *ctx = reader->ctx;
+ ctx->state = premature? EXP100_FAILED : EXP100_SEND_DATA;
+ data->req.keepon &= ~KEEP_SEND_TIMED;
+ Curl_expire_done(data, EXPIRE_100_TIMEOUT);
+}
+
+static const struct Curl_crtype cr_exp100 = {
+ "cr-exp100",
+ Curl_creader_def_init,
+ cr_exp100_read,
+ Curl_creader_def_close,
+ Curl_creader_def_needs_rewind,
+ Curl_creader_def_total_length,
+ Curl_creader_def_resume_from,
+ Curl_creader_def_rewind,
+ Curl_creader_def_unpause,
+ cr_exp100_done,
+ sizeof(struct cr_exp100_ctx)
+};
+
+static CURLcode http_exp100_add_reader(struct Curl_easy *data)
+{
+ struct Curl_creader *reader = NULL;
+ CURLcode result;
+
+ result = Curl_creader_create(&reader, data, &cr_exp100,
+ CURL_CR_PROTOCOL);
+ if(!result)
+ result = Curl_creader_add(data, reader);
+ if(!result) {
+ struct cr_exp100_ctx *ctx = reader->ctx;
+ ctx->state = EXP100_SENDING_REQUEST;
+ }
+
+ if(result && reader)
+ Curl_creader_free(data, reader);
+ return result;
+}
+
+void Curl_http_exp100_got100(struct Curl_easy *data)
+{
+ struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
+ if(r)
+ http_exp100_continue(data, r);
+}
+
+static bool http_exp100_is_waiting(struct Curl_easy *data)
+{
+ struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
+ if(r) {
+ struct cr_exp100_ctx *ctx = r->ctx;
+ return (ctx->state == EXP100_AWAITING_CONTINUE);
+ }
+ return FALSE;
+}
+
+static void http_exp100_send_anyway(struct Curl_easy *data)
+{
+ struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
+ if(r)
+ http_exp100_continue(data, r);
+}
+
+bool Curl_http_exp100_is_selected(struct Curl_easy *data)
+{
+ struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
+ return r? TRUE : FALSE;
+}
+
#endif /* CURL_DISABLE_HTTP */
version. This count includes CONNECT response headers. */
#define MAX_HTTP_RESP_HEADER_SIZE (300*1024)
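+
+/* TRUE if the 'Expect: 100-continue' handling (the expect-100 client
+ reader) has been installed for this transfer */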
+bool Curl_http_exp100_is_selected(struct Curl_easy *data);
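+/* a 100 response (or equivalent go-ahead) was received: resume sending
+ the request body */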
+void Curl_http_exp100_got100(struct Curl_easy *data);
+
#endif /* CURL_DISABLE_HTTP */
/****************************************************************************
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_done,
sizeof(struct chunked_reader)
};
cr_mime_resume_from,
cr_mime_rewind,
cr_mime_unpause,
+ Curl_creader_def_done,
sizeof(struct cr_mime_ctx)
};
req->offset = 0;
req->httpcode = 0;
req->keepon = 0;
- req->start100 = t0;
- req->exp100 = EXP100_SEND_DATA;
req->upgr101 = UPGR101_INIT;
req->timeofdoc = 0;
req->bodywrites = 0;
Curl_bufq_skip(&data->req.sendbuf, nwritten);
if(hds_len) {
data->req.sendbuf_hds_len -= CURLMIN(hds_len, nwritten);
- if(!data->req.sendbuf_hds_len) {
- /* all request headers sent */
- if(data->req.exp100 == EXP100_SENDING_REQUEST) {
- /* We are now waiting for a reply from the server or
- * a timeout on our side */
- data->req.exp100 = EXP100_AWAITING_CONTINUE;
- data->req.start100 = Curl_now();
- Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
- }
- }
}
/* leave if we could not send all. Maybe network blocking or
* speed limits on transfer */
{
DEBUGASSERT(!data->req.upload_done);
data->req.upload_done = TRUE;
- data->req.keepon &= ~KEEP_SEND; /* we're done sending */
+ data->req.keepon &= ~(KEEP_SEND|KEEP_SEND_TIMED); /* we're done sending */
- /* FIXME: http specific stuff, need to go somewhere else */
- data->req.exp100 = EXP100_SEND_DATA;
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
+ Curl_creader_done(data, data->req.upload_aborted);
if(data->req.upload_aborted) {
if(data->req.writebytecount)
{
CURLcode result;
- /* Fill our send buffer if more from client can be read and
- * we are not in a "expect-100" situation. */
- if(!data->req.eos_read && !Curl_bufq_is_full(&data->req.sendbuf) &&
- (data->req.exp100 == EXP100_SEND_DATA)) {
+ /* Fill our send buffer if more from client can be read. */
+ if(!data->req.eos_read && !Curl_bufq_is_full(&data->req.sendbuf)) {
ssize_t nread = Curl_bufq_sipn(&data->req.sendbuf, 0,
add_from_client, data, &result);
if(nread < 0 && result != CURLE_AGAIN)
int httpcode; /* error code from the 'HTTP/1.? XXX' or
'RTSP/1.? XXX' line */
int keepon;
- struct curltime start100; /* time stamp to wait for the 100 code from */
- enum expect100 exp100; /* expect 100 continue state */
enum upgrade101 upgr101; /* 101 upgrade state */
/* Client Writer stack, handles transfer- and content-encodings, protocol
goto out;
}
}
-
- data->state.expect100header = FALSE; /* RTSP posts are simple/small */
}
else if(rtspreq == RTSPREQ_GET_PARAMETER) {
/* Check for an empty GET_PARAMETER (heartbeat) request */
return CURLE_OK;
}
+void Curl_creader_def_done(struct Curl_easy *data,
+ struct Curl_creader *reader, int premature)
+{
+ (void)data;
+ (void)reader;
+ (void)premature;
+}
+
struct cr_in_ctx {
struct Curl_creader super;
curl_read_callback read_cb;
cr_in_resume_from,
cr_in_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_done,
sizeof(struct cr_in_ctx)
};
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_done,
sizeof(struct cr_lc_ctx)
};
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_done,
sizeof(struct Curl_creader)
};
cr_buf_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_done,
sizeof(struct cr_buf_ctx)
};
}
return result;
}
+
+void Curl_creader_done(struct Curl_easy *data, int premature)
+{
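+ /* inform each installed reader, starting at the top of the stack */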
+ struct Curl_creader *reader = data->req.reader_stack;
+ while(reader) {
+ reader->crt->done(data, reader, premature);
+ reader = reader->next;
+ }
+}
+
+struct Curl_creader *Curl_creader_get_by_type(struct Curl_easy *data,
+ const struct Curl_crtype *crt)
+{
+ struct Curl_creader *r;
+ for(r = data->req.reader_stack; r; r = r->next) {
+ if(r->crt == crt)
+ return r;
+ }
+ return NULL;
+}
struct Curl_creader *reader, curl_off_t offset);
CURLcode (*rewind)(struct Curl_easy *data, struct Curl_creader *reader);
CURLcode (*unpause)(struct Curl_easy *data, struct Curl_creader *reader);
+ void (*done)(struct Curl_easy *data,
+ struct Curl_creader *reader, int premature);
size_t creader_size; /* sizeof() allocated struct Curl_creader */
};
struct Curl_creader *reader);
CURLcode Curl_creader_def_unpause(struct Curl_easy *data,
struct Curl_creader *reader);
+void Curl_creader_def_done(struct Curl_easy *data,
+ struct Curl_creader *reader, int premature);
/**
* Convenience method for calling `reader->do_read()` that
*/
CURLcode Curl_creader_unpause(struct Curl_easy *data);
+/**
+ * Tell all installed client readers that they are done, i.e. the transfer
+ * will not read from them any further. `premature` is non-zero when
+ * sending stopped before the request body was delivered in full.
+ */
+void Curl_creader_done(struct Curl_easy *data, int premature);
+
+/**
+ * Look up an installed client reader on `data` by its type.
+ * @return first reader with that type or NULL
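+ *
+ * Illustrative use (`cr_example` is a hypothetical reader type):
+ *   struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_example);
+ *   if(r)
+ *     ... the cr_example reader is installed on this transfer ...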
+ */
+struct Curl_creader *Curl_creader_get_by_type(struct Curl_easy *data,
+ const struct Curl_crtype *crt);
+
+
/**
* Set the client reader to provide 0 bytes, immediate EOS.
*/
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_done,
sizeof(struct cr_eob_ctx)
};
}
/* If we still have writing to do, we check if we have a writable socket. */
- if((k->keepon & KEEP_SEND) && (select_bits & CURL_CSELECT_OUT)) {
+ if(((k->keepon & KEEP_SEND) && (select_bits & CURL_CSELECT_OUT)) ||
+ (k->keepon & KEEP_SEND_TIMED)) {
/* write */
result = readwrite_upload(data, &didwhat);
now = Curl_now();
if(!didwhat) {
- /* no read no write, this is a timeout? */
- if(k->exp100 == EXP100_AWAITING_CONTINUE) {
- /* This should allow some time for the header to arrive, but only a
- very short time as otherwise it'll be too much wasted time too
- often. */
-
- /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
-
- Therefore, when a client sends this header field to an origin server
- (possibly via a proxy) from which it has never seen a 100 (Continue)
- status, the client SHOULD NOT wait for an indefinite period before
- sending the request body.
-
- */
-
- timediff_t ms = Curl_timediff(now, k->start100);
- if(ms >= data->set.expect_100_timeout) {
- /* we've waited long enough, continue anyway */
- k->exp100 = EXP100_SEND_DATA;
- k->keepon |= KEEP_SEND;
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
- infof(data, "Done waiting for 100-continue");
- }
- }
-
result = Curl_conn_ev_data_idle(data);
if(result)
goto out;
if(sockindex != -1)
k->keepon |= KEEP_RECV;
- if(writesockindex != -1) {
- /* HTTP 1.1 magic:
-
- Even if we require a 100-return code before uploading data, we might
- need to write data before that since the REQUEST may not have been
- finished sent off just yet.
-
- Thus, we must check if the request has been sent before we set the
- state info where we wait for the 100-return code
- */
- if((data->state.expect100header) &&
- (conn->handler->protocol&PROTO_FAMILY_HTTP)) {
- /* wait with write until we either got 100-continue or a timeout */
- k->exp100 = EXP100_AWAITING_CONTINUE;
- k->start100 = Curl_now();
-
- /* Set a timeout for the multi interface. Add the inaccuracy margin so
- that we don't fire slightly too early and get denied to run. */
- Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
- }
- else {
- if(data->state.expect100header)
- /* when we've sent off the rest of the headers, we must await a
- 100-continue but first finish sending the request */
- k->exp100 = EXP100_SENDING_REQUEST;
-
- /* enable the write bit when we're not waiting for continue */
- k->keepon |= KEEP_SEND;
- }
- } /* if(writesockindex != -1) */
+ if(writesockindex != -1)
+ k->keepon |= KEEP_SEND;
} /* if(k->getheader || !data->req.no_body) */
}
}
data->state.done = FALSE; /* *_done() is not called yet */
- data->state.expect100header = FALSE;
if(data->req.no_body)
/* in HTTP lingo, no body means using the HEAD request... */
#define KEEP_RECV_PAUSE (1<<4) /* reading is paused */
#define KEEP_SEND_PAUSE (1<<5) /* writing is paused */
+/* KEEP_SEND_TIMED is set when the transfer should attempt sending
+ * at timer (or other) events. A transfer waiting on a timer clears
+ * KEEP_SEND so the connection is not polled for POLLOUT. With
+ * KEEP_SEND_TIMED set, sending is still attempted whenever the transfer
+ * enters the "readwrite" loop, e.g. when a timer fires.
+ * This is used in HTTP for 'Expect: 100-continue' waiting. */
+#define KEEP_SEND_TIMED (1<<6)
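+/* e.g. while waiting on Expect: 100-continue, the cr_exp100 reader does:
+ *   data->req.keepon &= ~KEEP_SEND;       stop POLLOUT polling
+ *   data->req.keepon |= KEEP_SEND_TIMED;  keep trying to send on timer events
+ */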
+
#define KEEP_RECVBITS (KEEP_RECV | KEEP_RECV_HOLD | KEEP_RECV_PAUSE)
#define KEEP_SENDBITS (KEEP_SEND | KEEP_SEND_HOLD | KEEP_SEND_PAUSE)
BIT(authproblem); /* TRUE if there's some problem authenticating */
/* set after initial USER failure, to prevent an authentication loop */
BIT(wildcardmatch); /* enable wildcard matching */
- BIT(expect100header); /* TRUE if we added Expect: 100-continue */
BIT(disableexpect); /* TRUE if Expect: is disabled due to a previous
417 response */
BIT(use_range);
env.make_data_file(indir=env.gen_dir, fname="data-63k", fsize=63*1024)
env.make_data_file(indir=env.gen_dir, fname="data-64k", fsize=64*1024)
env.make_data_file(indir=env.gen_dir, fname="data-100k", fsize=100*1024)
+ env.make_data_file(indir=env.gen_dir, fname="data-1m+", fsize=(1024*1024)+1)
env.make_data_file(indir=env.gen_dir, fname="data-10m", fsize=10*1024*1024)
httpd.clear_extra_configs()
httpd.reload()
up_speed = r.stats[0]['speed_upload']
assert (speed_limit * 0.5) <= up_speed <= (speed_limit * 1.5), f'{r.stats[0]}'
+ # upload larger data, triggering "Expect: 100-continue" code paths
+ @pytest.mark.parametrize("proto", ['http/1.1'])
+ def test_07_60_upload_exp100(self, env: Env, httpd, nghttpx, repeat, proto):
+ fdata = os.path.join(env.gen_dir, 'data-1m+')
+ read_delay = 1
+ curl = CurlClient(env=env)
+ url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-0]'\
+ f'&read_delay={read_delay}s'
+ r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto, extra_args=[
+ '--expect100-timeout', f'{read_delay+1}'
+ ])
+ r.check_stats(count=1, http_status=200, exitcode=0)
+
+ # upload larger data, triggering "Expect: 100-continue"; the
+ # --expect100-timeout is shorter than the server's read_delay, so curl
+ # stops waiting and sends the body anyway
+ @pytest.mark.parametrize("proto", ['http/1.1'])
+ def test_07_61_upload_exp100_timeout(self, env: Env, httpd, nghttpx, repeat, proto):
+ fdata = os.path.join(env.gen_dir, 'data-1m+')
+ read_delay = 2
+ curl = CurlClient(env=env)
+ url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-0]'\
+ f'&read_delay={read_delay}s'
+ r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto, extra_args=[
+ '--expect100-timeout', f'{read_delay-1}'
+ ])
+ r.check_stats(count=1, http_status=200, exitcode=0)
+
apr_off_t rbody_len = 0;
const char *s_rbody_len;
const char *request_id = "none";
- apr_time_t chunk_delay = 0;
+ apr_time_t read_delay = 0, chunk_delay = 0;
apr_array_header_t *args = NULL;
long l;
int i;
request_id = val;
continue;
}
+ else if(!strcmp("read_delay", arg)) {
+ rv = duration_parse(&read_delay, val, "s");
+ if(APR_SUCCESS == rv) {
+ continue;
+ }
+ }
else if(!strcmp("chunk_delay", arg)) {
rv = duration_parse(&chunk_delay, val, "s");
if(APR_SUCCESS == rv) {
ct = apr_table_get(r->headers_in, "content-type");
ap_set_content_type(r, ct? ct : "text/plain");
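+ /* delay before the request body is read; tests use this to keep the
+ client waiting for its "Expect: 100-continue" go-ahead */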
+ if(read_delay) {
+ apr_sleep(read_delay);
+ }
bb = apr_brigade_create(r->pool, c->bucket_alloc);
/* copy any request body into the response */
if((rv = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK))) goto cleanup;