--- /dev/null
+++ b/lib/c-hyper.c
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 1998 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#if !defined(CURL_DISABLE_HTTP) && defined(USE_HYPER)
+
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+#ifdef HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+#ifdef HAVE_NET_IF_H
+#include <net/if.h>
+#endif
+#ifdef HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+#include <hyper.h>
+#include "urldata.h"
+#include "sendf.h"
+#include "transfer.h"
+#include "multiif.h"
+#include "progress.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
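+/*
+ * read_cb() is the hyper_io read callback. It reads off the socket with
+ * Curl_read() and, when that would block, saves a waker and returns
+ * HYPER_IO_PENDING so that Hyper retries once the waker has been woken.
+ */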
+static size_t read_cb(void *userp, hyper_context *ctx,
+ uint8_t *buf, size_t buflen)
+{
+ struct connectdata *conn = (struct connectdata *)userp;
+ struct Curl_easy *data = conn->data;
+ CURLcode result;
+ ssize_t nread;
+
+ (void)ctx;
+
+ result = Curl_read(conn, conn->sockfd, (char *)buf, buflen, &nread);
+ if(result == CURLE_AGAIN) {
+ /* would block, register interest */
+ if(data->hyp.read_waker)
+ hyper_waker_free(data->hyp.read_waker);
+ data->hyp.read_waker = hyper_context_waker(ctx);
+ if(!data->hyp.read_waker) {
+ failf(data, "Couldn't make the read hyper_context_waker");
+ return HYPER_IO_ERROR;
+ }
+ return HYPER_IO_PENDING;
+ }
+ else if(result) {
+ failf(data, "Curl_read failed");
+ return HYPER_IO_ERROR;
+ }
+ return (size_t)nread;
+}
+
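+/*
+ * write_cb() is the hyper_io write callback, the sending counterpart of
+ * read_cb(), built on Curl_write().
+ */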
+static size_t write_cb(void *userp, hyper_context *ctx,
+ const uint8_t *buf, size_t buflen)
+{
+ struct connectdata *conn = (struct connectdata *)userp;
+ struct Curl_easy *data = conn->data;
+ CURLcode result;
+ ssize_t nwrote;
+
+ result = Curl_write(conn, conn->sockfd, (void *)buf, buflen, &nwrote);
+ if(result == CURLE_AGAIN) {
+ /* would block, register interest */
+ if(data->hyp.write_waker)
+ hyper_waker_free(data->hyp.write_waker);
+ data->hyp.write_waker = hyper_context_waker(ctx);
+ if(!data->hyp.write_waker) {
+ failf(data, "Couldn't make the write hyper_context_waker");
+ return HYPER_IO_ERROR;
+ }
+ return HYPER_IO_PENDING;
+ }
+ else if(result) {
+ failf(data, "Curl_write failed");
+ return HYPER_IO_ERROR;
+ }
+ return (size_t)nwrote;
+}
+
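+/*
+ * hyper_each_header() is called by hyper_headers_foreach() for every
+ * received response header. It rebuilds the "name: value\r\n" line (or the
+ * final blank line when the name length is zero) and passes it to the
+ * header callback, like the non-Hyper code path does.
+ */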
+static int hyper_each_header(void *userdata,
+ const uint8_t *name,
+ size_t name_len,
+ const uint8_t *value,
+ size_t value_len)
+{
+ struct Curl_easy *data = (struct Curl_easy *)userdata;
+ size_t wrote;
+ size_t len;
+ char *headp;
+ CURLcode result;
+ curl_write_callback writeheader =
+ data->set.fwrite_header? data->set.fwrite_header: data->set.fwrite_func;
+ Curl_dyn_reset(&data->state.headerb);
+ if(name_len) {
+ if(Curl_dyn_addf(&data->state.headerb, "%.*s: %.*s\r\n",
+ (int) name_len, name, (int) value_len, value))
+ return HYPER_ITER_BREAK;
+ }
+ else {
+ if(Curl_dyn_add(&data->state.headerb, "\r\n"))
+ return HYPER_ITER_BREAK;
+ }
+ len = Curl_dyn_len(&data->state.headerb);
+ headp = Curl_dyn_ptr(&data->state.headerb);
+
+ result = Curl_http_header(data, data->conn, headp);
+ if(result) {
+ data->state.hresult = result;
+ return HYPER_ITER_BREAK;
+ }
+
+ Curl_debug(data, CURLINFO_HEADER_IN, headp, len);
+
+ Curl_set_in_callback(data, true);
+ wrote = writeheader(headp, 1, len, data->set.writeheader);
+ Curl_set_in_callback(data, false);
+ if(wrote != len) {
+ data->state.hresult = CURLE_ABORTED_BY_CALLBACK;
+ return HYPER_ITER_BREAK;
+ }
+
+ data->info.header_size += (long)len;
+ data->req.headerbytecount += (long)len;
+ return HYPER_ITER_CONTINUE;
+}
+
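+/*
+ * hyper_body_chunk() is called for each chunk of response body data. The
+ * first invocation runs Curl_http_firstwrite(), then the data is handed to
+ * the write callback and the download progress counter is updated.
+ */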
+static int hyper_body_chunk(void *userdata, const hyper_buf *chunk)
+{
+ char *buf = (char *)hyper_buf_bytes(chunk);
+ size_t len = hyper_buf_len(chunk);
+ struct Curl_easy *data = (struct Curl_easy *)userdata;
+ curl_write_callback writebody = data->set.fwrite_func;
+ struct SingleRequest *k = &data->req;
+ size_t wrote;
+
+ if(0 == k->bodywrites++) {
+ bool done = FALSE;
+ CURLcode result = Curl_http_firstwrite(data, data->conn, &done);
+ if(result || done) {
+ infof(data, "Return early from hyper_body_chunk\n");
+ data->state.hresult = result;
+ return HYPER_ITER_BREAK;
+ }
+ }
+ if(k->ignorebody)
+ return HYPER_ITER_CONTINUE;
+ Curl_debug(data, CURLINFO_DATA_IN, buf, len);
+ Curl_set_in_callback(data, true);
+ wrote = writebody(buf, 1, len, data->set.out);
+ Curl_set_in_callback(data, false);
+
+ if(wrote != len) {
+ data->state.hresult = CURLE_WRITE_ERROR;
+ return HYPER_ITER_BREAK;
+ }
+
+ data->req.bytecount += len;
+ Curl_pgrsSetDownloadCounter(data, data->req.bytecount);
+ return HYPER_ITER_CONTINUE;
+}
+
+/*
+ * Hyper does not consider the status line, the first line in an HTTP/1
+ * response, to be a header. The libcurl API does. This function sends the
+ * status line in the header callback.
+ */
+static CURLcode status_line(struct Curl_easy *data,
+ struct connectdata *conn,
+ uint16_t http_status,
+ int http_version,
+ const uint8_t *reason, size_t rlen)
+{
+ CURLcode result;
+ size_t wrote;
+ size_t len;
+ const char *vstr;
+ curl_write_callback writeheader =
+ data->set.fwrite_header? data->set.fwrite_header: data->set.fwrite_func;
+ vstr = http_version == HYPER_HTTP_VERSION_1_1 ? "1.1" :
+ (http_version == HYPER_HTTP_VERSION_2 ? "2" : "1.0");
+ conn->httpversion =
+ http_version == HYPER_HTTP_VERSION_1_1 ? 11 :
+ (http_version == HYPER_HTTP_VERSION_2 ? 20 : 10);
+ data->req.httpcode = http_status;
+
+ result = Curl_http_statusline(data, conn);
+ if(result)
+ return result;
+
+ Curl_dyn_reset(&data->state.headerb);
+
+ result = Curl_dyn_addf(&data->state.headerb, "HTTP/%s %03d %.*s\r\n",
+ vstr,
+ (int)http_status,
+ (int)rlen, reason);
+ if(result)
+ return result;
+ len = Curl_dyn_len(&data->state.headerb);
+ Curl_debug(data, CURLINFO_HEADER_IN, Curl_dyn_ptr(&data->state.headerb),
+ len);
+ Curl_set_in_callback(data, true);
+ wrote = writeheader(Curl_dyn_ptr(&data->state.headerb), 1, len,
+ data->set.writeheader);
+ Curl_set_in_callback(data, false);
+ if(wrote != len)
+ return CURLE_WRITE_ERROR;
+
+ data->info.header_size += (long)len;
+ data->req.headerbytecount += (long)len;
+ data->req.httpcode = http_status;
+ return CURLE_OK;
+}
+
+/*
+ * Hyper does not pass on the last empty response header. The libcurl API
+ * does. This function sends an empty header in the header callback.
+ */
+static CURLcode empty_header(struct Curl_easy *data)
+{
+ return hyper_each_header(data, NULL, 0, NULL, 0) ?
+ CURLE_WRITE_ERROR : CURLE_OK;
+}
+
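+/*
+ * hyperstream() drives the transfer: it wakes any pending read/write wakers
+ * based on socket readiness, polls the Hyper executor and, once the response
+ * task completes, passes the status line, the headers and a final blank
+ * header on to the client before pushing a body-foreach task that delivers
+ * the response body.
+ */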
+static CURLcode hyperstream(struct Curl_easy *data,
+ struct connectdata *conn,
+ int *didwhat,
+ bool *done,
+ int select_res)
+{
+ hyper_response *resp = NULL;
+ uint16_t http_status;
+ int http_version;
+ hyper_headers *headers = NULL;
+ hyper_body *resp_body = NULL;
+ struct hyptransfer *h = &data->hyp;
+ hyper_task *task;
+ hyper_task *foreach;
+ hyper_error *hypererr = NULL;
+ const uint8_t *reasonp;
+ size_t reason_len;
+ CURLcode result = CURLE_OK;
+ (void)conn;
+
+ if(select_res & CURL_CSELECT_IN) {
+ if(h->read_waker)
+ hyper_waker_wake(h->read_waker);
+ h->read_waker = NULL;
+ }
+ if(select_res & CURL_CSELECT_OUT) {
+ if(h->write_waker)
+ hyper_waker_wake(h->write_waker);
+ h->write_waker = NULL;
+ }
+
+ *done = FALSE;
+ do {
+ hyper_task_return_type t;
+ task = hyper_executor_poll(h->exec);
+ if(!task) {
+ *didwhat = KEEP_RECV;
+ break;
+ }
+ t = hyper_task_type(task);
+ switch(t) {
+ case HYPER_TASK_ERROR:
+ hypererr = hyper_task_value(task);
+ break;
+ case HYPER_TASK_RESPONSE:
+ resp = hyper_task_value(task);
+ break;
+ default:
+ break;
+ }
+ hyper_task_free(task);
+
+ if(t == HYPER_TASK_ERROR) {
+ hyper_code errnum = hyper_error_code(hypererr);
+ if(errnum == HYPERE_ABORTED_BY_CALLBACK) {
+ /* override Hyper's view, might not even be an error */
+ result = data->state.hresult;
+ infof(data, "hyperstream is done (by early callback)\n");
+ }
+ else {
+ uint8_t errbuf[256];
+ size_t errlen = hyper_error_print(hypererr, errbuf, sizeof(errbuf));
+ failf(data, "Hyper: %.*s", (int)errlen, errbuf);
+ result = CURLE_RECV_ERROR; /* not a very good return code */
+ }
+ *done = TRUE;
+ hyper_error_free(hypererr);
+ break;
+ }
+ else if(h->init) {
+ /* end of transfer */
+ *done = TRUE;
+ infof(data, "hyperstream is done!\n");
+ break;
+ }
+ else if(t != HYPER_TASK_RESPONSE) {
+ *didwhat = KEEP_RECV;
+ break;
+ }
+ /* HYPER_TASK_RESPONSE */
+
+ h->init = TRUE;
+ *didwhat = KEEP_RECV;
+ if(!resp) {
+ failf(data, "hyperstream: couldn't get response\n");
+ return CURLE_RECV_ERROR;
+ }
+
+ http_status = hyper_response_status(resp);
+ http_version = hyper_response_version(resp);
+ reasonp = hyper_response_reason_phrase(resp);
+ reason_len = hyper_response_reason_phrase_len(resp);
+
+ result = status_line(data, conn,
+ http_status, http_version, reasonp, reason_len);
+ if(result)
+ break;
+
+ headers = hyper_response_headers(resp);
+ if(!headers) {
+ failf(data, "hyperstream: couldn't get response headers\n");
+ result = CURLE_RECV_ERROR;
+ break;
+ }
+
+ /* the headers are already received */
+ hyper_headers_foreach(headers, hyper_each_header, data);
+ if(data->state.hresult) {
+ result = data->state.hresult;
+ break;
+ }
+
+ if(empty_header(data)) {
+ failf(data, "hyperstream: couldn't pass blank header\n");
+ result = CURLE_OUT_OF_MEMORY;
+ break;
+ }
+
+ resp_body = hyper_response_body(resp);
+ if(!resp_body) {
+ failf(data, "hyperstream: couldn't get response body\n");
+ result = CURLE_RECV_ERROR;
+ break;
+ }
+ foreach = hyper_body_foreach(resp_body, hyper_body_chunk, data);
+ if(!foreach) {
+ failf(data, "hyperstream: body foreach failed\n");
+ result = CURLE_OUT_OF_MEMORY;
+ break;
+ }
+ DEBUGASSERT(hyper_task_type(foreach) == HYPER_TASK_EMPTY);
+ if(HYPERE_OK != hyper_executor_push(h->exec, foreach)) {
+ failf(data, "Couldn't hyper_executor_push the body-foreach");
+ result = CURLE_OUT_OF_MEMORY;
+ break;
+ }
+
+ hyper_response_free(resp);
+ resp = NULL;
+ } while(1);
+ if(resp)
+ hyper_response_free(resp);
+ return result;
+}
+
+static CURLcode debug_request(struct Curl_easy *data,
+ const char *method,
+ const char *path,
+ bool h2)
+{
+ char *req = aprintf("%s %s HTTP/%s\r\n", method, path,
+ h2?"2":"1.1");
+ if(!req)
+ return CURLE_OUT_OF_MEMORY;
+ Curl_debug(data, CURLINFO_HEADER_OUT, req, strlen(req));
+ free(req);
+ return CURLE_OK;
+}
+
+/*
+ * Given a full header line "name: value", add it to Hyper and send it to the
+ * debug callback. The trailing CRLF is optional in the input but is included
+ * in the debug output.
+ *
+ * Supports multiple headers in the same string, separated by newlines.
+ */
+
+CURLcode Curl_hyper_header(struct Curl_easy *data, hyper_headers *headers,
+ const char *line)
+{
+ const char *p;
+ const char *n;
+ size_t nlen;
+ const char *v;
+ size_t vlen;
+ bool newline = TRUE;
+ int numh = 0;
+
+ if(!line)
+ return CURLE_OK;
+ n = line;
+ do {
+ size_t linelen = 0;
+
+ p = strchr(n, ':');
+ if(!p)
+ /* this is fine if we already added at least one header */
+ return numh ? CURLE_OK : CURLE_BAD_FUNCTION_ARGUMENT;
+ nlen = p - n;
+ p++; /* move past the colon */
+ while(*p == ' ')
+ p++;
+ v = p;
+ p = strchr(v, '\r');
+ if(!p) {
+ p = strchr(v, '\n');
+ if(p)
+ linelen = 1; /* LF only */
+ else {
+ p = strchr(v, '\0');
+ newline = FALSE; /* no newline */
+ }
+ }
+ else
+ linelen = 2; /* CRLF ending */
+ linelen += (p - n);
+ if(!n)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ vlen = p - v;
+
+ if(HYPERE_OK != hyper_headers_add(headers, (uint8_t *)n, nlen,
+ (uint8_t *)v, vlen)) {
+ failf(data, "hyper_headers_add host\n");
+ return CURLE_OUT_OF_MEMORY;
+ }
+ if(data->set.verbose) {
+ char *ptr = NULL;
+ if(!newline) {
+ ptr = aprintf("%.*s\r\n", (int)linelen, line);
+ if(!ptr)
+ return CURLE_OUT_OF_MEMORY;
+ Curl_debug(data, CURLINFO_HEADER_OUT, ptr, linelen + 2);
+ free(ptr);
+ }
+ else
+ Curl_debug(data, CURLINFO_HEADER_OUT, (char *)line, linelen);
+ }
+ numh++;
+ n += linelen;
+ } while(newline);
+ return CURLE_OK;
+}
+
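+/*
+ * request_target() generates the request-target with Curl_http_target(),
+ * sets it as the URI of the Hyper request and logs the request line via the
+ * debug callback.
+ */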
+static CURLcode request_target(struct Curl_easy *data,
+ struct connectdata *conn,
+ const char *method,
+ bool h2,
+ hyper_request *req)
+{
+ CURLcode result;
+ struct dynbuf r;
+
+ Curl_dyn_init(&r, DYN_HTTP_REQUEST);
+
+ result = Curl_http_target(data, conn, &r);
+ if(result)
+ return result;
+
+ if(hyper_request_set_uri(req, (uint8_t *)Curl_dyn_uptr(&r),
+ Curl_dyn_len(&r))) {
+ failf(data, "error setting path\n");
+ result = CURLE_OUT_OF_MEMORY;
+ }
+ else
+ result = debug_request(data, method, Curl_dyn_ptr(&r), h2);
+
+ Curl_dyn_free(&r);
+
+ return result;
+}
+
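+/*
+ * uploadpostfields() is the hyper_body data callback used when the request
+ * body is provided with CURLOPT_POSTFIELDS: the whole buffer is handed over
+ * in a single chunk, then end of body is signalled.
+ */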
+static int uploadpostfields(void *userdata, hyper_context *ctx,
+ hyper_buf **chunk)
+{
+ struct Curl_easy *data = (struct Curl_easy *)userdata;
+ (void)ctx;
+ if(data->req.upload_done)
+ *chunk = NULL; /* nothing more to deliver */
+ else {
+ /* send everything off in a single go */
+ *chunk = hyper_buf_copy(data->set.postfields, data->req.p.http->postsize);
+ data->req.upload_done = TRUE;
+ }
+ return HYPER_POLL_READY;
+}
+
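+/*
+ * uploadstreamed() is the hyper_body data callback for read callback driven
+ * uploads: it fills the upload buffer with Curl_fillreadbuffer() and copies
+ * the data into a hyper_buf until the read callback delivers nothing more.
+ */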
+static int uploadstreamed(void *userdata, hyper_context *ctx,
+ hyper_buf **chunk)
+{
+ size_t fillcount;
+ struct Curl_easy *data = (struct Curl_easy *)userdata;
+ CURLcode result =
+ Curl_fillreadbuffer(data->conn, data->set.upload_buffer_size,
+ &fillcount);
+ (void)ctx;
+ if(result)
+ return HYPER_POLL_ERROR;
+ if(!fillcount)
+ /* done! */
+ *chunk = NULL;
+ else
+ *chunk = hyper_buf_copy((uint8_t *)data->state.ulbuf, fillcount);
+ return HYPER_POLL_READY;
+}
+
+/*
+ * bodysend() sets up headers in the outgoing request for an HTTP transfer
+ * that sends a body.
+ */
+
+static CURLcode bodysend(struct Curl_easy *data,
+ struct connectdata *conn,
+ hyper_headers *headers,
+ hyper_request *hyperreq,
+ Curl_HttpReq httpreq)
+{
+ CURLcode result;
+ struct dynbuf req;
+ if((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD))
+ Curl_pgrsSetUploadSize(data, 0); /* no request body */
+ else {
+ hyper_body *body;
+ Curl_dyn_init(&req, DYN_HTTP_REQUEST);
+ result = Curl_http_bodysend(data, conn, &req, httpreq);
+
+ if(!result)
+ result = Curl_hyper_header(data, headers, Curl_dyn_ptr(&req));
+
+ Curl_dyn_free(&req);
+
+ body = hyper_body_new();
+ hyper_body_set_userdata(body, data);
+ if(data->set.postfields)
+ hyper_body_set_data_func(body, uploadpostfields);
+ else {
+ result = Curl_get_upload_buffer(data);
+ if(result)
+ return result;
+ /* init the "upload from here" pointer */
+ data->req.upload_fromhere = data->state.ulbuf;
+ hyper_body_set_data_func(body, uploadstreamed);
+ }
+ if(HYPERE_OK != hyper_request_set_body(hyperreq, body)) {
+ /* fail */
+ hyper_body_free(body);
+ result = CURLE_OUT_OF_MEMORY;
+ }
+ }
+ return result;
+}
+
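+/*
+ * cookies() creates the outgoing Cookie: header with Curl_http_cookies() and
+ * adds it to the Hyper headers.
+ */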
+static CURLcode cookies(struct Curl_easy *data,
+ struct connectdata *conn,
+ hyper_headers *headers)
+{
+ struct dynbuf req;
+ CURLcode result;
+ Curl_dyn_init(&req, DYN_HTTP_REQUEST);
+
+ result = Curl_http_cookies(data, conn, &req);
+ if(!result)
+ result = Curl_hyper_header(data, headers, Curl_dyn_ptr(&req));
+ Curl_dyn_free(&req);
+ return result;
+}
+
+/*
+ * Curl_http() gets called from the generic multi_do() function when an HTTP
+ * request is to be performed. This creates and sends a properly constructed
+ * HTTP request.
+ */
+CURLcode Curl_http(struct connectdata *conn, bool *done)
+{
+ struct Curl_easy *data = conn->data;
+ struct hyptransfer *h = &data->hyp;
+ hyper_io *io = NULL;
+ hyper_clientconn_options *options = NULL;
+ hyper_task *task = NULL; /* for the handshake */
+ hyper_task *sendtask = NULL; /* for the send */
+ hyper_clientconn *client = NULL;
+ hyper_request *req = NULL;
+ hyper_headers *headers = NULL;
+ hyper_task *handshake = NULL;
+ hyper_error *hypererr = NULL;
+ CURLcode result;
+ const char *p_accept; /* Accept: string */
+ const char *method;
+ Curl_HttpReq httpreq;
+ bool h2 = FALSE;
+ const char *te = NULL; /* transfer-encoding */
+
+ /* Always consider the DO phase done after this function call, even if there
+     may be parts of the request that are not yet sent, since we can deal with
+ the rest of the request in the PERFORM phase. */
+ *done = TRUE;
+
+ infof(data, "Time for the Hyper dance\n");
+ memset(h, 0, sizeof(struct hyptransfer));
+
+ result = Curl_http_host(data, conn);
+ if(result)
+ return result;
+
+ Curl_http_method(data, conn, &method, &httpreq);
+
+ /* setup the authentication headers */
+ {
+ char *pq = NULL;
+ if(data->state.up.query) {
+ pq = aprintf("%s?%s", data->state.up.path, data->state.up.query);
+ if(!pq)
+ return CURLE_OUT_OF_MEMORY;
+ }
+ result = Curl_http_output_auth(conn, method,
+ (pq ? pq : data->state.up.path), FALSE);
+ free(pq);
+ if(result)
+ return result;
+ }
+
+ result = Curl_http_resume(data, conn, httpreq);
+ if(result)
+ return result;
+
+ result = Curl_http_range(data, conn, httpreq);
+ if(result)
+ return result;
+
+ result = Curl_http_useragent(data, conn);
+ if(result)
+ return result;
+
+ io = hyper_io_new();
+ if(!io) {
+ failf(data, "Couldn't create hyper IO");
+ goto error;
+ }
+ /* tell Hyper how to read/write network data */
+ hyper_io_set_userdata(io, conn);
+ hyper_io_set_read(io, read_cb);
+ hyper_io_set_write(io, write_cb);
+
+ /* create an executor to poll futures */
+ if(!h->exec) {
+ h->exec = hyper_executor_new();
+ if(!h->exec) {
+ failf(data, "Couldn't create hyper executor");
+ goto error;
+ }
+ }
+
+ options = hyper_clientconn_options_new();
+ if(!options) {
+ failf(data, "Couldn't create hyper client options");
+ goto error;
+ }
+ if(conn->negnpn == CURL_HTTP_VERSION_2) {
+ hyper_clientconn_options_http2(options, 1);
+ h2 = TRUE;
+ }
+
+ hyper_clientconn_options_exec(options, h->exec);
+
+ /* "Both the `io` and the `options` are consumed in this function call" */
+ handshake = hyper_clientconn_handshake(io, options);
+ if(!handshake) {
+ failf(data, "Couldn't create hyper client handshake");
+ goto error;
+ }
+ io = NULL;
+ options = NULL;
+
+ if(HYPERE_OK != hyper_executor_push(h->exec, handshake)) {
+ failf(data, "Couldn't hyper_executor_push the handshake");
+ goto error;
+ }
+ handshake = NULL; /* ownership passed on */
+
+ task = hyper_executor_poll(h->exec);
+ if(!task) {
+ failf(data, "Couldn't hyper_executor_poll the handshake");
+ goto error;
+ }
+
+ client = hyper_task_value(task);
+ hyper_task_free(task);
+
+ req = hyper_request_new();
+ if(!req) {
+ failf(data, "Couldn't hyper_request_new");
+ goto error;
+ }
+
+ if(data->set.httpversion == CURL_HTTP_VERSION_1_0) {
+ if(HYPERE_OK != hyper_request_set_version(req,
+ HYPER_HTTP_VERSION_1_0)) {
+ failf(data, "error settting HTTP version");
+ goto error;
+ }
+ }
+
+ if(hyper_request_set_method(req, (uint8_t *)method, strlen(method))) {
+ failf(data, "error setting method");
+ goto error;
+ }
+
+ result = request_target(data, conn, method, h2, req);
+ if(result)
+ goto error;
+
+ headers = hyper_request_headers(req);
+ if(!headers) {
+ failf(data, "hyper_request_headers\n");
+ goto error;
+ }
+
+ result = Curl_http_body(data, conn, httpreq, &te);
+ if(result)
+ return result;
+
+ if(data->state.aptr.host &&
+ Curl_hyper_header(data, headers, data->state.aptr.host))
+ goto error;
+
+ if(data->state.aptr.proxyuserpwd &&
+ Curl_hyper_header(data, headers, data->state.aptr.proxyuserpwd))
+ goto error;
+
+ if(data->state.aptr.userpwd &&
+ Curl_hyper_header(data, headers, data->state.aptr.userpwd))
+ goto error;
+
+ if((data->state.use_range && data->state.aptr.rangeline) &&
+ Curl_hyper_header(data, headers, data->state.aptr.rangeline))
+ goto error;
+
+ if(data->set.str[STRING_USERAGENT] &&
+ *data->set.str[STRING_USERAGENT] &&
+ data->state.aptr.uagent &&
+ Curl_hyper_header(data, headers, data->state.aptr.uagent))
+ goto error;
+
+ p_accept = Curl_checkheaders(conn, "Accept")?NULL:"Accept: */*\r\n";
+ if(p_accept && Curl_hyper_header(data, headers, p_accept))
+ goto error;
+
+ if(te && Curl_hyper_header(data, headers, te))
+ goto error;
+
+#ifndef CURL_DISABLE_PROXY
+ if(conn->bits.httpproxy && !conn->bits.tunnel_proxy &&
+ !Curl_checkProxyheaders(conn, "Proxy-Connection")) {
+ if(Curl_hyper_header(data, headers, "Proxy-Connection: Keep-Alive"))
+ goto error;
+ }
+#endif
+
+ Curl_safefree(data->state.aptr.ref);
+ if(data->change.referer && !Curl_checkheaders(conn, "Referer")) {
+ data->state.aptr.ref = aprintf("Referer: %s\r\n", data->change.referer);
+ if(!data->state.aptr.ref)
+ return CURLE_OUT_OF_MEMORY;
+ if(Curl_hyper_header(data, headers, data->state.aptr.ref))
+ goto error;
+ }
+
+ result = cookies(data, conn, headers);
+ if(result)
+ return result;
+
+ result = Curl_add_custom_headers(conn, FALSE, headers);
+ if(result)
+ return result;
+
+ if((httpreq != HTTPREQ_GET) && (httpreq != HTTPREQ_HEAD)) {
+ result = bodysend(data, conn, headers, req, httpreq);
+ if(result)
+ return result;
+ }
+
+ Curl_debug(data, CURLINFO_HEADER_OUT, (char *)"\r\n", 2);
+
+ data->req.upload_chunky = FALSE;
+ sendtask = hyper_clientconn_send(client, req);
+ if(!sendtask) {
+ failf(data, "hyper_clientconn_send\n");
+ goto error;
+ }
+
+ if(HYPERE_OK != hyper_executor_push(h->exec, sendtask)) {
+ failf(data, "Couldn't hyper_executor_push the send");
+ goto error;
+ }
+
+ hyper_clientconn_free(client);
+
+ do {
+ task = hyper_executor_poll(h->exec);
+ if(task) {
+ bool error = hyper_task_type(task) == HYPER_TASK_ERROR;
+ if(error)
+ hypererr = hyper_task_value(task);
+ hyper_task_free(task);
+ if(error)
+ goto error;
+ }
+ } while(task);
+
+ if((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD)) {
+ /* HTTP GET/HEAD download */
+ Curl_pgrsSetUploadSize(data, 0); /* nothing */
+ Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
+ }
+ conn->datastream = hyperstream;
+
+ return CURLE_OK;
+ error:
+
+ if(io)
+ hyper_io_free(io);
+
+ if(options)
+ hyper_clientconn_options_free(options);
+
+ if(handshake)
+ hyper_task_free(handshake);
+
+ if(hypererr) {
+ uint8_t errbuf[256];
+ size_t errlen = hyper_error_print(hypererr, errbuf, sizeof(errbuf));
+ failf(data, "Hyper: %.*s", (int)errlen, errbuf);
+ hyper_error_free(hypererr);
+ }
+ return CURLE_OUT_OF_MEMORY;
+}
+
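+/*
+ * Curl_hyper_done() is called when the transfer is over: it frees the
+ * executor and any wakers still waiting to be woken.
+ */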
+void Curl_hyper_done(struct Curl_easy *data)
+{
+ struct hyptransfer *h = &data->hyp;
+ if(h->exec) {
+ hyper_executor_free(h->exec);
+ h->exec = NULL;
+ }
+ if(h->read_waker) {
+ hyper_waker_free(h->read_waker);
+ h->read_waker = NULL;
+ }
+ if(h->write_waker) {
+ hyper_waker_free(h->write_waker);
+ h->write_waker = NULL;
+ }
+}
+
+#endif /* !defined(CURL_DISABLE_HTTP) && defined(USE_HYPER) */
--- a/lib/http.c
+++ b/lib/http.c
#include <sys/param.h>
#endif
+#ifdef USE_HYPER
+#include <hyper.h>
+#endif
+
#include "urldata.h"
#include <curl/curl.h>
#include "transfer.h"
#include "strdup.h"
#include "altsvc.h"
#include "hsts.h"
+#include "c-hyper.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
return data->state.authproblem;
}
+#ifndef USE_HYPER
/*
* readmoredata() is a "fread() emulation" to provide POST and/or request
* data. It is used when a huge POST is to be made and the entire chunk wasn't
return result;
}
+#endif
+
/* end of the add_buffer functions */
/* ------------------------------------------------------------------------- */
Curl_quic_done(data, premature);
Curl_mime_cleanpart(&http->form);
Curl_dyn_reset(&data->state.headerb);
+ Curl_hyper_done(data);
if(status)
return status;
(data->set.httpversion >= CURL_HTTP_VERSION_1_1));
}
+#ifndef USE_HYPER
static const char *get_http_string(const struct Curl_easy *data,
const struct connectdata *conn)
{
return "1.0";
}
+#endif
/* check and possibly add an Expect: header */
static CURLcode expect100(struct Curl_easy *data,
CURLcode Curl_add_custom_headers(struct connectdata *conn,
bool is_connect,
- struct dynbuf *req)
+#ifndef USE_HYPER
+ struct dynbuf *req
+#else
+ void *req
+#endif
+ )
{
char *ptr;
struct curl_slist *h[2];
/* copy the source */
semicolonp = strdup(headers->data);
if(!semicolonp) {
+#ifndef USE_HYPER
Curl_dyn_free(req);
+#endif
return CURLE_OUT_OF_MEMORY;
}
/* put a colon where the semicolon is */
!strcasecompare(data->state.first_host, conn->host.name)))
;
else {
+#ifdef USE_HYPER
+ result = Curl_hyper_header(data, req, compare);
+#else
result = Curl_dyn_addf(req, "%s\r\n", compare);
+#endif
}
if(semicolonp)
free(semicolonp);
}
#endif
-/*
- * Curl_http() gets called from the generic multi_do() function when a HTTP
- * request is to be performed. This creates and sends a properly constructed
- * HTTP request.
- */
-CURLcode Curl_http(struct connectdata *conn, bool *done)
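+/*
+ * Curl_http_method() returns the request method string to use and the
+ * corresponding Curl_HttpReq value, based on the configured options.
+ */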
+void Curl_http_method(struct Curl_easy *data, struct connectdata *conn,
+ const char **method, Curl_HttpReq *reqp)
{
- struct Curl_easy *data = conn->data;
- CURLcode result = CURLE_OK;
- struct HTTP *http;
- const char *path = data->state.up.path;
- const char *query = data->state.up.query;
- bool paste_ftp_userpwd = FALSE;
- char ftp_typecode[sizeof("/;type=?")] = "";
- const char *host = conn->host.name;
- const char *te = ""; /* transfer-encoding */
- const char *ptr;
- const char *request;
Curl_HttpReq httpreq = data->state.httpreq;
-#if !defined(CURL_DISABLE_COOKIES)
- char *addcookies = NULL;
-#endif
- curl_off_t included_body = 0;
- const char *httpstring;
- struct dynbuf req;
- curl_off_t postsize = 0; /* curl_off_t to handle large file sizes */
- char *altused = NULL;
-
- /* Always consider the DO phase done after this function call, even if there
- may be parts of the request that is not yet sent, since we can deal with
- the rest of the request in the PERFORM phase. */
- *done = TRUE;
-
- if(conn->transport != TRNSPRT_QUIC) {
- if(conn->httpversion < 20) { /* unless the connection is re-used and
- already http2 */
- switch(conn->negnpn) {
- case CURL_HTTP_VERSION_2:
- conn->httpversion = 20; /* we know we're on HTTP/2 now */
-
- result = Curl_http2_switched(conn, NULL, 0);
- if(result)
- return result;
- break;
- case CURL_HTTP_VERSION_1_1:
- /* continue with HTTP/1.1 when explicitly requested */
- break;
- default:
- /* Check if user wants to use HTTP/2 with clear TCP*/
-#ifdef USE_NGHTTP2
- if(conn->data->set.httpversion ==
- CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE) {
-#ifndef CURL_DISABLE_PROXY
- if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
- /* We don't support HTTP/2 proxies yet. Also it's debatable
- whether or not this setting should apply to HTTP/2 proxies. */
- infof(data, "Ignoring HTTP/2 prior knowledge due to proxy\n");
- break;
- }
-#endif
- DEBUGF(infof(data, "HTTP/2 over clean TCP\n"));
- conn->httpversion = 20;
-
- result = Curl_http2_switched(conn, NULL, 0);
- if(result)
- return result;
- }
-#endif
- break;
- }
- }
- else {
- /* prepare for a http2 request */
- result = Curl_http2_setup(conn);
- if(result)
- return result;
- }
- }
- http = data->req.p.http;
- DEBUGASSERT(http);
-
- if(!data->state.this_is_a_follow) {
- /* Free to avoid leaking memory on multiple requests*/
- free(data->state.first_host);
-
- data->state.first_host = strdup(conn->host.name);
- if(!data->state.first_host)
- return CURLE_OUT_OF_MEMORY;
-
- data->state.first_remote_port = conn->remote_port;
- }
-
+ const char *request;
if((conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_FTP)) &&
- data->set.upload) {
+ data->set.upload)
httpreq = HTTPREQ_PUT;
- }
/* Now set the 'request' pointer to the proper request string */
if(data->set.str[STRING_CUSTOMREQUEST])
}
}
}
+ *method = request;
+ *reqp = httpreq;
+}
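+
+/*
+ * Curl_http_useragent() drops the internally created User-Agent: string when
+ * a custom header has replaced it.
+ */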
+CURLcode Curl_http_useragent(struct Curl_easy *data, struct connectdata *conn)
+{
/* The User-Agent string might have been allocated in url.c already, because
it might have been used in the proxy connect, but if we have got a header
with the user-agent string specified, we erase the previously made string
free(data->state.aptr.uagent);
data->state.aptr.uagent = NULL;
}
+ return CURLE_OK;
+}
- /* setup the authentication headers */
- {
- char *pq = NULL;
- if(query && *query) {
- pq = aprintf("%s?%s", path, query);
- if(!pq)
- return CURLE_OUT_OF_MEMORY;
- }
- result = Curl_http_output_auth(conn, request, (pq ? pq : path), FALSE);
- free(pq);
- if(result)
- return result;
- }
- if(((data->state.authhost.multipass && !data->state.authhost.done)
- || (data->state.authproxy.multipass && !data->state.authproxy.done)) &&
- (httpreq != HTTPREQ_GET) &&
- (httpreq != HTTPREQ_HEAD)) {
- /* Auth is required and we are not authenticated yet. Make a PUT or POST
- with content-length zero as a "probe". */
- conn->bits.authneg = TRUE;
- }
- else
- conn->bits.authneg = FALSE;
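+/*
+ * Curl_http_host() creates the Host: request header, honoring a custom Host:
+ * header when allowed, bracketing IPv6 literals and leaving out default port
+ * numbers. The host name is also stored for later cookie matching.
+ */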
+CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn)
+{
+ const char *ptr;
+ if(!data->state.this_is_a_follow) {
+ /* Free to avoid leaking memory on multiple requests*/
+ free(data->state.first_host);
- Curl_safefree(data->state.aptr.ref);
- if(data->change.referer && !Curl_checkheaders(conn, "Referer")) {
- data->state.aptr.ref = aprintf("Referer: %s\r\n", data->change.referer);
- if(!data->state.aptr.ref)
+ data->state.first_host = strdup(conn->host.name);
+ if(!data->state.first_host)
return CURLE_OUT_OF_MEMORY;
+
+ data->state.first_remote_port = conn->remote_port;
}
- else
- data->state.aptr.ref = NULL;
+ Curl_safefree(data->state.aptr.host);
+ ptr = Curl_checkheaders(conn, "Host");
+ if(ptr && (!data->state.this_is_a_follow ||
+ strcasecompare(data->state.first_host, conn->host.name))) {
#if !defined(CURL_DISABLE_COOKIES)
- if(data->set.str[STRING_COOKIE] && !Curl_checkheaders(conn, "Cookie"))
- addcookies = data->set.str[STRING_COOKIE];
+ /* If we have a given custom Host: header, we extract the host name in
+ order to possibly use it for cookie reasons later on. We only allow the
+ custom Host: header if this is NOT a redirect, as setting Host: in the
+ redirected request is being out on thin ice. Except if the host name
+ is the same as the first one! */
+ char *cookiehost = Curl_copy_header_value(ptr);
+ if(!cookiehost)
+ return CURLE_OUT_OF_MEMORY;
+ if(!*cookiehost)
+ /* ignore empty data */
+ free(cookiehost);
+ else {
+ /* If the host begins with '[', we start searching for the port after
+ the bracket has been closed */
+ if(*cookiehost == '[') {
+ char *closingbracket;
+ /* since the 'cookiehost' is an allocated memory area that will be
+ freed later we cannot simply increment the pointer */
+ memmove(cookiehost, cookiehost + 1, strlen(cookiehost) - 1);
+ closingbracket = strchr(cookiehost, ']');
+ if(closingbracket)
+ *closingbracket = 0;
+ }
+ else {
+ int startsearch = 0;
+ char *colon = strchr(cookiehost + startsearch, ':');
+ if(colon)
+ *colon = 0; /* The host must not include an embedded port number */
+ }
+ Curl_safefree(data->state.aptr.cookiehost);
+ data->state.aptr.cookiehost = cookiehost;
+ }
#endif
- if(!Curl_checkheaders(conn, "Accept-Encoding") &&
- data->set.str[STRING_ENCODING]) {
- Curl_safefree(data->state.aptr.accept_encoding);
- data->state.aptr.accept_encoding =
- aprintf("Accept-Encoding: %s\r\n", data->set.str[STRING_ENCODING]);
- if(!data->state.aptr.accept_encoding)
- return CURLE_OUT_OF_MEMORY;
+ if(strcmp("Host:", ptr)) {
+ data->state.aptr.host = aprintf("Host:%s\r\n", &ptr[5]);
+ if(!data->state.aptr.host)
+ return CURLE_OUT_OF_MEMORY;
+ }
+ else
+ /* when clearing the header */
+ data->state.aptr.host = NULL;
}
else {
- Curl_safefree(data->state.aptr.accept_encoding);
- data->state.aptr.accept_encoding = NULL;
- }
-
-#ifdef HAVE_LIBZ
- /* we only consider transfer-encoding magic if libz support is built-in */
-
- if(!Curl_checkheaders(conn, "TE") &&
- data->set.http_transfer_encoding) {
- /* When we are to insert a TE: header in the request, we must also insert
- TE in a Connection: header, so we need to merge the custom provided
- Connection: header and prevent the original to get sent. Note that if
- the user has inserted his/hers own TE: header we don't do this magic
- but then assume that the user will handle it all! */
- char *cptr = Curl_checkheaders(conn, "Connection");
-#define TE_HEADER "TE: gzip\r\n"
+ /* When building Host: headers, we must put the host name within
+ [brackets] if the host name is a plain IPv6-address. RFC2732-style. */
+ const char *host = conn->host.name;
- Curl_safefree(data->state.aptr.te);
+ if(((conn->given->protocol&CURLPROTO_HTTPS) &&
+ (conn->remote_port == PORT_HTTPS)) ||
+ ((conn->given->protocol&CURLPROTO_HTTP) &&
+ (conn->remote_port == PORT_HTTP)) )
+ /* if(HTTPS on port 443) OR (HTTP on port 80) then don't include
+ the port number in the host string */
+ data->state.aptr.host = aprintf("Host: %s%s%s\r\n",
+ conn->bits.ipv6_ip?"[":"",
+ host,
+ conn->bits.ipv6_ip?"]":"");
+ else
+ data->state.aptr.host = aprintf("Host: %s%s%s:%d\r\n",
+ conn->bits.ipv6_ip?"[":"",
+ host,
+ conn->bits.ipv6_ip?"]":"",
+ conn->remote_port);
- if(cptr) {
- cptr = Curl_copy_header_value(cptr);
- if(!cptr)
- return CURLE_OUT_OF_MEMORY;
- }
+ if(!data->state.aptr.host)
+ /* without Host: we can't make a nice request */
+ return CURLE_OUT_OF_MEMORY;
+ }
+ return CURLE_OK;
+}
- /* Create the (updated) Connection: header */
- data->state.aptr.te = aprintf("Connection: %s%sTE\r\n" TE_HEADER,
- cptr ? cptr : "", (cptr && *cptr) ? ", ":"");
+/*
+ * Append the request-target to the HTTP request
+ */
+CURLcode Curl_http_target(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct dynbuf *r)
+{
+ CURLcode result = CURLE_OK;
+ const char *path = data->state.up.path;
+ const char *query = data->state.up.query;
- free(cptr);
- if(!data->state.aptr.te)
- return CURLE_OUT_OF_MEMORY;
+ if(data->set.str[STRING_TARGET]) {
+ path = data->set.str[STRING_TARGET];
+ query = NULL;
}
-#endif
- switch(httpreq) {
- case HTTPREQ_POST_MIME:
- http->sendit = &data->set.mimepost;
- break;
+#ifndef CURL_DISABLE_PROXY
+ if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
+ /* Using a proxy but does not tunnel through it */
+
+ /* The path sent to the proxy is in fact the entire URL. But if the remote
+       host is an IDN name, we must make sure that the request we produce only
+ uses the encoded host name! */
+
+ /* and no fragment part */
+ CURLUcode uc;
+ char *url;
+ CURLU *h = curl_url_dup(data->state.uh);
+ if(!h)
+ return CURLE_OUT_OF_MEMORY;
+
+ if(conn->host.dispname != conn->host.name) {
+ uc = curl_url_set(h, CURLUPART_HOST, conn->host.name, 0);
+ if(uc) {
+ curl_url_cleanup(h);
+ return CURLE_OUT_OF_MEMORY;
+ }
+ }
+ uc = curl_url_set(h, CURLUPART_FRAGMENT, NULL, 0);
+ if(uc) {
+ curl_url_cleanup(h);
+ return CURLE_OUT_OF_MEMORY;
+ }
+
+ if(strcasecompare("http", data->state.up.scheme)) {
+      /* when getting HTTP, we don't want the userinfo in the URL */
+ uc = curl_url_set(h, CURLUPART_USER, NULL, 0);
+ if(uc) {
+ curl_url_cleanup(h);
+ return CURLE_OUT_OF_MEMORY;
+ }
+ uc = curl_url_set(h, CURLUPART_PASSWORD, NULL, 0);
+ if(uc) {
+ curl_url_cleanup(h);
+ return CURLE_OUT_OF_MEMORY;
+ }
+ }
+    /* Extract the URL to use in the request */
+ uc = curl_url_get(h, CURLUPART_URL, &url, 0);
+ if(uc) {
+ curl_url_cleanup(h);
+ return CURLE_OUT_OF_MEMORY;
+ }
+
+ curl_url_cleanup(h);
+
+ /* url */
+ result = Curl_dyn_add(r, url);
+ free(url);
+ if(result)
+ return (result);
+
+ if(strcasecompare("ftp", data->state.up.scheme)) {
+ if(data->set.proxy_transfer_mode) {
+ /* when doing ftp, append ;type=<a|i> if not present */
+ char *type = strstr(path, ";type=");
+ if(type && type[6] && type[7] == 0) {
+ switch(Curl_raw_toupper(type[6])) {
+ case 'A':
+ case 'D':
+ case 'I':
+ break;
+ default:
+ type = NULL;
+ }
+ }
+ if(!type) {
+ result = Curl_dyn_addf(r, ";type=%c",
+ data->set.prefer_ascii ? 'a' : 'i');
+ if(result)
+ return result;
+ }
+ }
+ }
+ }
+
+ else
+#else
+ (void)conn; /* not used in disabled-proxy builds */
+#endif
+ {
+ result = Curl_dyn_add(r, path);
+ if(result)
+ return result;
+ if(query)
+ result = Curl_dyn_addf(r, "?%s", query);
+ }
+
+ return result;
+}
+
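+/*
+ * Curl_http_body() prepares the request body: form data is converted to a
+ * mime structure when needed and, for chunked uploads, the Transfer-Encoding
+ * header to add is returned via 'tep'.
+ */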
+CURLcode Curl_http_body(struct Curl_easy *data, struct connectdata *conn,
+ Curl_HttpReq httpreq, const char **tep)
+{
+ CURLcode result = CURLE_OK;
+ const char *ptr;
+ struct HTTP *http = data->req.p.http;
+ http->postsize = 0;
+
+ switch(httpreq) {
+ case HTTPREQ_POST_MIME:
+ http->sendit = &data->set.mimepost;
+ break;
case HTTPREQ_POST_FORM:
/* Convert the form structure into a mime structure. */
Curl_mime_cleanpart(&http->form);
}
if(data->req.upload_chunky)
- te = "Transfer-Encoding: chunked\r\n";
+ *tep = "Transfer-Encoding: chunked\r\n";
}
+ return result;
+}
- Curl_safefree(data->state.aptr.host);
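+/*
+ * Curl_http_bodysend() completes the request headers for the given request
+ * kind (Content-Length:, Expect: and the final CRLF) and issues the request.
+ * Without Hyper, small POSTFIELDS bodies are appended to the request here as
+ * well.
+ */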
+CURLcode Curl_http_bodysend(struct Curl_easy *data, struct connectdata *conn,
+ struct dynbuf *r, Curl_HttpReq httpreq)
+{
+#ifndef USE_HYPER
+ /* Hyper always handles the body separately */
+ curl_off_t included_body = 0;
+#endif
+ CURLcode result = CURLE_OK;
+ struct HTTP *http = data->req.p.http;
+ const char *ptr;
- ptr = Curl_checkheaders(conn, "Host");
- if(ptr && (!data->state.this_is_a_follow ||
- strcasecompare(data->state.first_host, conn->host.name))) {
-#if !defined(CURL_DISABLE_COOKIES)
- /* If we have a given custom Host: header, we extract the host name in
- order to possibly use it for cookie reasons later on. We only allow the
- custom Host: header if this is NOT a redirect, as setting Host: in the
- redirected request is being out on thin ice. Except if the host name
- is the same as the first one! */
- char *cookiehost = Curl_copy_header_value(ptr);
- if(!cookiehost)
- return CURLE_OUT_OF_MEMORY;
- if(!*cookiehost)
- /* ignore empty data */
- free(cookiehost);
- else {
- /* If the host begins with '[', we start searching for the port after
- the bracket has been closed */
- if(*cookiehost == '[') {
- char *closingbracket;
- /* since the 'cookiehost' is an allocated memory area that will be
- freed later we cannot simply increment the pointer */
- memmove(cookiehost, cookiehost + 1, strlen(cookiehost) - 1);
- closingbracket = strchr(cookiehost, ']');
- if(closingbracket)
- *closingbracket = 0;
- }
- else {
- int startsearch = 0;
- char *colon = strchr(cookiehost + startsearch, ':');
- if(colon)
- *colon = 0; /* The host must not include an embedded port number */
- }
- Curl_safefree(data->state.aptr.cookiehost);
- data->state.aptr.cookiehost = cookiehost;
+ /* If 'authdone' is FALSE, we must not set the write socket index to the
+ Curl_transfer() call below, as we're not ready to actually upload any
+ data yet. */
+
+ switch(httpreq) {
+
+ case HTTPREQ_PUT: /* Let's PUT the data to the server! */
+
+ if(conn->bits.authneg)
+ http->postsize = 0;
+ else
+ http->postsize = data->state.infilesize;
+
+ if((http->postsize != -1) && !data->req.upload_chunky &&
+ (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
+ /* only add Content-Length if not uploading chunked */
+ result = Curl_dyn_addf(r, "Content-Length: %" CURL_FORMAT_CURL_OFF_T
+ "\r\n", http->postsize);
+ if(result)
+ return result;
}
-#endif
- if(strcmp("Host:", ptr)) {
- data->state.aptr.host = aprintf("Host:%s\r\n", &ptr[5]);
- if(!data->state.aptr.host)
- return CURLE_OUT_OF_MEMORY;
+ if(http->postsize) {
+ result = expect100(data, conn, r);
+ if(result)
+ return result;
}
- else
- /* when clearing the header */
- data->state.aptr.host = NULL;
- }
- else {
- /* When building Host: headers, we must put the host name within
- [brackets] if the host name is a plain IPv6-address. RFC2732-style. */
- if(((conn->given->protocol&CURLPROTO_HTTPS) &&
- (conn->remote_port == PORT_HTTPS)) ||
- ((conn->given->protocol&CURLPROTO_HTTP) &&
- (conn->remote_port == PORT_HTTP)) )
- /* if(HTTPS on port 443) OR (HTTP on port 80) then don't include
- the port number in the host string */
- data->state.aptr.host = aprintf("Host: %s%s%s\r\n",
- conn->bits.ipv6_ip?"[":"",
- host,
- conn->bits.ipv6_ip?"]":"");
+ /* end of headers */
+ result = Curl_dyn_add(r, "\r\n");
+ if(result)
+ return result;
+
+ /* set the upload size to the progress meter */
+ Curl_pgrsSetUploadSize(data, http->postsize);
+
+ /* this sends the buffer and frees all the buffer resources */
+ result = Curl_buffer_send(r, conn, &data->info.request_size, 0,
+ FIRSTSOCKET);
+ if(result)
+ failf(data, "Failed sending PUT request");
else
- data->state.aptr.host = aprintf("Host: %s%s%s:%d\r\n",
- conn->bits.ipv6_ip?"[":"",
- host,
- conn->bits.ipv6_ip?"]":"",
- conn->remote_port);
+ /* prepare for transfer */
+ Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
+ http->postsize?FIRSTSOCKET:-1);
+ if(result)
+ return result;
+ break;
- if(!data->state.aptr.host)
- /* without Host: we can't make a nice request */
- return CURLE_OUT_OF_MEMORY;
- }
+ case HTTPREQ_POST_FORM:
+ case HTTPREQ_POST_MIME:
+ /* This is form posting using mime data. */
+ if(conn->bits.authneg) {
+ /* nothing to post! */
+ result = Curl_dyn_add(r, "Content-Length: 0\r\n\r\n");
+ if(result)
+ return result;
-#ifndef CURL_DISABLE_PROXY
- if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
- /* Using a proxy but does not tunnel through it */
+ result = Curl_buffer_send(r, conn, &data->info.request_size, 0,
+ FIRSTSOCKET);
+ if(result)
+ failf(data, "Failed sending POST request");
+ else
+ /* setup variables for the upcoming transfer */
+ Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
+ break;
+ }
- /* The path sent to the proxy is in fact the entire URL. But if the remote
- host is a IDN-name, we must make sure that the request we produce only
- uses the encoded host name! */
+ data->state.infilesize = http->postsize;
- /* and no fragment part */
- CURLUcode uc;
- CURLU *h = curl_url_dup(data->state.uh);
- if(!h)
- return CURLE_OUT_OF_MEMORY;
+ /* We only set Content-Length and allow a custom Content-Length if
+ we don't upload data chunked, as RFC2616 forbids us to set both
+ kinds of headers (Transfer-Encoding: chunked and Content-Length) */
+ if(http->postsize != -1 && !data->req.upload_chunky &&
+ (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
+ /* we allow replacing this header if not during auth negotiation,
+ although it isn't very wise to actually set your own */
+ result = Curl_dyn_addf(r,
+ "Content-Length: %" CURL_FORMAT_CURL_OFF_T
+ "\r\n", http->postsize);
+ if(result)
+ return result;
+ }
- if(conn->host.dispname != conn->host.name) {
- uc = curl_url_set(h, CURLUPART_HOST, conn->host.name, 0);
- if(uc) {
- curl_url_cleanup(h);
- return CURLE_OUT_OF_MEMORY;
+#ifndef CURL_DISABLE_MIME
+ /* Output mime-generated headers. */
+ {
+ struct curl_slist *hdr;
+
+ for(hdr = http->sendit->curlheaders; hdr; hdr = hdr->next) {
+ result = Curl_dyn_addf(r, "%s\r\n", hdr->data);
+ if(result)
+ return result;
}
}
- uc = curl_url_set(h, CURLUPART_FRAGMENT, NULL, 0);
- if(uc) {
- curl_url_cleanup(h);
- return CURLE_OUT_OF_MEMORY;
- }
+#endif
- if(strcasecompare("http", data->state.up.scheme)) {
- /* when getting HTTP, we don't want the userinfo the URL */
- uc = curl_url_set(h, CURLUPART_USER, NULL, 0);
- if(uc) {
- curl_url_cleanup(h);
- return CURLE_OUT_OF_MEMORY;
- }
- uc = curl_url_set(h, CURLUPART_PASSWORD, NULL, 0);
- if(uc) {
- curl_url_cleanup(h);
- return CURLE_OUT_OF_MEMORY;
- }
+ /* For really small posts we don't use Expect: headers at all, and for
+ the somewhat bigger ones we allow the app to disable it. Just make
+ sure that the expect100header is always set to the preferred value
+ here. */
+ ptr = Curl_checkheaders(conn, "Expect");
+ if(ptr) {
+ data->state.expect100header =
+ Curl_compareheader(ptr, "Expect:", "100-continue");
}
- /* Extract the URL to use in the request. Store in STRING_TEMP_URL for
- clean-up reasons if the function returns before the free() further
- down. */
- uc = curl_url_get(h, CURLUPART_URL, &data->set.str[STRING_TEMP_URL], 0);
- if(uc) {
- curl_url_cleanup(h);
- return CURLE_OUT_OF_MEMORY;
+ else if(http->postsize > EXPECT_100_THRESHOLD || http->postsize < 0) {
+ result = expect100(data, conn, r);
+ if(result)
+ return result;
}
+ else
+ data->state.expect100header = FALSE;
- curl_url_cleanup(h);
+ /* make the request end in a true CRLF */
+ result = Curl_dyn_add(r, "\r\n");
+ if(result)
+ return result;
- if(strcasecompare("ftp", data->state.up.scheme)) {
- if(data->set.proxy_transfer_mode) {
- /* when doing ftp, append ;type=<a|i> if not present */
- char *type = strstr(path, ";type=");
- if(type && type[6] && type[7] == 0) {
- switch(Curl_raw_toupper(type[6])) {
- case 'A':
- case 'D':
- case 'I':
- break;
- default:
- type = NULL;
- }
- }
- if(!type) {
- char *p = ftp_typecode;
- /* avoid sending invalid URLs like ftp://example.com;type=i if the
- * user specified ftp://example.com without the slash */
- if(!*data->state.up.path && path[strlen(path) - 1] != '/') {
- *p++ = '/';
- }
- msnprintf(p, sizeof(ftp_typecode) - 1, ";type=%c",
- data->set.prefer_ascii ? 'a' : 'i');
- }
- }
- if(conn->bits.user_passwd)
- paste_ftp_userpwd = TRUE;
- }
- }
-#endif /* CURL_DISABLE_PROXY */
+ /* set the upload size to the progress meter */
+ Curl_pgrsSetUploadSize(data, http->postsize);
- http->p_accept = Curl_checkheaders(conn, "Accept")?NULL:"Accept: */*\r\n";
+ /* Read from mime structure. */
+ data->state.fread_func = (curl_read_callback) Curl_mime_read;
+ data->state.in = (void *) http->sendit;
+ http->sending = HTTPSEND_BODY;
- if((HTTPREQ_POST == httpreq || HTTPREQ_PUT == httpreq) &&
- data->state.resume_from) {
- /**********************************************************************
- * Resuming upload in HTTP means that we PUT or POST and that we have
- * got a resume_from value set. The resume value has already created
- * a Range: header that will be passed along. We need to "fast forward"
- * the file the given number of bytes and decrease the assume upload
- * file size before we continue this venture in the dark lands of HTTP.
- * Resuming mime/form posting at an offset > 0 has no sense and is ignored.
- *********************************************************************/
+ /* this sends the buffer and frees all the buffer resources */
+ result = Curl_buffer_send(r, conn, &data->info.request_size, 0,
+ FIRSTSOCKET);
+ if(result)
+ failf(data, "Failed sending POST request");
+ else
+ /* prepare for transfer */
+ Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
+ http->postsize?FIRSTSOCKET:-1);
+ if(result)
+ return result;
- if(data->state.resume_from < 0) {
- /*
- * This is meant to get the size of the present remote-file by itself.
- * We don't support this now. Bail out!
- */
- data->state.resume_from = 0;
+ break;
+
+ case HTTPREQ_POST:
+ /* this is the simple POST, using x-www-form-urlencoded style */
+
+ if(conn->bits.authneg)
+ http->postsize = 0;
+ else
+ /* the size of the post body */
+ http->postsize = data->state.infilesize;
+
+ /* We only set Content-Length and allow a custom Content-Length if
+ we don't upload data chunked, as RFC2616 forbids us to set both
+ kinds of headers (Transfer-Encoding: chunked and Content-Length) */
+ if((http->postsize != -1) && !data->req.upload_chunky &&
+ (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
+ /* we allow replacing this header if not during auth negotiation,
+ although it isn't very wise to actually set your own */
+ result = Curl_dyn_addf(r, "Content-Length: %" CURL_FORMAT_CURL_OFF_T
+ "\r\n", http->postsize);
+ if(result)
+ return result;
}
- if(data->state.resume_from && !data->state.this_is_a_follow) {
- /* do we still game? */
+ if(!Curl_checkheaders(conn, "Content-Type")) {
+ result = Curl_dyn_add(r, "Content-Type: application/"
+ "x-www-form-urlencoded\r\n");
+ if(result)
+ return result;
+ }
- /* Now, let's read off the proper amount of bytes from the
- input. */
- int seekerr = CURL_SEEKFUNC_CANTSEEK;
- if(conn->seek_func) {
- Curl_set_in_callback(data, true);
- seekerr = conn->seek_func(conn->seek_client, data->state.resume_from,
- SEEK_SET);
- Curl_set_in_callback(data, false);
- }
+ /* For really small posts we don't use Expect: headers at all, and for
+ the somewhat bigger ones we allow the app to disable it. Just make
+ sure that the expect100header is always set to the preferred value
+ here. */
+ ptr = Curl_checkheaders(conn, "Expect");
+ if(ptr) {
+ data->state.expect100header =
+ Curl_compareheader(ptr, "Expect:", "100-continue");
+ }
+ else if(http->postsize > EXPECT_100_THRESHOLD || http->postsize < 0) {
+ result = expect100(data, conn, r);
+ if(result)
+ return result;
+ }
+ else
+ data->state.expect100header = FALSE;
- if(seekerr != CURL_SEEKFUNC_OK) {
- curl_off_t passed = 0;
+#ifndef USE_HYPER
+ /* With Hyper the body is always passed on separately */
+ if(data->set.postfields) {
- if(seekerr != CURL_SEEKFUNC_CANTSEEK) {
- failf(data, "Could not seek stream");
- return CURLE_READ_ERROR;
- }
- /* when seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
- do {
- size_t readthisamountnow =
- (data->state.resume_from - passed > data->set.buffer_size) ?
- (size_t)data->set.buffer_size :
- curlx_sotouz(data->state.resume_from - passed);
+ /* In HTTP2, we send request body in DATA frame regardless of
+ its size. */
+ if(conn->httpversion != 20 &&
+ !data->state.expect100header &&
+ (http->postsize < MAX_INITIAL_POST_SIZE)) {
+ /* if we don't use expect: 100 AND
+ postsize is less than MAX_INITIAL_POST_SIZE
- size_t actuallyread =
- data->state.fread_func(data->state.buffer, 1, readthisamountnow,
- data->state.in);
+ then append the post data to the HTTP request header. This limit
+ is no magic limit but only set to prevent really huge POSTs to
+ get the data duplicated with malloc() and family. */
- passed += actuallyread;
- if((actuallyread == 0) || (actuallyread > readthisamountnow)) {
- /* this checks for greater-than only to make sure that the
- CURL_READFUNC_ABORT return code still aborts */
- failf(data, "Could only read %" CURL_FORMAT_CURL_OFF_T
- " bytes from the input", passed);
- return CURLE_READ_ERROR;
+ /* end of headers! */
+ result = Curl_dyn_add(r, "\r\n");
+ if(result)
+ return result;
+
+ if(!data->req.upload_chunky) {
+ /* We're not sending it 'chunked', append it to the request
+             already now to reduce the number of send() calls */
+ result = Curl_dyn_addn(r, data->set.postfields,
+ (size_t)http->postsize);
+ included_body = http->postsize;
+ }
+ else {
+ if(http->postsize) {
+ char chunk[16];
+ /* Append the POST data chunky-style */
+ msnprintf(chunk, sizeof(chunk), "%x\r\n", (int)http->postsize);
+ result = Curl_dyn_add(r, chunk);
+ if(!result) {
+ included_body = http->postsize + strlen(chunk);
+ result = Curl_dyn_addn(r, data->set.postfields,
+ (size_t)http->postsize);
+ if(!result)
+ result = Curl_dyn_add(r, "\r\n");
+ included_body += 2;
+ }
}
- } while(passed < data->state.resume_from);
+ if(!result) {
+ result = Curl_dyn_add(r, "\x30\x0d\x0a\x0d\x0a");
+ /* 0 CR LF CR LF */
+ included_body += 5;
+ }
+ }
+ if(result)
+ return result;
+ /* Make sure the progress information is accurate */
+ Curl_pgrsSetUploadSize(data, http->postsize);
}
+ else {
+ /* A huge POST coming up, do data separate from the request */
+ http->postdata = data->set.postfields;
- /* now, decrease the size of the read */
- if(data->state.infilesize>0) {
- data->state.infilesize -= data->state.resume_from;
+ http->sending = HTTPSEND_BODY;
- if(data->state.infilesize <= 0) {
- failf(data, "File already completely uploaded");
- return CURLE_PARTIAL_FILE;
- }
+ data->state.fread_func = (curl_read_callback)readmoredata;
+ data->state.in = (void *)conn;
+
+ /* set the upload size to the progress meter */
+ Curl_pgrsSetUploadSize(data, http->postsize);
+
+ /* end of headers! */
+ result = Curl_dyn_add(r, "\r\n");
+ if(result)
+ return result;
}
- /* we've passed, proceed as normal */
- }
- }
- if(data->state.use_range) {
- /*
- * A range is selected. We use different headers whether we're downloading
- * or uploading and we always let customized headers override our internal
- * ones if any such are specified.
- */
- if(((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD)) &&
- !Curl_checkheaders(conn, "Range")) {
- /* if a line like this was already allocated, free the previous one */
- free(data->state.aptr.rangeline);
- data->state.aptr.rangeline = aprintf("Range: bytes=%s\r\n",
- data->state.range);
}
- else if((httpreq == HTTPREQ_POST || httpreq == HTTPREQ_PUT) &&
- !Curl_checkheaders(conn, "Content-Range")) {
+ else
+#endif
+ {
+ /* end of headers! */
+ result = Curl_dyn_add(r, "\r\n");
+ if(result)
+ return result;
- /* if a line like this was already allocated, free the previous one */
- free(data->state.aptr.rangeline);
+ if(data->req.upload_chunky && conn->bits.authneg) {
+ /* Chunky upload is selected and we're negotiating auth still, send
+ end-of-data only */
+ result = Curl_dyn_add(r, (char *)"\x30\x0d\x0a\x0d\x0a");
+ /* 0 CR LF CR LF */
+ if(result)
+ return result;
+ }
- if(data->set.set_resume_from < 0) {
- /* Upload resume was asked for, but we don't know the size of the
- remote part so we tell the server (and act accordingly) that we
- upload the whole file (again) */
- data->state.aptr.rangeline =
- aprintf("Content-Range: bytes 0-%" CURL_FORMAT_CURL_OFF_T
- "/%" CURL_FORMAT_CURL_OFF_T "\r\n",
- data->state.infilesize - 1, data->state.infilesize);
+ else if(data->state.infilesize) {
+ /* set the upload size to the progress meter */
+ Curl_pgrsSetUploadSize(data, http->postsize?http->postsize:-1);
+ /* set the pointer to mark that we will send the post body using the
+           read callback, but only if we're not in authentication negotiation */
+ if(!conn->bits.authneg)
+ http->postdata = (char *)&http->postdata;
}
- else if(data->state.resume_from) {
- /* This is because "resume" was selected */
- curl_off_t total_expected_size =
- data->state.resume_from + data->state.infilesize;
- data->state.aptr.rangeline =
- aprintf("Content-Range: bytes %s%" CURL_FORMAT_CURL_OFF_T
- "/%" CURL_FORMAT_CURL_OFF_T "\r\n",
- data->state.range, total_expected_size-1,
- total_expected_size);
- }
- else {
- /* Range was selected and then we just pass the incoming range and
- append total size */
- data->state.aptr.rangeline =
- aprintf("Content-Range: bytes %s/%" CURL_FORMAT_CURL_OFF_T "\r\n",
- data->state.range, data->state.infilesize);
- }
- if(!data->state.aptr.rangeline)
- return CURLE_OUT_OF_MEMORY;
}
- }
-
- httpstring = get_http_string(data, conn);
-
- /* initialize a dynamic send-buffer */
- Curl_dyn_init(&req, DYN_HTTP_REQUEST);
-
- /* add the main request stuff */
- /* GET/HEAD/POST/PUT */
- result = Curl_dyn_addf(&req, "%s ", request);
- if(result)
- return result;
+ /* issue the request */
+ result = Curl_buffer_send(r, conn, &data->info.request_size,
+ (size_t)included_body, FIRSTSOCKET);
- if(data->set.str[STRING_TARGET]) {
- path = data->set.str[STRING_TARGET];
- query = NULL;
- }
+ if(result)
+ failf(data, "Failed sending HTTP POST request");
+ else
+ Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
+ http->postdata?FIRSTSOCKET:-1);
+ break;
-#ifndef CURL_DISABLE_PROXY
- /* url */
- if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
- char *url = data->set.str[STRING_TEMP_URL];
- result = Curl_dyn_add(&req, url);
- Curl_safefree(data->set.str[STRING_TEMP_URL]);
- }
- else
-#endif
- if(paste_ftp_userpwd)
- result = Curl_dyn_addf(&req, "ftp://%s:%s@%s", conn->user, conn->passwd,
- path + sizeof("ftp://") - 1);
- else {
- result = Curl_dyn_add(&req, path);
+ default:
+ result = Curl_dyn_add(r, "\r\n");
if(result)
return result;
- if(query)
- result = Curl_dyn_addf(&req, "?%s", query);
- }
- if(result)
- return result;
-
-#ifndef CURL_DISABLE_ALTSVC
- if(conn->bits.altused && !Curl_checkheaders(conn, "Alt-Used")) {
- altused = aprintf("Alt-Used: %s:%d\r\n",
- conn->conn_to_host.name, conn->conn_to_port);
- if(!altused) {
- Curl_dyn_free(&req);
- return CURLE_OUT_OF_MEMORY;
- }
- }
-#endif
- result =
- Curl_dyn_addf(&req,
- "%s" /* ftp typecode (;type=x) */
- " HTTP/%s\r\n" /* HTTP version */
- "%s" /* host */
- "%s" /* proxyuserpwd */
- "%s" /* userpwd */
- "%s" /* range */
- "%s" /* user agent */
- "%s" /* accept */
- "%s" /* TE: */
- "%s" /* accept-encoding */
- "%s" /* referer */
- "%s" /* Proxy-Connection */
- "%s" /* transfer-encoding */
- "%s",/* Alt-Used */
-
- ftp_typecode,
- httpstring,
- (data->state.aptr.host?data->state.aptr.host:""),
- data->state.aptr.proxyuserpwd?
- data->state.aptr.proxyuserpwd:"",
- data->state.aptr.userpwd?data->state.aptr.userpwd:"",
- (data->state.use_range && data->state.aptr.rangeline)?
- data->state.aptr.rangeline:"",
- (data->set.str[STRING_USERAGENT] &&
- *data->set.str[STRING_USERAGENT] &&
- data->state.aptr.uagent)?
- data->state.aptr.uagent:"",
- http->p_accept?http->p_accept:"",
- data->state.aptr.te?data->state.aptr.te:"",
- (data->set.str[STRING_ENCODING] &&
- *data->set.str[STRING_ENCODING] &&
- data->state.aptr.accept_encoding)?
- data->state.aptr.accept_encoding:"",
- (data->change.referer && data->state.aptr.ref)?
- data->state.aptr.ref:"" /* Referer: <data> */,
-#ifndef CURL_DISABLE_PROXY
- (conn->bits.httpproxy &&
- !conn->bits.tunnel_proxy &&
- !Curl_checkProxyheaders(conn, "Proxy-Connection"))?
- "Proxy-Connection: Keep-Alive\r\n":"",
-#else
- "",
-#endif
- te,
- altused ? altused : ""
- );
-
- /* clear userpwd and proxyuserpwd to avoid re-using old credentials
- * from re-used connections */
- Curl_safefree(data->state.aptr.userpwd);
- Curl_safefree(data->state.aptr.proxyuserpwd);
- free(altused);
- if(result)
- return result;
+ /* issue the request */
+ result = Curl_buffer_send(r, conn, &data->info.request_size, 0,
+ FIRSTSOCKET);
- if(!(conn->handler->flags&PROTOPT_SSL) &&
- conn->httpversion != 20 &&
- (data->set.httpversion == CURL_HTTP_VERSION_2)) {
- /* append HTTP2 upgrade magic stuff to the HTTP request if it isn't done
- over SSL */
- result = Curl_http2_request_upgrade(&req, conn);
if(result)
- return result;
+ failf(data, "Failed sending HTTP request");
+ else
+ /* HTTP GET/HEAD download: */
+ Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
}
+ return result;
+}
+
#if !defined(CURL_DISABLE_COOKIES)
+CURLcode Curl_http_cookies(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct dynbuf *r)
+{
+ CURLcode result = CURLE_OK;
+ char *addcookies = NULL;
+ if(data->set.str[STRING_COOKIE] && !Curl_checkheaders(conn, "Cookie"))
+ addcookies = data->set.str[STRING_COOKIE];
+
if(data->cookies || addcookies) {
struct Cookie *co = NULL; /* no cookies from start */
int count = 0;
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
co = Curl_cookie_getlist(data->cookies,
data->state.aptr.cookiehost?
- data->state.aptr.cookiehost:host,
+ data->state.aptr.cookiehost:
+ conn->host.name,
data->state.up.path,
(conn->handler->protocol&CURLPROTO_HTTPS)?
TRUE:FALSE);
while(co) {
if(co->value) {
if(0 == count) {
- result = Curl_dyn_add(&req, "Cookie: ");
+ result = Curl_dyn_add(r, "Cookie: ");
if(result)
break;
}
- result = Curl_dyn_addf(&req, "%s%s=%s", count?"; ":"",
+ result = Curl_dyn_addf(r, "%s%s=%s", count?"; ":"",
co->name, co->value);
if(result)
break;
}
if(addcookies && !result) {
if(!count)
- result = Curl_dyn_add(&req, "Cookie: ");
+ result = Curl_dyn_add(r, "Cookie: ");
if(!result) {
- result = Curl_dyn_addf(&req, "%s%s", count?"; ":"", addcookies);
+ result = Curl_dyn_addf(r, "%s%s", count?"; ":"", addcookies);
count++;
}
}
if(count && !result)
- result = Curl_dyn_add(&req, "\r\n");
+ result = Curl_dyn_add(r, "\r\n");
if(result)
return result;
}
+ return result;
+}
#endif
- result = Curl_add_timecondition(conn, &req);
- if(result)
- return result;
-
- result = Curl_add_custom_headers(conn, FALSE, &req);
- if(result)
- return result;
-
- http->postdata = NULL; /* nothing to post at this point */
- Curl_pgrsSetUploadSize(data, -1); /* upload size is unknown atm */
-
- /* If 'authdone' is FALSE, we must not set the write socket index to the
- Curl_transfer() call below, as we're not ready to actually upload any
- data yet. */
-
- switch(httpreq) {
+CURLcode Curl_http_range(struct Curl_easy *data,
+ struct connectdata *conn,
+ Curl_HttpReq httpreq)
+{
+ if(data->state.use_range) {
+ /*
+ * A range is selected. We use different headers depending on whether we're
+ * downloading or uploading, and we always let customized headers override
+ * our internal ones if any such are specified.
+ */
+ if(((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD)) &&
+ !Curl_checkheaders(conn, "Range")) {
+ /* if a line like this was already allocated, free the previous one */
+ free(data->state.aptr.rangeline);
+ data->state.aptr.rangeline = aprintf("Range: bytes=%s\r\n",
+ data->state.range);
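+ /* e.g. with data->state.range set to "0-1023" this produces
+ "Range: bytes=0-1023\r\n" */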
+ }
+ else if((httpreq == HTTPREQ_POST || httpreq == HTTPREQ_PUT) &&
+ !Curl_checkheaders(conn, "Content-Range")) {
- case HTTPREQ_PUT: /* Let's PUT the data to the server! */
+ /* if a line like this was already allocated, free the previous one */
+ free(data->state.aptr.rangeline);
- if(conn->bits.authneg)
- postsize = 0;
- else
- postsize = data->state.infilesize;
+ if(data->set.set_resume_from < 0) {
+ /* Upload resume was asked for, but we don't know the size of the
+ remote part so we tell the server (and act accordingly) that we
+ upload the whole file (again) */
+ data->state.aptr.rangeline =
+ aprintf("Content-Range: bytes 0-%" CURL_FORMAT_CURL_OFF_T
+ "/%" CURL_FORMAT_CURL_OFF_T "\r\n",
+ data->state.infilesize - 1, data->state.infilesize);
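+ /* e.g. with an infilesize of 100 this produces
+ "Content-Range: bytes 0-99/100\r\n", telling the server that the
+ whole file is uploaded (again) */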
- if((postsize != -1) && !data->req.upload_chunky &&
- (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
- /* only add Content-Length if not uploading chunked */
- result = Curl_dyn_addf(&req, "Content-Length: %" CURL_FORMAT_CURL_OFF_T
- "\r\n", postsize);
- if(result)
- return result;
+ }
+ else if(data->state.resume_from) {
+ /* This is because "resume" was selected */
+ curl_off_t total_expected_size =
+ data->state.resume_from + data->state.infilesize;
+ data->state.aptr.rangeline =
+ aprintf("Content-Range: bytes %s%" CURL_FORMAT_CURL_OFF_T
+ "/%" CURL_FORMAT_CURL_OFF_T "\r\n",
+ data->state.range, total_expected_size-1,
+ total_expected_size);
+ }
+ else {
+ /* Range was selected, so we just pass the incoming range along and
+ append the total size */
+ data->state.aptr.rangeline =
+ aprintf("Content-Range: bytes %s/%" CURL_FORMAT_CURL_OFF_T "\r\n",
+ data->state.range, data->state.infilesize);
+ }
+ if(!data->state.aptr.rangeline)
+ return CURLE_OUT_OF_MEMORY;
}
+ }
+ return CURLE_OK;
+}
- if(postsize != 0) {
- result = expect100(data, conn, &req);
- if(result)
- return result;
+CURLcode Curl_http_resume(struct Curl_easy *data,
+ struct connectdata *conn,
+ Curl_HttpReq httpreq)
+{
+ if((HTTPREQ_POST == httpreq || HTTPREQ_PUT == httpreq) &&
+ data->state.resume_from) {
+ /**********************************************************************
+ * Resuming upload in HTTP means that we PUT or POST and that we have
+ * got a resume_from value set. The resume value has already created
+ * a Range: header that will be passed along. We need to "fast forward"
+ * the file the given number of bytes and decrease the assumed upload
+ * file size before we continue this venture in the dark lands of HTTP.
+ * Resuming mime/form posting at an offset > 0 makes no sense and is ignored.
+ *********************************************************************/
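+ /* e.g. with resume_from == 1000 and a 5000 byte input, the code below
+ skips the first 1000 bytes of the input and reduces infilesize to
+ 4000 before the upload starts */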
+
+ if(data->state.resume_from < 0) {
+ /*
+ * This is meant to get the size of the present remote-file by itself.
+ * We don't support this now. Bail out!
+ */
+ data->state.resume_from = 0;
}
- /* end of headers */
- result = Curl_dyn_add(&req, "\r\n");
- if(result)
- return result;
+ if(data->state.resume_from && !data->state.this_is_a_follow) {
+ /* are we still in the game? */
- /* set the upload size to the progress meter */
- Curl_pgrsSetUploadSize(data, postsize);
+ /* Now, let's read off the proper amount of bytes from the
+ input. */
+ int seekerr = CURL_SEEKFUNC_CANTSEEK;
+ if(conn->seek_func) {
+ Curl_set_in_callback(data, true);
+ seekerr = conn->seek_func(conn->seek_client, data->state.resume_from,
+ SEEK_SET);
+ Curl_set_in_callback(data, false);
+ }
- /* this sends the buffer and frees all the buffer resources */
- result = Curl_buffer_send(&req, conn, &data->info.request_size, 0,
- FIRSTSOCKET);
- if(result)
- failf(data, "Failed sending PUT request");
- else
- /* prepare for transfer */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
- postsize?FIRSTSOCKET:-1);
- if(result)
- return result;
- break;
+ if(seekerr != CURL_SEEKFUNC_OK) {
+ curl_off_t passed = 0;
- case HTTPREQ_POST_FORM:
- case HTTPREQ_POST_MIME:
- /* This is form posting using mime data. */
- if(conn->bits.authneg) {
- /* nothing to post! */
- result = Curl_dyn_add(&req, "Content-Length: 0\r\n\r\n");
- if(result)
- return result;
+ if(seekerr != CURL_SEEKFUNC_CANTSEEK) {
+ failf(data, "Could not seek stream");
+ return CURLE_READ_ERROR;
+ }
+ /* when seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
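+ /* e.g. with resume_from == 70000 and a (default) 16384 byte buffer,
+ this loop reads and discards 16384 bytes four times and then 4464
+ bytes, until 'passed' reaches resume_from */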
+ do {
+ size_t readthisamountnow =
+ (data->state.resume_from - passed > data->set.buffer_size) ?
+ (size_t)data->set.buffer_size :
+ curlx_sotouz(data->state.resume_from - passed);
- result = Curl_buffer_send(&req, conn, &data->info.request_size, 0,
- FIRSTSOCKET);
- if(result)
- failf(data, "Failed sending POST request");
- else
- /* setup variables for the upcoming transfer */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
- break;
+ size_t actuallyread =
+ data->state.fread_func(data->state.buffer, 1, readthisamountnow,
+ data->state.in);
+
+ passed += actuallyread;
+ if((actuallyread == 0) || (actuallyread > readthisamountnow)) {
+ /* this checks for greater-than only to make sure that the
+ CURL_READFUNC_ABORT return code still aborts */
+ failf(data, "Could only read %" CURL_FORMAT_CURL_OFF_T
+ " bytes from the input", passed);
+ return CURLE_READ_ERROR;
+ }
+ } while(passed < data->state.resume_from);
+ }
+
+ /* now, decrease the size of the read */
+ if(data->state.infilesize>0) {
+ data->state.infilesize -= data->state.resume_from;
+
+ if(data->state.infilesize <= 0) {
+ failf(data, "File already completely uploaded");
+ return CURLE_PARTIAL_FILE;
+ }
+ }
+ /* we have now fast-forwarded past the resume point, proceed as normal */
+ }
+ }
+ return CURLE_OK;
+}
+
+CURLcode Curl_http_firstwrite(struct Curl_easy *data,
+ struct connectdata *conn,
+ bool *done)
+{
+ struct SingleRequest *k = &data->req;
+ DEBUGASSERT(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP));
+ if(data->req.newurl) {
+ if(conn->bits.close) {
+ /* Abort after the headers if "follow Location" is set
+ and we're set to close anyway. */
+ k->keepon &= ~KEEP_RECV;
+ *done = TRUE;
+ return CURLE_OK;
+ }
+ /* We have a new URL to load, but since we want to be able to re-use this
+ connection properly, we read the full response in "ignore body" mode */
+ k->ignorebody = TRUE;
+ infof(data, "Ignoring the response-body\n");
+ }
+ if(data->state.resume_from && !k->content_range &&
+ (data->state.httpreq == HTTPREQ_GET) &&
+ !k->ignorebody) {
+
+ if(k->size == data->state.resume_from) {
+ /* The resume point is at the end of file, consider this fine even if it
+ doesn't allow resume from here. */
+ infof(data, "The entire document is already downloaded");
+ connclose(conn, "already downloaded");
+ /* Abort download */
+ k->keepon &= ~KEEP_RECV;
+ *done = TRUE;
+ return CURLE_OK;
}
- data->state.infilesize = postsize = http->postsize;
+ /* we wanted to resume a download, although the server doesn't seem to
+ * support this and we did this with a GET (if it wasn't a GET we did a
+ * POST or PUT resume) */
+ failf(data, "HTTP server doesn't seem to support "
+ "byte ranges. Cannot resume.");
+ return CURLE_RANGE_ERROR;
+ }
- /* We only set Content-Length and allow a custom Content-Length if
- we don't upload data chunked, as RFC2616 forbids us to set both
- kinds of headers (Transfer-Encoding: chunked and Content-Length) */
- if(postsize != -1 && !data->req.upload_chunky &&
- (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
- /* we allow replacing this header if not during auth negotiation,
- although it isn't very wise to actually set your own */
- result = Curl_dyn_addf(&req,
- "Content-Length: %" CURL_FORMAT_CURL_OFF_T
- "\r\n", postsize);
- if(result)
- return result;
+ if(data->set.timecondition && !data->state.range) {
+ /* A time condition has been set AND no ranges have been requested. This
+ seems to be what chapter 13.3.4 of RFC 2616 defines to be the correct
+ action for an HTTP/1.1 client */
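+ /* e.g. with CURLOPT_TIMECONDITION set to CURL_TIMECOND_IFMODSINCE and
+ the document not modified since CURLOPT_TIMEVALUE, the block below
+ skips the body and reports a simulated 304 */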
+
+ if(!Curl_meets_timecondition(data, k->timeofdoc)) {
+ *done = TRUE;
+ /* We're simulating an HTTP 304 from the server so we return
+ what the server would have returned */
+ data->info.httpcode = 304;
+ infof(data, "Simulate a HTTP 304 response!\n");
+ /* aborting the transfer before it is completed ruins the re-use
+ ability, so close the connection */
+ connclose(conn, "Simulated 304 handling");
+ return CURLE_OK;
}
+ } /* we have a time condition */
-#ifndef CURL_DISABLE_MIME
- /* Output mime-generated headers. */
- {
- struct curl_slist *hdr;
+ return CURLE_OK;
+}
- for(hdr = http->sendit->curlheaders; hdr; hdr = hdr->next) {
- result = Curl_dyn_addf(&req, "%s\r\n", hdr->data);
+#ifndef USE_HYPER
+/*
+ * Curl_http() gets called from the generic multi_do() function when an HTTP
+ * request is to be performed. This creates and sends a properly constructed
+ * HTTP request.
+ */
+CURLcode Curl_http(struct connectdata *conn, bool *done)
+{
+ struct Curl_easy *data = conn->data;
+ CURLcode result = CURLE_OK;
+ struct HTTP *http;
+ Curl_HttpReq httpreq;
+ const char *te = ""; /* transfer-encoding */
+ const char *request;
+ const char *httpstring;
+ struct dynbuf req;
+ char *altused = NULL;
+ const char *p_accept; /* Accept: string */
+
+ /* Always consider the DO phase done after this function call, even if there
+ may be parts of the request that are not yet sent, since we can deal with
+ the rest of the request in the PERFORM phase. */
+ *done = TRUE;
+
+ if(conn->transport != TRNSPRT_QUIC) {
+ if(conn->httpversion < 20) { /* unless the connection is re-used and
+ already http2 */
+ switch(conn->negnpn) {
+ case CURL_HTTP_VERSION_2:
+ conn->httpversion = 20; /* we know we're on HTTP/2 now */
+
+ result = Curl_http2_switched(conn, NULL, 0);
if(result)
return result;
- }
- }
+ break;
+ case CURL_HTTP_VERSION_1_1:
+ /* continue with HTTP/1.1 when explicitly requested */
+ break;
+ default:
+ /* Check if the user wants to use HTTP/2 with clear TCP */
+#ifdef USE_NGHTTP2
+ if(conn->data->set.httpversion ==
+ CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE) {
+#ifndef CURL_DISABLE_PROXY
+ if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
+ /* We don't support HTTP/2 proxies yet. Also it's debatable
+ whether or not this setting should apply to HTTP/2 proxies. */
+ infof(data, "Ignoring HTTP/2 prior knowledge due to proxy\n");
+ break;
+ }
#endif
+ DEBUGF(infof(data, "HTTP/2 over clean TCP\n"));
+ conn->httpversion = 20;
- /* For really small posts we don't use Expect: headers at all, and for
- the somewhat bigger ones we allow the app to disable it. Just make
- sure that the expect100header is always set to the preferred value
- here. */
- ptr = Curl_checkheaders(conn, "Expect");
- if(ptr) {
- data->state.expect100header =
- Curl_compareheader(ptr, "Expect:", "100-continue");
+ result = Curl_http2_switched(conn, NULL, 0);
+ if(result)
+ return result;
+ }
+#endif
+ break;
+ }
}
- else if(postsize > EXPECT_100_THRESHOLD || postsize < 0) {
- result = expect100(data, conn, &req);
+ else {
+ /* prepare for an HTTP/2 request */
+ result = Curl_http2_setup(conn);
if(result)
return result;
}
- else
- data->state.expect100header = FALSE;
+ }
+ http = data->req.p.http;
+ DEBUGASSERT(http);
- /* make the request end in a true CRLF */
- result = Curl_dyn_add(&req, "\r\n");
- if(result)
- return result;
+ result = Curl_http_host(data, conn);
+ if(result)
+ return result;
- /* set the upload size to the progress meter */
- Curl_pgrsSetUploadSize(data, postsize);
+ result = Curl_http_useragent(data, conn);
+ if(result)
+ return result;
- /* Read from mime structure. */
- data->state.fread_func = (curl_read_callback) Curl_mime_read;
- data->state.in = (void *) http->sendit;
- http->sending = HTTPSEND_BODY;
+ Curl_http_method(data, conn, &request, &httpreq);
- /* this sends the buffer and frees all the buffer resources */
- result = Curl_buffer_send(&req, conn, &data->info.request_size, 0,
- FIRSTSOCKET);
- if(result)
- failf(data, "Failed sending POST request");
- else
- /* prepare for transfer */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
- postsize?FIRSTSOCKET:-1);
+ /* setup the authentication headers */
+ {
+ char *pq = NULL;
+ if(data->state.up.query) {
+ pq = aprintf("%s?%s", data->state.up.path, data->state.up.query);
+ if(!pq)
+ return CURLE_OUT_OF_MEMORY;
+ }
+ result = Curl_http_output_auth(conn, request,
+ (pq ? pq : data->state.up.path), FALSE);
+ free(pq);
if(result)
return result;
+ }
- break;
-
- case HTTPREQ_POST:
- /* this is the simple POST, using x-www-form-urlencoded style */
+ if(((data->state.authhost.multipass && !data->state.authhost.done)
+ || (data->state.authproxy.multipass && !data->state.authproxy.done)) &&
+ (httpreq != HTTPREQ_GET) &&
+ (httpreq != HTTPREQ_HEAD)) {
+ /* Auth is required and we are not authenticated yet. Make a PUT or POST
+ with content-length zero as a "probe". */
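+ /* e.g. the first round of an NTLM or Negotiate handshake is sent with
+ "Content-Length: 0" and no body; the real body follows once the
+ authentication has completed */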
+ conn->bits.authneg = TRUE;
+ }
+ else
+ conn->bits.authneg = FALSE;
- if(conn->bits.authneg)
- postsize = 0;
- else
- /* the size of the post body */
- postsize = data->state.infilesize;
+ Curl_safefree(data->state.aptr.ref);
+ if(data->change.referer && !Curl_checkheaders(conn, "Referer")) {
+ data->state.aptr.ref = aprintf("Referer: %s\r\n", data->change.referer);
+ if(!data->state.aptr.ref)
+ return CURLE_OUT_OF_MEMORY;
+ }
- /* We only set Content-Length and allow a custom Content-Length if
- we don't upload data chunked, as RFC2616 forbids us to set both
- kinds of headers (Transfer-Encoding: chunked and Content-Length) */
- if((postsize != -1) && !data->req.upload_chunky &&
- (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
- /* we allow replacing this header if not during auth negotiation,
- although it isn't very wise to actually set your own */
- result = Curl_dyn_addf(&req, "Content-Length: %" CURL_FORMAT_CURL_OFF_T
- "\r\n", postsize);
- if(result)
- return result;
- }
+ if(!Curl_checkheaders(conn, "Accept-Encoding") &&
+ data->set.str[STRING_ENCODING]) {
+ Curl_safefree(data->state.aptr.accept_encoding);
+ data->state.aptr.accept_encoding =
+ aprintf("Accept-Encoding: %s\r\n", data->set.str[STRING_ENCODING]);
+ if(!data->state.aptr.accept_encoding)
+ return CURLE_OUT_OF_MEMORY;
+ }
+ else {
+ Curl_safefree(data->state.aptr.accept_encoding);
+ data->state.aptr.accept_encoding = NULL;
+ }
- if(!Curl_checkheaders(conn, "Content-Type")) {
- result = Curl_dyn_add(&req, "Content-Type: application/"
- "x-www-form-urlencoded\r\n");
- if(result)
- return result;
- }
+#ifdef HAVE_LIBZ
+ /* we only consider transfer-encoding magic if libz support is built-in */
- /* For really small posts we don't use Expect: headers at all, and for
- the somewhat bigger ones we allow the app to disable it. Just make
- sure that the expect100header is always set to the preferred value
- here. */
- ptr = Curl_checkheaders(conn, "Expect");
- if(ptr) {
- data->state.expect100header =
- Curl_compareheader(ptr, "Expect:", "100-continue");
- }
- else if(postsize > EXPECT_100_THRESHOLD || postsize < 0) {
- result = expect100(data, conn, &req);
- if(result)
- return result;
- }
- else
- data->state.expect100header = FALSE;
+ if(!Curl_checkheaders(conn, "TE") &&
+ data->set.http_transfer_encoding) {
+ /* When we are to insert a TE: header in the request, we must also insert
+ TE in a Connection: header, so we need to merge the custom provided
+ Connection: header and prevent the original from being sent. Note that if
+ the user has inserted his/her own TE: header we don't do this magic
+ but then assume that the user will handle it all! */
+ char *cptr = Curl_checkheaders(conn, "Connection");
+#define TE_HEADER "TE: gzip\r\n"
- if(data->set.postfields) {
+ Curl_safefree(data->state.aptr.te);
- /* In HTTP2, we send request body in DATA frame regardless of
- its size. */
- if(conn->httpversion != 20 &&
- !data->state.expect100header &&
- (postsize < MAX_INITIAL_POST_SIZE)) {
- /* if we don't use expect: 100 AND
- postsize is less than MAX_INITIAL_POST_SIZE
+ if(cptr) {
+ cptr = Curl_copy_header_value(cptr);
+ if(!cptr)
+ return CURLE_OUT_OF_MEMORY;
+ }
- then append the post data to the HTTP request header. This limit
- is no magic limit but only set to prevent really huge POSTs to
- get the data duplicated with malloc() and family. */
+ /* Create the (updated) Connection: header */
+ data->state.aptr.te = aprintf("Connection: %s%sTE\r\n" TE_HEADER,
+ cptr ? cptr : "", (cptr && *cptr) ? ", ":"");
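+ /* e.g. with a custom "Connection: Upgrade" header this produces
+ "Connection: Upgrade, TE\r\nTE: gzip\r\n" */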
- /* end of headers! */
- result = Curl_dyn_add(&req, "\r\n");
- if(result)
- return result;
+ free(cptr);
+ if(!data->state.aptr.te)
+ return CURLE_OUT_OF_MEMORY;
+ }
+#endif
- if(!data->req.upload_chunky) {
- /* We're not sending it 'chunked', append it to the request
- already now to reduce the number if send() calls */
- result = Curl_dyn_addn(&req, data->set.postfields,
- (size_t)postsize);
- included_body = postsize;
- }
- else {
- if(postsize) {
- char chunk[16];
- /* Append the POST data chunky-style */
- msnprintf(chunk, sizeof(chunk), "%x\r\n", (int)postsize);
- result = Curl_dyn_add(&req, chunk);
- if(!result) {
- included_body = postsize + strlen(chunk);
- result = Curl_dyn_addn(&req, data->set.postfields,
- (size_t)postsize);
- if(!result)
- result = Curl_dyn_add(&req, "\r\n");
- included_body += 2;
- }
- }
- if(!result) {
- result = Curl_dyn_add(&req, "\x30\x0d\x0a\x0d\x0a");
- /* 0 CR LF CR LF */
- included_body += 5;
- }
- }
- if(result)
- return result;
- /* Make sure the progress information is accurate */
- Curl_pgrsSetUploadSize(data, postsize);
- }
- else {
- /* A huge POST coming up, do data separate from the request */
- http->postsize = postsize;
- http->postdata = data->set.postfields;
+ result = Curl_http_body(data, conn, httpreq, &te);
+ if(result)
+ return result;
- http->sending = HTTPSEND_BODY;
+ p_accept = Curl_checkheaders(conn, "Accept")?NULL:"Accept: */*\r\n";
- data->state.fread_func = (curl_read_callback)readmoredata;
- data->state.in = (void *)conn;
+ result = Curl_http_resume(data, conn, httpreq);
+ if(result)
+ return result;
- /* set the upload size to the progress meter */
- Curl_pgrsSetUploadSize(data, http->postsize);
+ result = Curl_http_range(data, conn, httpreq);
+ if(result)
+ return result;
- /* end of headers! */
- result = Curl_dyn_add(&req, "\r\n");
- if(result)
- return result;
- }
- }
- else {
- /* end of headers! */
- result = Curl_dyn_add(&req, "\r\n");
- if(result)
- return result;
+ httpstring = get_http_string(data, conn);
- if(data->req.upload_chunky && conn->bits.authneg) {
- /* Chunky upload is selected and we're negotiating auth still, send
- end-of-data only */
- result = Curl_dyn_add(&req, (char *)"\x30\x0d\x0a\x0d\x0a");
- /* 0 CR LF CR LF */
- if(result)
- return result;
- }
+ /* initialize a dynamic send-buffer */
+ Curl_dyn_init(&req, DYN_HTTP_REQUEST);
- else if(data->state.infilesize) {
- /* set the upload size to the progress meter */
- Curl_pgrsSetUploadSize(data, postsize?postsize:-1);
+ /* add the main request stuff */
+ /* GET/HEAD/POST/PUT */
+ result = Curl_dyn_addf(&req, "%s ", request);
+ if(!result)
+ result = Curl_http_target(data, conn, &req);
+ if(result) {
+ Curl_dyn_free(&req);
+ return result;
+ }
- /* set the pointer to mark that we will send the post body using the
- read callback, but only if we're not in authenticate
- negotiation */
- if(!conn->bits.authneg) {
- http->postdata = (char *)&http->postdata;
- http->postsize = postsize;
- }
- }
+#ifndef CURL_DISABLE_ALTSVC
+ if(conn->bits.altused && !Curl_checkheaders(conn, "Alt-Used")) {
+ altused = aprintf("Alt-Used: %s:%d\r\n",
+ conn->conn_to_host.name, conn->conn_to_port);
+ if(!altused) {
+ Curl_dyn_free(&req);
+ return CURLE_OUT_OF_MEMORY;
}
- /* issue the request */
- result = Curl_buffer_send(&req, conn, &data->info.request_size,
- (size_t)included_body, FIRSTSOCKET);
+ }
+#endif
+ result =
+ Curl_dyn_addf(&req,
+ " HTTP/%s\r\n" /* HTTP version */
+ "%s" /* host */
+ "%s" /* proxyuserpwd */
+ "%s" /* userpwd */
+ "%s" /* range */
+ "%s" /* user agent */
+ "%s" /* accept */
+ "%s" /* TE: */
+ "%s" /* accept-encoding */
+ "%s" /* referer */
+ "%s" /* Proxy-Connection */
+ "%s" /* transfer-encoding */
+ "%s",/* Alt-Used */
- if(result)
- failf(data, "Failed sending HTTP POST request");
- else
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
- http->postdata?FIRSTSOCKET:-1);
- break;
+ httpstring,
+ (data->state.aptr.host?data->state.aptr.host:""),
+ data->state.aptr.proxyuserpwd?
+ data->state.aptr.proxyuserpwd:"",
+ data->state.aptr.userpwd?data->state.aptr.userpwd:"",
+ (data->state.use_range && data->state.aptr.rangeline)?
+ data->state.aptr.rangeline:"",
+ (data->set.str[STRING_USERAGENT] &&
+ *data->set.str[STRING_USERAGENT] &&
+ data->state.aptr.uagent)?
+ data->state.aptr.uagent:"",
+ p_accept?p_accept:"",
+ data->state.aptr.te?data->state.aptr.te:"",
+ (data->set.str[STRING_ENCODING] &&
+ *data->set.str[STRING_ENCODING] &&
+ data->state.aptr.accept_encoding)?
+ data->state.aptr.accept_encoding:"",
+ (data->change.referer && data->state.aptr.ref)?
+ data->state.aptr.ref:"" /* Referer: <data> */,
+#ifndef CURL_DISABLE_PROXY
+ (conn->bits.httpproxy &&
+ !conn->bits.tunnel_proxy &&
+ !Curl_checkProxyheaders(conn, "Proxy-Connection"))?
+ "Proxy-Connection: Keep-Alive\r\n":"",
+#else
+ "",
+#endif
+ te,
+ altused ? altused : ""
+ );
+
+ /* clear userpwd and proxyuserpwd to avoid re-using old credentials
+ * from re-used connections */
+ Curl_safefree(data->state.aptr.userpwd);
+ Curl_safefree(data->state.aptr.proxyuserpwd);
+ free(altused);
- default:
- result = Curl_dyn_add(&req, "\r\n");
- if(result)
+ if(result) {
+ Curl_dyn_free(&req);
+ return result;
+ }
+
+ if(!(conn->handler->flags&PROTOPT_SSL) &&
+ conn->httpversion != 20 &&
+ (data->set.httpversion == CURL_HTTP_VERSION_2)) {
+ /* append HTTP2 upgrade magic stuff to the HTTP request if it isn't done
+ over SSL */
+ result = Curl_http2_request_upgrade(&req, conn);
+ if(result) {
+ Curl_dyn_free(&req);
return result;
+ }
+ }
- /* issue the request */
- result = Curl_buffer_send(&req, conn, &data->info.request_size, 0,
- FIRSTSOCKET);
+ result = Curl_http_cookies(data, conn, &req);
+ if(!result)
+ result = Curl_add_timecondition(conn, &req);
+ if(!result)
+ result = Curl_add_custom_headers(conn, FALSE, &req);
- if(result)
- failf(data, "Failed sending HTTP request");
- else
- /* HTTP GET/HEAD download: */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
+ if(!result) {
+ http->postdata = NULL; /* nothing to post at this point */
+ if((httpreq == HTTPREQ_GET) ||
+ (httpreq == HTTPREQ_HEAD))
+ Curl_pgrsSetUploadSize(data, 0); /* nothing */
+
+ /* bodysend takes ownership of the 'req' memory on success */
+ result = Curl_http_bodysend(data, conn, &req, httpreq);
}
- if(result)
+ if(result) {
+ Curl_dyn_free(&req);
return result;
- if(!postsize && (http->sending != HTTPSEND_REQUEST))
+ }
+
+ if((http->postsize > -1) &&
+ (http->postsize <= data->req.writebytecount) &&
+ (http->sending != HTTPSEND_REQUEST))
data->req.upload_done = TRUE;
if(data->req.writebytecount) {
if(Curl_pgrsUpdate(conn))
result = CURLE_ABORTED_BY_CALLBACK;
- if(data->req.writebytecount >= postsize) {
+ if(!http->postsize) {
/* already sent the entire request body, mark the "upload" as
complete */
infof(data, "upload completely sent off: %" CURL_FORMAT_CURL_OFF_T
" out of %" CURL_FORMAT_CURL_OFF_T " bytes\n",
- data->req.writebytecount, postsize);
+ data->req.writebytecount, http->postsize);
data->req.upload_done = TRUE;
data->req.keepon &= ~KEEP_SEND; /* we're done writing */
data->req.exp100 = EXP100_SEND_DATA; /* already sent */
return result;
}
+#endif /* USE_HYPER */
+
typedef enum {
STATUS_UNKNOWN, /* not enough data to tell yet */
STATUS_DONE, /* a status line was read */
static void print_http_error(struct Curl_easy *data)
{
struct SingleRequest *k = &data->req;
- char *beg = Curl_dyn_ptr(&data->state.headerb);
-
- /* make sure that data->req.p points to the HTTP status line */
- if(!strncmp(beg, "HTTP", 4)) {
-
- /* skip to HTTP status code */
- beg = strchr(beg, ' ');
- if(beg && *++beg) {
-
- /* find trailing CR */
- char end_char = '\r';
- char *end = strchr(beg, end_char);
- if(!end) {
- /* try to find LF (workaround for non-compliant HTTP servers) */
- end_char = '\n';
- end = strchr(beg, end_char);
+ failf(data, "The requested URL returned error: %d", k->httpcode);
+}
+
+/*
+ * Curl_http_header() parses a single response header.
+ */
+CURLcode Curl_http_header(struct Curl_easy *data, struct connectdata *conn,
+ char *headp)
+{
+ CURLcode result;
+ struct SingleRequest *k = &data->req;
+ /* Check for Content-Length: header lines to get size */
+ if(!k->http_bodyless &&
+ !data->set.ignorecl && checkprefix("Content-Length:", headp)) {
+ curl_off_t contentlength;
+ CURLofft offt = curlx_strtoofft(headp + 15, NULL, 10, &contentlength);
+
+ if(offt == CURL_OFFT_OK) {
+ if(data->set.max_filesize &&
+ contentlength > data->set.max_filesize) {
+ failf(data, "Maximum file size exceeded");
+ return CURLE_FILESIZE_EXCEEDED;
+ }
+ k->size = contentlength;
+ k->maxdownload = k->size;
+ /* we set the progress download size already at this point
+ just to make it easier for apps/callbacks to extract this
+ info as soon as possible */
+ Curl_pgrsSetDownloadSize(data, k->size);
+ }
+ else if(offt == CURL_OFFT_FLOW) {
+ /* out of range */
+ if(data->set.max_filesize) {
+ failf(data, "Maximum file size exceeded");
+ return CURLE_FILESIZE_EXCEEDED;
+ }
+ streamclose(conn, "overflow content-length");
+ infof(data, "Overflow Content-Length: value!\n");
+ }
+ else {
+ /* negative or just rubbish - bad HTTP */
+ failf(data, "Invalid Content-Length: value");
+ return CURLE_WEIRD_SERVER_REPLY;
+ }
+ }
+ /* check for Content-Type: header lines to get the MIME-type */
+ else if(checkprefix("Content-Type:", headp)) {
+ char *contenttype = Curl_copy_header_value(headp);
+ if(!contenttype)
+ return CURLE_OUT_OF_MEMORY;
+ if(!*contenttype)
+ /* ignore empty data */
+ free(contenttype);
+ else {
+ Curl_safefree(data->info.contenttype);
+ data->info.contenttype = contenttype;
+ }
+ }
+#ifndef CURL_DISABLE_PROXY
+ else if((conn->httpversion == 10) &&
+ conn->bits.httpproxy &&
+ Curl_compareheader(headp, "Proxy-Connection:", "keep-alive")) {
+ /*
+ * When an HTTP/1.0 reply comes while using a proxy, the
+ * 'Proxy-Connection: keep-alive' line tells us the
+ * connection will be kept alive for our pleasure.
+ * Default action for 1.0 is to close.
+ */
+ connkeep(conn, "Proxy-Connection keep-alive"); /* don't close */
+ infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
+ }
+ else if((conn->httpversion == 11) &&
+ conn->bits.httpproxy &&
+ Curl_compareheader(headp, "Proxy-Connection:", "close")) {
+ /*
+ * We get an HTTP/1.1 response from a proxy and it says it'll
+ * close down after this transfer.
+ */
+ connclose(conn, "Proxy-Connection: asked to close after done");
+ infof(data, "HTTP/1.1 proxy connection set close!\n");
+ }
+#endif
+ else if((conn->httpversion == 10) &&
+ Curl_compareheader(headp, "Connection:", "keep-alive")) {
+ /*
+ * An HTTP/1.0 reply with the 'Connection: keep-alive' line
+ * tells us the connection will be kept alive for our
+ * pleasure. Default action for 1.0 is to close.
+ *
+ * [RFC2068, section 19.7.1] */
+ connkeep(conn, "Connection keep-alive");
+ infof(data, "HTTP/1.0 connection set to keep alive!\n");
+ }
+ else if(Curl_compareheader(headp, "Connection:", "close")) {
+ /*
+ * [RFC 2616, section 8.1.2.1]
+ * "Connection: close" is HTTP/1.1 language and means that
+ * the connection will close when this request has been
+ * served.
+ */
+ streamclose(conn, "Connection: close used");
+ }
+ else if(!k->http_bodyless && checkprefix("Transfer-Encoding:", headp)) {
+ /* One or more encodings. We check for chunked and/or a compression
+ algorithm. */
+ /*
+ * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
+ * means that the server will send a series of "chunks". Each
+ * chunk starts with a line with info (including the size of the
+ * coming block), terminated with CRLF, then a block of data
+ * with the previously mentioned size. There can be any amount
+ * of chunks, and a chunk-data set to zero signals the
+ * end-of-chunks. */
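+ /* e.g. the chunked body "5\r\nhello\r\n0\r\n\r\n" carries the five
+ payload bytes "hello" followed by the terminating zero-sized chunk */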
+
+ result = Curl_build_unencoding_stack(conn, headp + 18, TRUE);
+ if(result)
+ return result;
+ }
+ else if(!k->http_bodyless && checkprefix("Content-Encoding:", headp) &&
+ data->set.str[STRING_ENCODING]) {
+ /*
+ * Process Content-Encoding. Look for the values: identity,
+ * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
+ * x-compress are the same as gzip and compress. (Sec 3.5 RFC
+ * 2616). zlib cannot handle compress. However, errors are
+ * handled further down when the response body is processed
+ */
+ result = Curl_build_unencoding_stack(conn, headp + 17, FALSE);
+ if(result)
+ return result;
+ }
+ else if(checkprefix("Retry-After:", headp)) {
+ /* Retry-After = HTTP-date / delay-seconds */
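+ /* e.g. "Retry-After: 120" (delay in seconds) or
+ "Retry-After: Fri, 31 Dec 1999 23:59:59 GMT" (HTTP-date) */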
+ curl_off_t retry_after = 0; /* zero for unknown or "now" */
+ time_t date = Curl_getdate_capped(&headp[12]);
+ if(-1 == date) {
+ /* not a date, try it as a decimal number */
+ (void)curlx_strtoofft(&headp[12], NULL, 10, &retry_after);
+ }
+ else
+ /* convert date to number of seconds into the future */
+ retry_after = date - time(NULL);
+ data->info.retry_after = retry_after; /* store it */
+ }
+ else if(!k->http_bodyless && checkprefix("Content-Range:", headp)) {
+ /* Content-Range: bytes [num]-
+ Content-Range: bytes: [num]-
+ Content-Range: [num]-
+ Content-Range: [asterisk]/[total]
+
+ The second format was added since Sun's webserver
+ JavaWebServer/1.1.1 obviously sends the header this way!
+ The third was added since some servers use that!
+ The fourth means the requested range was unsatisfied.
+ */
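+ /* e.g. "Content-Range: bytes 500-999/1234" makes the parsing below set
+ k->offset to 500; if that matches the requested resume_from, the
+ resume is considered honored */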
+
+ char *ptr = headp + 14;
+
+ /* Move forward until first digit or asterisk */
+ while(*ptr && !ISDIGIT(*ptr) && *ptr != '*')
+ ptr++;
+
+ /* if it truly stopped on a digit */
+ if(ISDIGIT(*ptr)) {
+ if(!curlx_strtoofft(ptr, NULL, 10, &k->offset)) {
+ if(data->state.resume_from == k->offset)
+ /* we asked for a resume and we got it */
+ k->content_range = TRUE;
}
+ }
+ else
+ data->state.resume_from = 0; /* get everything */
+ }
+#if !defined(CURL_DISABLE_COOKIES)
+ else if(data->cookies && data->state.cookie_engine &&
+ checkprefix("Set-Cookie:", headp)) {
+ Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
+ CURL_LOCK_ACCESS_SINGLE);
+ Curl_cookie_add(data,
+ data->cookies, TRUE, FALSE, headp + 11,
+ /* If there is a custom-set Host: name, use it
+ here, or else use real peer host name. */
+ data->state.aptr.cookiehost?
+ data->state.aptr.cookiehost:conn->host.name,
+ data->state.up.path,
+ (conn->handler->protocol&CURLPROTO_HTTPS)?
+ TRUE:FALSE);
+ Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
+ }
+#endif
+ else if(!k->http_bodyless && checkprefix("Last-Modified:", headp) &&
+ (data->set.timecondition || data->set.get_filetime) ) {
+ k->timeofdoc = Curl_getdate_capped(headp + strlen("Last-Modified:"));
+ if(data->set.get_filetime)
+ data->info.filetime = k->timeofdoc;
+ }
+ else if((checkprefix("WWW-Authenticate:", headp) &&
+ (401 == k->httpcode)) ||
+ (checkprefix("Proxy-authenticate:", headp) &&
+ (407 == k->httpcode))) {
+
+ bool proxy = (k->httpcode == 407) ? TRUE : FALSE;
+ char *auth = Curl_copy_header_value(headp);
+ if(!auth)
+ return CURLE_OUT_OF_MEMORY;
+
+ result = Curl_http_input_auth(conn, proxy, auth);
+
+ free(auth);
+
+ if(result)
+ return result;
+ }
+#ifdef USE_SPNEGO
+ else if(checkprefix("Persistent-Auth", headp)) {
+ struct negotiatedata *negdata = &conn->negotiate;
+ struct auth *authp = &data->state.authhost;
+ if(authp->picked == CURLAUTH_NEGOTIATE) {
+ char *persistentauth = Curl_copy_header_value(headp);
+ if(!persistentauth)
+ return CURLE_OUT_OF_MEMORY;
+ negdata->noauthpersist = checkprefix("false", persistentauth)?
+ TRUE:FALSE;
+ negdata->havenoauthpersist = TRUE;
+ infof(data, "Negotiate: noauthpersist -> %d, header part: %s",
+ negdata->noauthpersist, persistentauth);
+ free(persistentauth);
+ }
+ }
+#endif
+ else if((k->httpcode >= 300 && k->httpcode < 400) &&
+ checkprefix("Location:", headp) &&
+ !data->req.location) {
+ /* this is the URL that the server advises us to use instead */
+ char *location = Curl_copy_header_value(headp);
+ if(!location)
+ return CURLE_OUT_OF_MEMORY;
+ if(!*location)
+ /* ignore empty data */
+ free(location);
+ else {
+ data->req.location = location;
- if(end) {
- /* temporarily replace CR or LF by NUL and print the error message */
- *end = '\0';
- failf(data, "The requested URL returned error: %s", beg);
+ if(data->set.http_follow_location) {
+ DEBUGASSERT(!data->req.newurl);
+ data->req.newurl = strdup(data->req.location); /* clone */
+ if(!data->req.newurl)
+ return CURLE_OUT_OF_MEMORY;
- /* restore the previously replaced CR or LF */
- *end = end_char;
- return;
+ /* some cases of POST and PUT etc need to rewind the data
+ stream at this point */
+ result = http_perhapsrewind(conn);
+ if(result)
+ return result;
}
}
}
- /* fall-back to printing the HTTP status code only */
- failf(data, "The requested URL returned error: %d", k->httpcode);
+#ifdef USE_HSTS
+ /* If enabled, the header is incoming and this is over HTTPS */
+ else if(data->hsts && checkprefix("Strict-Transport-Security:", headp) &&
+ (conn->handler->flags & PROTOPT_SSL)) {
+ CURLcode check =
+ Curl_hsts_parse(data->hsts, data->state.up.hostname,
+ &headp[ sizeof("Strict-Transport-Security:") -1 ]);
+ if(check)
+ infof(data, "Illegal STS header skipped\n");
+#ifdef DEBUGBUILD
+ else
+ infof(data, "Parsed STS header fine (%zu entries)\n",
+ data->hsts->list.size);
+#endif
+ }
+#endif
+#ifndef CURL_DISABLE_ALTSVC
+ /* If enabled, the header is incoming and this is over HTTPS */
+ else if(data->asi && checkprefix("Alt-Svc:", headp) &&
+ ((conn->handler->flags & PROTOPT_SSL) ||
+#ifdef CURLDEBUG
+ /* allow debug builds to circumvent the HTTPS restriction */
+ getenv("CURL_ALTSVC_HTTP")
+#else
+ 0
+#endif
+ )) {
+ /* the ALPN of the current request */
+ enum alpnid id = (conn->httpversion == 20) ? ALPN_h2 : ALPN_h1;
+ result = Curl_altsvc_parse(data, data->asi,
+ &headp[ strlen("Alt-Svc:") ],
+ id, conn->host.name,
+ curlx_uitous(conn->remote_port));
+ if(result)
+ return result;
+ }
+#endif
+ else if(conn->handler->protocol & CURLPROTO_RTSP) {
+ result = Curl_rtsp_parseheader(conn, headp);
+ if(result)
+ return result;
+ }
+ return CURLE_OK;
+}
+
+/*
+ * Called after the first HTTP response line (the status line) has been
+ * received and parsed.
+ */
+
+CURLcode Curl_http_statusline(struct Curl_easy *data,
+ struct connectdata *conn)
+{
+ struct SingleRequest *k = &data->req;
+ data->info.httpcode = k->httpcode;
+
+ data->info.httpversion = conn->httpversion;
+ if(!data->state.httpversion ||
+ data->state.httpversion > conn->httpversion)
+ /* store the lowest server version we encounter */
+ data->state.httpversion = conn->httpversion;
+
+ /*
+ * This code executes as part of processing the header. As a
+ * result, it's not totally clear how to interpret the
+ * response code yet as that depends on what other headers may
+ * be present. 401 and 407 may be errors, but may be OK
+ * depending on how authentication is working. Other codes
+ * are definitely errors, so give up here.
+ */
+ if(data->state.resume_from && data->state.httpreq == HTTPREQ_GET &&
+ k->httpcode == 416) {
+ /* "Requested Range Not Satisfiable", just proceed and
+ pretend this is no error */
+ k->ignorebody = TRUE; /* Avoid appending error msg to good data. */
+ }
+ else if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
+ ((k->httpcode != 401) || !conn->bits.user_passwd)
+#ifndef CURL_DISABLE_PROXY
+ && ((k->httpcode != 407) || !conn->bits.proxy_user_passwd)
+#endif
+ ) {
+ /* serious error, go home! */
+ print_http_error(data);
+ return CURLE_HTTP_RETURNED_ERROR;
+ }
+
+ if(conn->httpversion == 10) {
+ /* Default action for HTTP/1.0 must be to close, unless
+ we get one of those fancy headers that tell us the
+ server keeps it open for us! */
+ infof(data, "HTTP 1.0, assume close after body\n");
+ connclose(conn, "HTTP/1.0 close after body");
+ }
+ else if(conn->httpversion == 20 ||
+ (k->upgr101 == UPGR101_REQUESTED && k->httpcode == 101)) {
+ DEBUGF(infof(data, "HTTP/2 found, allow multiplexing\n"));
+ /* HTTP/2 cannot avoid multiplexing since it is a core functionality
+ of the protocol */
+ conn->bundle->multiuse = BUNDLE_MULTIPLEX;
+ }
+ else if(conn->httpversion >= 11 &&
+ !conn->bits.close) {
+ /* If HTTP version is >= 1.1 and connection is persistent */
+ DEBUGF(infof(data,
+ "HTTP 1.1 or later with persistent connection\n"));
+ }
+
+ k->http_bodyless = k->httpcode >= 100 && k->httpcode < 200;
+ switch(k->httpcode) {
+ case 304:
+ /* (quote from RFC2616, section 10.3.5): The 304 response
+ * MUST NOT contain a message-body, and thus is always
+ * terminated by the first empty line after the header
+ * fields. */
+ if(data->set.timecondition)
+ data->info.timecond = TRUE;
+ /* FALLTHROUGH */
+ case 204:
+ /* (quote from RFC2616, section 10.2.5): The server has
+ * fulfilled the request but does not need to return an
+ * entity-body ... The 204 response MUST NOT include a
+ * message-body, and thus is always terminated by the first
+ * empty line after the header fields. */
+ k->size = 0;
+ k->maxdownload = 0;
+ k->http_bodyless = TRUE;
+ break;
+ default:
+ break;
+ }
+ return CURLE_OK;
}
/*
}
if(nc) {
- data->info.httpcode = k->httpcode;
-
- data->info.httpversion = conn->httpversion;
- if(!data->state.httpversion ||
- data->state.httpversion > conn->httpversion)
- /* store the lowest server version we encounter */
- data->state.httpversion = conn->httpversion;
-
- /*
- * This code executes as part of processing the header. As a
- * result, it's not totally clear how to interpret the
- * response code yet as that depends on what other headers may
- * be present. 401 and 407 may be errors, but may be OK
- * depending on how authentication is working. Other codes
- * are definitely errors, so give up here.
- */
- if(data->state.resume_from && data->state.httpreq == HTTPREQ_GET &&
- k->httpcode == 416) {
- /* "Requested Range Not Satisfiable", just proceed and
- pretend this is no error */
- k->ignorebody = TRUE; /* Avoid appending error msg to good data. */
- }
- else if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
- ((k->httpcode != 401) || !conn->bits.user_passwd)
-#ifndef CURL_DISABLE_PROXY
- && ((k->httpcode != 407) || !conn->bits.proxy_user_passwd)
-#endif
- ) {
- /* serious error, go home! */
- print_http_error(data);
- return CURLE_HTTP_RETURNED_ERROR;
- }
-
- if(conn->httpversion == 10) {
- /* Default action for HTTP/1.0 must be to close, unless
- we get one of those fancy headers that tell us the
- server keeps it open for us! */
- infof(data, "HTTP 1.0, assume close after body\n");
- connclose(conn, "HTTP/1.0 close after body");
- }
- else if(conn->httpversion == 20 ||
- (k->upgr101 == UPGR101_REQUESTED && k->httpcode == 101)) {
- DEBUGF(infof(data, "HTTP/2 found, allow multiplexing\n"));
- /* HTTP/2 cannot avoid multiplexing since it is a core functionality
- of the protocol */
- conn->bundle->multiuse = BUNDLE_MULTIPLEX;
- }
- else if(conn->httpversion >= 11 &&
- !conn->bits.close) {
- /* If HTTP version is >= 1.1 and connection is persistent */
- DEBUGF(infof(data,
- "HTTP 1.1 or later with persistent connection\n"));
- }
-
- k->http_bodyless = k->httpcode >= 100 && k->httpcode < 200;
- switch(k->httpcode) {
- case 304:
- /* (quote from RFC2616, section 10.3.5): The 304 response
- * MUST NOT contain a message-body, and thus is always
- * terminated by the first empty line after the header
- * fields. */
- if(data->set.timecondition)
- data->info.timecond = TRUE;
- /* FALLTHROUGH */
- case 204:
- /* (quote from RFC2616, section 10.2.5): The server has
- * fulfilled the request but does not need to return an
- * entity-body ... The 204 response MUST NOT include a
- * message-body, and thus is always terminated by the first
- * empty line after the header fields. */
- k->size = 0;
- k->maxdownload = 0;
- k->http_bodyless = TRUE;
- break;
- default:
- break;
- }
+ result = Curl_http_statusline(data, conn);
+ if(result)
+ return result;
}
else {
k->header = FALSE; /* this is not a header line */
if(result)
return result;
- /* Check for Content-Length: header lines to get size */
- if(!k->http_bodyless &&
- !data->set.ignorecl && checkprefix("Content-Length:", headp)) {
- curl_off_t contentlength;
- CURLofft offt = curlx_strtoofft(headp + 15, NULL, 10, &contentlength);
-
- if(offt == CURL_OFFT_OK) {
- if(data->set.max_filesize &&
- contentlength > data->set.max_filesize) {
- failf(data, "Maximum file size exceeded");
- return CURLE_FILESIZE_EXCEEDED;
- }
- k->size = contentlength;
- k->maxdownload = k->size;
- /* we set the progress download size already at this point
- just to make it easier for apps/callbacks to extract this
- info as soon as possible */
- Curl_pgrsSetDownloadSize(data, k->size);
- }
- else if(offt == CURL_OFFT_FLOW) {
- /* out of range */
- if(data->set.max_filesize) {
- failf(data, "Maximum file size exceeded");
- return CURLE_FILESIZE_EXCEEDED;
- }
- streamclose(conn, "overflow content-length");
- infof(data, "Overflow Content-Length: value!\n");
- }
- else {
- /* negative or just rubbish - bad HTTP */
- failf(data, "Invalid Content-Length: value");
- return CURLE_WEIRD_SERVER_REPLY;
- }
- }
- /* check for Content-Type: header lines to get the MIME-type */
- else if(checkprefix("Content-Type:", headp)) {
- char *contenttype = Curl_copy_header_value(headp);
- if(!contenttype)
- return CURLE_OUT_OF_MEMORY;
- if(!*contenttype)
- /* ignore empty data */
- free(contenttype);
- else {
- Curl_safefree(data->info.contenttype);
- data->info.contenttype = contenttype;
- }
- }
-#ifndef CURL_DISABLE_PROXY
- else if((conn->httpversion == 10) &&
- conn->bits.httpproxy &&
- Curl_compareheader(headp, "Proxy-Connection:", "keep-alive")) {
- /*
- * When a HTTP/1.0 reply comes when using a proxy, the
- * 'Proxy-Connection: keep-alive' line tells us the
- * connection will be kept alive for our pleasure.
- * Default action for 1.0 is to close.
- */
- connkeep(conn, "Proxy-Connection keep-alive"); /* don't close */
- infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
- }
- else if((conn->httpversion == 11) &&
- conn->bits.httpproxy &&
- Curl_compareheader(headp, "Proxy-Connection:", "close")) {
- /*
- * We get a HTTP/1.1 response from a proxy and it says it'll
- * close down after this transfer.
- */
- connclose(conn, "Proxy-Connection: asked to close after done");
- infof(data, "HTTP/1.1 proxy connection set close!\n");
- }
-#endif
- else if((conn->httpversion == 10) &&
- Curl_compareheader(headp, "Connection:", "keep-alive")) {
- /*
- * A HTTP/1.0 reply with the 'Connection: keep-alive' line
- * tells us the connection will be kept alive for our
- * pleasure. Default action for 1.0 is to close.
- *
- * [RFC2068, section 19.7.1] */
- connkeep(conn, "Connection keep-alive");
- infof(data, "HTTP/1.0 connection set to keep alive!\n");
- }
- else if(Curl_compareheader(headp, "Connection:", "close")) {
- /*
- * [RFC 2616, section 8.1.2.1]
- * "Connection: close" is HTTP/1.1 language and means that
- * the connection will close when this request has been
- * served.
- */
- streamclose(conn, "Connection: close used");
- }
- else if(!k->http_bodyless && checkprefix("Transfer-Encoding:", headp)) {
- /* One or more encodings. We check for chunked and/or a compression
- algorithm. */
- /*
- * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
- * means that the server will send a series of "chunks". Each
- * chunk starts with line with info (including size of the
- * coming block) (terminated with CRLF), then a block of data
- * with the previously mentioned size. There can be any amount
- * of chunks, and a chunk-data set to zero signals the
- * end-of-chunks. */
-
- result = Curl_build_unencoding_stack(conn, headp + 18, TRUE);
- if(result)
- return result;
- }
- else if(!k->http_bodyless && checkprefix("Content-Encoding:", headp) &&
- data->set.str[STRING_ENCODING]) {
- /*
- * Process Content-Encoding. Look for the values: identity,
- * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
- * x-compress are the same as gzip and compress. (Sec 3.5 RFC
- * 2616). zlib cannot handle compress. However, errors are
- * handled further down when the response body is processed
- */
- result = Curl_build_unencoding_stack(conn, headp + 17, FALSE);
- if(result)
- return result;
- }
- else if(checkprefix("Retry-After:", headp)) {
- /* Retry-After = HTTP-date / delay-seconds */
- curl_off_t retry_after = 0; /* zero for unknown or "now" */
- time_t date = Curl_getdate_capped(&headp[12]);
- if(-1 == date) {
- /* not a date, try it as a decimal number */
- (void)curlx_strtoofft(&headp[12], NULL, 10, &retry_after);
- }
- else
- /* convert date to number of seconds into the future */
- retry_after = date - time(NULL);
- data->info.retry_after = retry_after; /* store it */
- }
- else if(!k->http_bodyless && checkprefix("Content-Range:", headp)) {
- /* Content-Range: bytes [num]-
- Content-Range: bytes: [num]-
- Content-Range: [num]-
- Content-Range: [asterisk]/[total]
-
- The second format was added since Sun's webserver
- JavaWebServer/1.1.1 obviously sends the header this way!
- The third added since some servers use that!
- The forth means the requested range was unsatisfied.
- */
-
- char *ptr = headp + 14;
-
- /* Move forward until first digit or asterisk */
- while(*ptr && !ISDIGIT(*ptr) && *ptr != '*')
- ptr++;
-
- /* if it truly stopped on a digit */
- if(ISDIGIT(*ptr)) {
- if(!curlx_strtoofft(ptr, NULL, 10, &k->offset)) {
- if(data->state.resume_from == k->offset)
- /* we asked for a resume and we got it */
- k->content_range = TRUE;
- }
- }
- else
- data->state.resume_from = 0; /* get everything */
- }
-#if !defined(CURL_DISABLE_COOKIES)
- else if(data->cookies && data->state.cookie_engine &&
- checkprefix("Set-Cookie:", headp)) {
- Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
- CURL_LOCK_ACCESS_SINGLE);
- Curl_cookie_add(data,
- data->cookies, TRUE, FALSE, headp + 11,
- /* If there is a custom-set Host: name, use it
- here, or else use real peer host name. */
- data->state.aptr.cookiehost?
- data->state.aptr.cookiehost:conn->host.name,
- data->state.up.path,
- (conn->handler->protocol&CURLPROTO_HTTPS)?
- TRUE:FALSE);
- Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
- }
-#endif
- else if(!k->http_bodyless && checkprefix("Last-Modified:", headp) &&
- (data->set.timecondition || data->set.get_filetime) ) {
- k->timeofdoc = Curl_getdate_capped(headp + strlen("Last-Modified:"));
- if(data->set.get_filetime)
- data->info.filetime = k->timeofdoc;
- }
- else if((checkprefix("WWW-Authenticate:", headp) &&
- (401 == k->httpcode)) ||
- (checkprefix("Proxy-authenticate:", headp) &&
- (407 == k->httpcode))) {
-
- bool proxy = (k->httpcode == 407) ? TRUE : FALSE;
- char *auth = Curl_copy_header_value(headp);
- if(!auth)
- return CURLE_OUT_OF_MEMORY;
-
- result = Curl_http_input_auth(conn, proxy, auth);
-
- free(auth);
-
- if(result)
- return result;
- }
-#ifdef USE_SPNEGO
- else if(checkprefix("Persistent-Auth", headp)) {
- struct negotiatedata *negdata = &conn->negotiate;
- struct auth *authp = &data->state.authhost;
- if(authp->picked == CURLAUTH_NEGOTIATE) {
- char *persistentauth = Curl_copy_header_value(headp);
- if(!persistentauth)
- return CURLE_OUT_OF_MEMORY;
- negdata->noauthpersist = checkprefix("false", persistentauth)?
- TRUE:FALSE;
- negdata->havenoauthpersist = TRUE;
- infof(data, "Negotiate: noauthpersist -> %d, header part: %s",
- negdata->noauthpersist, persistentauth);
- free(persistentauth);
- }
- }
-#endif
- else if((k->httpcode >= 300 && k->httpcode < 400) &&
- checkprefix("Location:", headp) &&
- !data->req.location) {
- /* this is the URL that the server advises us to use instead */
- char *location = Curl_copy_header_value(headp);
- if(!location)
- return CURLE_OUT_OF_MEMORY;
- if(!*location)
- /* ignore empty data */
- free(location);
- else {
- data->req.location = location;
-
- if(data->set.http_follow_location) {
- DEBUGASSERT(!data->req.newurl);
- data->req.newurl = strdup(data->req.location); /* clone */
- if(!data->req.newurl)
- return CURLE_OUT_OF_MEMORY;
-
- /* some cases of POST and PUT etc needs to rewind the data
- stream at this point */
- result = http_perhapsrewind(conn);
- if(result)
- return result;
- }
- }
- }
-
-#ifdef USE_HSTS
- /* If enabled, the header is incoming and this is over HTTPS */
- else if(data->hsts && checkprefix("Strict-Transport-Security:", headp) &&
- (conn->handler->flags & PROTOPT_SSL)) {
- CURLcode check =
- Curl_hsts_parse(data->hsts, data->state.up.hostname,
- &headp[ sizeof("Strict-Transport-Security:") -1 ]);
- if(check)
- infof(data, "Illegal STS header skipped\n");
-#ifdef DEBUGBUILD
- else
- infof(data, "Parsed STS header fine (%zu entries)\n",
- data->hsts->list.size);
-#endif
- }
-#endif
-#ifndef CURL_DISABLE_ALTSVC
- /* If enabled, the header is incoming and this is over HTTPS */
- else if(data->asi && checkprefix("Alt-Svc:", headp) &&
- ((conn->handler->flags & PROTOPT_SSL) ||
-#ifdef CURLDEBUG
- /* allow debug builds to circumvent the HTTPS restriction */
- getenv("CURL_ALTSVC_HTTP")
-#else
- 0
-#endif
- )) {
- /* the ALPN of the current request */
- enum alpnid id = (conn->httpversion == 20) ? ALPN_h2 : ALPN_h1;
- result = Curl_altsvc_parse(data, data->asi,
- &headp[ strlen("Alt-Svc:") ],
- id, conn->host.name,
- curlx_uitous(conn->remote_port));
- if(result)
- return result;
- }
-#endif
- else if(conn->handler->protocol & CURLPROTO_RTSP) {
- result = Curl_rtsp_parseheader(conn, headp);
- if(result)
- return result;
- }
+ result = Curl_http_header(data, conn, headp);
+ if(result)
+ return result;
/*
* End of header-checks. Write them to the client.