cf->ctx = ts;
}
- /* TODO: can we do blocking? */
/* We want "seamless" operations through HTTP proxy tunnel */
result = H1_CONNECT(cf, data, ts);
ssize_t nwritten;
CURLcode result;
- (void)eos; /* TODO, maybe useful for blocks? */
+ (void)eos;
if(ctx->tunnel.state != H2_TUNNEL_ESTABLISHED) {
*err = CURLE_SEND_ERROR;
return -1;
break;
case CURL_HTTP_VERSION_3:
/* We assume that silently not even trying H3 is ok here */
- /* TODO: should we fail instead? */
if(Curl_conn_may_http3(data, conn) == CURLE_OK)
alpn_ids[alpn_count++] = ALPN_h3;
alpn_ids[alpn_count++] = ALPN_h2;
return CURLE_OK;
}
- /* TODO: need to support blocking connect? */
if(blocking)
return CURLE_UNSUPPORTED_PROTOCOL;
/* A listening socket filter needs to be connected before the accept
* for some weird FTP interaction. This should be rewritten, so that
* FTP no longer does the socket checks and accept calls and delegates
- * all that to the filter. TODO. */
+ * all that to the filter. */
if(ctx->listening) {
Curl_pollset_set_in_only(data, ps, ctx->sock);
CURL_TRC_CF(data, cf, "adjust_pollset, listening, POLLIN fd=%"
/* QUIC needs a connected socket, nonblocking */
DEBUGASSERT(ctx->sock != CURL_SOCKET_BAD);
- rc = connect(ctx->sock, &ctx->addr.curl_sa_addr, /* NOLINT FIXME */
+ rc = connect(ctx->sock, &ctx->addr.curl_sa_addr, /* NOLINT */
(curl_socklen_t)ctx->addr.addrlen);
if(-1 == rc) {
return socket_connect_result(data, ctx->ip.remote_ip, SOCKERRNO);
cf_tcp_accept_connect,
cf_socket_close,
cf_socket_shutdown,
- cf_socket_get_host, /* TODO: not accurate */
+ cf_socket_get_host,
cf_socket_adjust_pollset,
cf_socket_data_pending,
cf_socket_send,
if(!cpool->idata)
return 1; /* bad */
cpool->idata->state.internal = TRUE;
- /* TODO: this is quirky. We need an internal handle for certain
- * operations, but we do not add it to the multi (if there is one).
- * But we give it the multi so that socket event operations can work.
- * Probably better to have an internal handle owned by the multi that
- * can be used for cpool operations. */
+ /* This is quirky. We need an internal handle for certain operations, but we
+ * do not add it to the multi (if there is one). We give it the multi so
+ * that socket event operations can work. Probably better to have an
+ * internal handle owned by the multi that can be used for cpool
+ * operations. */
cpool->idata->multi = multi;
#ifdef DEBUGBUILD
if(getenv("CURL_DEBUG"))
void *param)
{
struct curltime *now = param;
- /* TODO, shall we reap connections that return an error here? */
Curl_conn_upkeep(data, conn, now);
return 0; /* continue iteration */
}
return CURLE_OK;
}
- (void)blocking; /* TODO: do we want to support this? */
+ (void)blocking;
DEBUGASSERT(ctx);
*done = FALSE;
ongoing_auth ? " send, " : "");
/* We decided to abort the ongoing transfer */
streamclose(conn, "Mid-auth HTTP and much data left to send");
- /* FIXME: questionable manipulation here, can we do this differently? */
data->req.size = 0; /* do not download any more than 0 bytes */
}
return CURLE_OK;
}
else if(data->state.resume_from) {
/* This is because "resume" was selected */
- /* TODO: not sure if we want to send this header during authentication
+ /* Not sure if we want to send this header during authentication
* negotiation, but test1084 checks for it. In which case we have a
* "null" client reader installed that gives an unexpected length. */
curl_off_t total_len = data->req.authneg ?
}
#endif
else {
- /* We silently accept this as the final response.
- * TODO: this looks, uhm, wrong. What are we switching to if we
- * did not ask for an Upgrade? Maybe the application provided an
- * `Upgrade: xxx` header? */
+ /* We silently accept this as the final response. What are we
+ * switching to if we did not ask for an Upgrade? Maybe the
+ * application provided an `Upgrade: xxx` header? */
k->header = FALSE;
}
break;
if(!target_len || !hv_len)
goto out;
- /* TODO: we do not check HTTP_VERSION for conformity, should
- + do that when STRICT option is supplied. */
(void)hv;
/* The TARGET can be (rfc 9112, ch. 3.2):
(void) size; /* Always 1. */
- /* TODO: this loop is broken. If `nitems` is <= 4, some encoders will
- * return STOP_FILLING without adding any data and this loops infinitely. */
+ /* If `nitems` is <= 4, some encoders will return STOP_FILLING without
+ * adding any data and this loops infinitely. */
do {
hasread = FALSE;
ret = readback_part(part, buffer, nitems, &hasread);
switch(arg) {
case CURL_HTTP_VERSION_NONE:
#ifdef USE_HTTP2
- /* TODO: this seems an undesirable quirk to force a behaviour on
- * lower implementations that they should recognize independently? */
+ /* This seems an undesirable quirk to force a behaviour on lower
+ * implementations that they should recognize independently? */
arg = CURL_HTTP_VERSION_2TLS;
#endif
/* accepted */
/* returned not-zero, this an error */
if(result) {
keepon = FALSE;
- /* TODO: in test 1452, macOS sees a ECONNRESET sometimes?
- * Is this the telnet test server not shutting down the socket
- * in a clean way? Seems to be timing related, happens more
- * on slow debug build */
+ /* In test 1452, macOS sees an ECONNRESET sometimes? Is this the
+ * telnet test server not shutting down the socket in a clean way?
+ * Seems to be timing related, happens more on slow debug build */
if(data->state.os_errno == ECONNRESET) {
DEBUGF(infof(data, "telnet_do, unexpected ECONNRESET on recv"));
}
#endif
if(unix_path) {
- /* TODO, this only works if previous transport is TRNSPRT_TCP. Check it? */
+ /* This only works if previous transport is TRNSPRT_TCP. Check it? */
conn->transport = TRNSPRT_UNIX;
return resolve_unix(data, conn, unix_path);
}
* We want to reuse an existing conn to the remote endpoint.
* Since connection reuse does not match on conn->host necessarily, we
* switch `existing` conn to `temp` conn's host settings.
- * TODO: is this correct in the case of TLS connections that have
+ * Is this correct in the case of TLS connections that have
* used the original hostname in SNI to negotiate? Do we send
* requests for another host through the different SNI?
*/
if(result)
goto out;
- /* FIXME: do we really want to run this every time we add a transfer? */
Curl_cpool_prune_dead(data);
/*************************************************************
/**
* Priority information for an easy handle in relation to others
* on the same connection.
- * TODO: we need to adapt it to the new priority scheme as defined in RFC 9218
*/
struct Curl_data_priority {
#ifdef USE_NGHTTP2
CURLcode result;
bool rv = FALSE;
- /* TODO: we would like to limit the amount of data we are buffer here.
- * There seems to be no mechanism in msh3 to adjust flow control and
- * it is undocumented what happens if we return FALSE here or less
- * length (buflen is an inout parameter).
+ /* We would like to limit the amount of data we buffer here. There seems
+ * to be no mechanism in msh3 to adjust flow control and it is undocumented
+ * what happens if we return FALSE here or less length (buflen is an inout
+ * parameter).
*/
(void)Request;
if(!stream)
goto out;
}
- /* TODO - msh3/msquic will hold onto this memory until the send complete
- event. How do we make sure curl does not free it until then? */
+ /* msh3/msquic will hold onto this memory until the send complete event.
+ How do we make sure curl does not free it until then? */
*err = CURLE_OK;
nwritten = len;
}
MSH3_SET_PORT(&addr, (uint16_t)cf->conn->remote_port);
if(verify && (conn_config->CAfile || conn_config->CApath)) {
- /* TODO: need a way to provide trust anchors to MSH3 */
+ /* Need a way to provide trust anchors to MSH3 */
#ifdef DEBUGBUILD
/* we need this for our test cases to run */
CURL_TRC_CF(data, cf, "non-standard CA not supported, "
switch(query) {
case CF_QUERY_MAX_CONCURRENT: {
- /* TODO: we do not have access to this so far, fake it */
+ /* We do not have access to this so far, fake it */
(void)ctx;
*pres1 = 100;
return CURLE_OK;
(void)data;
(void)conn;
- (void)ai; /* TODO: msh3 resolves itself? */
+ (void)ai; /* msh3 resolves itself? */
ctx = calloc(1, sizeof(*ctx));
if(!ctx) {
result = CURLE_OUT_OF_MEMORY;
struct Curl_cfilter *cf = user_data;
struct cf_ngtcp2_ctx *ctx = cf->ctx;
- (void)ctx; /* TODO: need an easy handle to infof() message */
+ (void)ctx; /* need an easy handle to infof() message */
va_list ap;
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
if(ctx->tls_vrfy_result)
return ctx->tls_vrfy_result;
- (void)eos; /* TODO: use for stream EOF and block handling */
+ (void)eos; /* use for stream EOF and block handling */
result = cf_progress_ingress(cf, data, &pktx);
if(result) {
*err = result;
struct Curl_easy *data,
bool pause)
{
- /* TODO: there seems right now no API in ngtcp2 to shrink/enlarge
- * the streams windows. As we do in HTTP/2. */
+ /* There seems to exist no API in ngtcp2 to shrink/enlarge the stream
+ * windows, as we do in HTTP/2. */
if(!pause) {
h3_drain_stream(cf, data);
Curl_expire(data, 0, EXPIRE_RUN_NOW);
ssize_t nwritten;
CURLcode result;
- (void)eos; /* TODO: use to end stream */
+ (void)eos; /* use to end stream */
CF_DATA_SAVE(save, cf, data);
DEBUGASSERT(cf->connected);
DEBUGASSERT(ctx->tls.ossl.ssl);
nwritten = quiche_h3_send_body(ctx->h3c, ctx->qconn, stream->id,
(uint8_t *)buf, len, eos);
if(nwritten == QUICHE_H3_ERR_DONE || (nwritten == 0 && len > 0)) {
- /* TODO: we seem to be blocked on flow control and should HOLD
- * sending. But when do we open again? */
+ /* We seem to be blocked on flow control and should HOLD sending. But
+ * when do we open again? */
if(!quiche_conn_stream_writable(ctx->qconn, stream->id, len)) {
CURL_TRC_CF(data, cf, "[%" FMT_PRIu64 "] send_body(len=%zu) "
"-> window exhausted", stream->id, len);
struct Curl_easy *data,
bool pause)
{
- /* TODO: there seems right now no API in quiche to shrink/enlarge
- * the streams windows. As we do in HTTP/2. */
+ /* There seems to exist no API in quiche to shrink/enlarge the stream
+ * windows, as we do in HTTP/2. */
if(!pause) {
h3_drain_stream(cf, data);
Curl_expire(data, 0, EXPIRE_RUN_NOW);
unsigned char *sha256sum,
size_t sha256len UNUSED_PARAM)
{
- /* TODO: explain this for different mbedtls 2.x vs 3 version */
(void)sha256len;
#if MBEDTLS_VERSION_NUMBER < 0x02070000
mbedtls_sha256(input, inputlen, sha256sum, 0);
sizeof(error_buffer)));
/* Do not attempt to load it again */
data->state.provider_failed = TRUE;
- /* FIXME not the right error but much less fuss than creating a new
- * public one */
return CURLE_SSL_ENGINE_NOTFOUND;
}
data->state.provider = TRUE;
servername_type = SSL_get_servername_type(ssl);
inner = SSL_get_servername(ssl, servername_type);
SSL_get0_ech_name_override(ssl, &outer, &out_name_len);
- /* TODO: get the inner from BoringSSL */
infof(data, "ECH: retry_configs for %s from %s, %d %d",
inner ? inner : "NULL", outer ? outer : "NULL", reason, rv);
#endif
Curl_pSecFn->FreeContextBuffer(outbuf.pvBuffer);
if(!result) {
if(written < (ssize_t)outbuf.cbBuffer) {
- /* TODO: handle partial sends */
failf(data, "schannel: failed to send close msg: %s"
" (bytes written: %zd)", curl_easy_strerror(result), written);
result = CURLE_SEND_ERROR;
else {
*palpn = CURL_HTTP_VERSION_NONE;
failf(data, "unsupported ALPN protocol: '%.*s'", (int)proto_len, proto);
- /* TODO: do we want to fail this? Previous code just ignored it and
- * some vtls backends even ignore the return code of this function. */
+ /* Previous code just ignored it and some vtls backends even ignore the
+ * return code of this function. */
/* return CURLE_NOT_BUILT_IN; */
goto out;
}
* or close/re-open the file so that the next attempt starts
* over from the beginning.
*
- * TODO: similar action for the upload case. We might need
- * to start over reading from a previous point if we have
- * uploaded something when this was returned.
+ * For the upload case, we might need to start over reading from a
+ * previous point if we have uploaded something when this was
+ * returned.
*/
break;
}
* the headers, we need to open it in append mode, since transfers
* might finish in any order.
* The first transfer just clears the file.
- * TODO: Consider placing the file handle inside the
- * OperationConfig, so that it does not need to be opened/closed
- * for every transfer.
+ *
+ * Consider placing the file handle inside the OperationConfig, so
+ * that it does not need to be opened/closed for every transfer.
*/
if(config->create_dirs) {
result = create_dir_hierarchy(config->headerfile, global);
* Read commands from FILE (set with --config). The commands control how to
* act and is reset to defaults each client TCP connect.
*
- * Config file keywords:
- *
- * TODO
*/
/* based on sockfilt.c */
if(!packet)
return 1;
- packet[0] = MQTT_MSG_PUBLISH; /* TODO: set QoS? */
+ packet[0] = MQTT_MSG_PUBLISH;
memcpy(&packet[1], rembuffer, encodedlen);
(void)packetid;
topiclen = (size_t)(buffer[1 + bytes] << 8) | buffer[2 + bytes];
logmsg("Got %zu bytes topic", topiclen);
- /* TODO: verify topiclen */
#ifdef QOS
- /* TODO: handle packetid if there is one. Send puback if QoS > 0 */
+ /* Handle packetid if there is one. Send puback if QoS > 0 */
puback(dump, fd, 0);
#endif
/* expect a disconnect here */