Used to influence the buffer chunk size used for WebSocket encoding and
decoding.
+
+## CURL_FORBID_REUSE
+
+Used to set the CURLOPT_FORBID_REUSE option on each transfer initiated
+by the curl command line tool. The value of the environment variable
+does not matter.
+
+## CURL_GRACEFUL_SHUTDOWN
+
+Perform a blocking, graceful shutdown of all remaining connections when
+a multi handle is destroyed. This also applies to easy handles run via
+curl_easy_perform(), since they use a multi handle internally. The value
+of the environment variable is the shutdown timeout in milliseconds.
\ No newline at end of file
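For illustration, this is roughly how a debug build picks these variables up; it mirrors the `getenv()` handling added elsewhere in this patch (the helper names below are illustrative only, they are not part of the patch):

```c
#include <stdlib.h>   /* getenv, strtol */
#include <limits.h>   /* INT_MAX */

/* Illustrative helper: CURL_GRACEFUL_SHUTDOWN carries the shutdown
 * timeout in milliseconds; 0 means "no graceful shutdown wait". */
static int debug_graceful_shutdown_ms(void)
{
  const char *p = getenv("CURL_GRACEFUL_SHUTDOWN");
  if(p) {
    long l = strtol(p, NULL, 10);
    if(l > 0 && l < INT_MAX)
      return (int)l;
  }
  return 0;
}

/* Illustrative helper: for CURL_FORBID_REUSE only the presence of the
 * variable matters, not its value. */
static int debug_forbid_reuse(void)
{
  return getenv("CURL_FORBID_REUSE") != NULL;
}
```

Both variables are only honored in debug builds (`DEBUGBUILD`), as the hunks in `tool_operate.c` and `conncache.c` below show.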
BIT(conn_closed);
BIT(rcvd_goaway);
BIT(sent_goaway);
- BIT(shutdown);
BIT(nw_out_blocked);
};
struct Curl_easy *data, bool *done)
{
struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ struct cf_call_data save;
CURLcode result;
int rv;
- if(!cf->connected || !ctx->h2 || ctx->shutdown) {
+ if(!cf->connected || !ctx->h2 || cf->shutdown || ctx->conn_closed) {
*done = TRUE;
return CURLE_OK;
}
+ CF_DATA_SAVE(save, cf, data);
+
if(!ctx->sent_goaway) {
rv = nghttp2_submit_goaway(ctx->h2, NGHTTP2_FLAG_NONE,
0, 0,
if(rv) {
failf(data, "nghttp2_submit_goaway() failed: %s(%d)",
nghttp2_strerror(rv), rv);
- return CURLE_SEND_ERROR;
+ result = CURLE_SEND_ERROR;
+ goto out;
}
ctx->sent_goaway = TRUE;
}
if(!result && nghttp2_session_want_read(ctx->h2))
result = proxy_h2_progress_ingress(cf, data);
- *done = !result && !nghttp2_session_want_write(ctx->h2) &&
- !nghttp2_session_want_read(ctx->h2);
- ctx->shutdown = (result || *done);
+ *done = (ctx->conn_closed ||
+ (!result && !nghttp2_session_want_write(ctx->h2) &&
+ !nghttp2_session_want_read(ctx->h2)));
+out:
+ CF_DATA_RESTORE(cf, save);
+ cf->shutdown = (result || *done);
return result;
}
Curl_pollset_set(data, ps, sock, want_recv, want_send);
CF_DATA_RESTORE(cf, save);
}
- else if(ctx->sent_goaway && !ctx->shutdown) {
+ else if(ctx->sent_goaway && !cf->shutdown) {
/* shutdown in progress */
CF_DATA_SAVE(save, cf, data);
want_send = nghttp2_session_want_write(ctx->h2);
struct curltime now;
DEBUGASSERT(data->conn);
- /* it is valid to call that without filters being present */
+ /* Get the first connected filter that is not shut down already. */
cf = data->conn->cfilter[sockindex];
+ while(cf && (!cf->connected || cf->shutdown))
+ cf = cf->next;
+
if(!cf) {
*done = TRUE;
return CURLE_OK;
}
while(cf) {
- bool cfdone = FALSE;
- result = cf->cft->do_shutdown(cf, data, &cfdone);
- if(result) {
- CURL_TRC_CF(data, cf, "shut down failed with %d", result);
- return result;
- }
- else if(!cfdone) {
- CURL_TRC_CF(data, cf, "shut down not done yet");
- return CURLE_OK;
+ if(!cf->shutdown) {
+ bool cfdone = FALSE;
+ result = cf->cft->do_shutdown(cf, data, &cfdone);
+ if(result) {
+ CURL_TRC_CF(data, cf, "shut down failed with %d", result);
+ return result;
+ }
+ else if(!cfdone) {
+ CURL_TRC_CF(data, cf, "shut down not done yet");
+ return CURLE_OK;
+ }
+ CURL_TRC_CF(data, cf, "shut down successfully");
+ cf->shutdown = TRUE;
}
- CURL_TRC_CF(data, cf, "shut down successfully");
cf = cf->next;
}
*done = (!result);
/* Get the lowest not-connected filter, if there are any */
while(cf && !cf->connected && cf->next && !cf->next->connected)
cf = cf->next;
+ /* Skip all filters that have already shut down */
+ while(cf && cf->shutdown)
+ cf = cf->next;
/* From there on, give all filters a chance to adjust the pollset.
* Lower filters are called later, so they may override */
while(cf) {
struct connectdata *conn; /* the connection this filter belongs to */
int sockindex; /* the index the filter is installed at */
BIT(connected); /* != 0 iff this filter is connected */
+ BIT(shutdown); /* != 0 iff this filter has shut down */
};
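With the `shutdown` flag now living on the connection filter itself, the `do_shutdown` implementations touched by this patch all follow the same general shape. A schematic sketch (not any particular filter; error handling reduced to the essentials):

```c
/* Schematic shape of a filter's do_shutdown using the new cf->shutdown bit.
 * Illustrative only; see the concrete HTTP/2 and TLS filters in this patch. */
static CURLcode example_cft_shutdown(struct Curl_cfilter *cf,
                                     struct Curl_easy *data, bool *done)
{
  CURLcode result = CURLE_OK;
  (void)data;

  if(!cf->connected || cf->shutdown) {
    /* never connected, or shutdown already finished/failed: nothing to do */
    *done = TRUE;
    return CURLE_OK;
  }

  /* ... protocol specific work: send GOAWAY/close notify, drain input,
   * possibly return with *done == FALSE so it gets called again ... */
  *done = TRUE;

  /* remember the outcome so the filter is not shut down twice */
  cf->shutdown = (result || *done);
  return result;
}
```

Once a filter reports `*done`, `Curl_conn_shutdown()` marks it with `cf->shutdown = TRUE` and moves on to the next filter in the chain.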
/* Default implementations for the type functions, implementing nop. */
#include "urldata.h"
#include "url.h"
+#include "cfilters.h"
#include "progress.h"
#include "multiif.h"
#include "sendf.h"
#include "conncache.h"
+#include "http_negotiate.h"
+#include "http_ntlm.h"
#include "share.h"
#include "sigpipe.h"
#include "connect.h"
+#include "select.h"
#include "strcase.h"
/* The last 3 #include files should be in this order */
#define HASHKEY_SIZE 128
+static void connc_discard_conn(struct conncache *connc,
+ struct Curl_easy *last_data,
+ struct connectdata *conn,
+ bool aborted);
+static void connc_disconnect(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct conncache *connc,
+ bool do_shutdown);
+static void connc_run_conn_shutdown(struct Curl_easy *data,
+ struct connectdata *conn,
+ bool *done);
+static void connc_run_conn_shutdown_handler(struct Curl_easy *data,
+ struct connectdata *conn);
+static CURLcode connc_update_shutdown_ev(struct Curl_multi *multi,
+ struct Curl_easy *data,
+ struct connectdata *conn);
+static void connc_shutdown_all(struct conncache *connc, int timeout_ms);
+
static CURLcode bundle_create(struct connectbundle **bundlep)
{
DEBUGASSERT(*bundlep == NULL);
bundle_destroy(b);
}
-int Curl_conncache_init(struct conncache *connc, size_t size)
+int Curl_conncache_init(struct conncache *connc,
+ struct Curl_multi *multi, size_t size)
{
/* allocate a new easy handle to use when closing cached connections */
connc->closure_handle = curl_easy_init();
if(!connc->closure_handle)
return 1; /* bad */
connc->closure_handle->state.internal = true;
+#ifdef DEBUGBUILD
+ if(getenv("CURL_DEBUG"))
+ connc->closure_handle->set.verbose = true;
+#endif
Curl_hash_init(&connc->hash, size, Curl_hash_str,
Curl_str_key_compare, free_bundle_hash_entry);
connc->closure_handle->state.conn_cache = connc;
+ connc->multi = multi;
+ Curl_llist_init(&connc->shutdowns.conn_list, NULL);
return 0; /* good */
}
void Curl_conncache_destroy(struct conncache *connc)
{
- if(connc)
+ if(connc) {
Curl_hash_destroy(&connc->hash);
+ connc->multi = NULL;
+ DEBUGASSERT(!Curl_llist_count(&connc->shutdowns.conn_list));
+ }
}
/* creates a key to find a bundle for this connection */
return bundle;
}
-static void *conncache_add_bundle(struct conncache *connc,
- char *key,
- struct connectbundle *bundle)
+static void *connc_add_bundle(struct conncache *connc,
+ char *key, struct connectbundle *bundle)
{
return Curl_hash_add(&connc->hash, key, strlen(key), bundle);
}
-static void conncache_remove_bundle(struct conncache *connc,
- struct connectbundle *bundle)
+static void connc_remove_bundle(struct conncache *connc,
+ struct connectbundle *bundle)
{
struct Curl_hash_iterator iter;
struct Curl_hash_element *he;
hashkey(conn, key, sizeof(key));
- if(!conncache_add_bundle(data->state.conn_cache, key, bundle)) {
+ if(!connc_add_bundle(data->state.conn_cache, key, bundle)) {
bundle_destroy(bundle);
result = CURLE_OUT_OF_MEMORY;
goto unlock;
return result;
}
+static void connc_remove_conn(struct conncache *connc,
+ struct connectdata *conn)
+{
+ struct connectbundle *bundle = conn->bundle;
+
+ /* The bundle pointer can be NULL, since this function can be called
+ due to a failed connection attempt, before being added to a bundle */
+ if(bundle) {
+ bundle_remove_conn(bundle, conn);
+ if(connc && bundle->num_connections == 0)
+ connc_remove_bundle(connc, bundle);
+ conn->bundle = NULL; /* removed from it */
+ if(connc)
+ connc->num_conn--;
+ }
+}
+
/*
* Removes the connectdata object from the connection cache, but the transfer
* still owns this connection.
void Curl_conncache_remove_conn(struct Curl_easy *data,
struct connectdata *conn, bool lock)
{
- struct connectbundle *bundle = conn->bundle;
struct conncache *connc = data->state.conn_cache;
- /* The bundle pointer can be NULL, since this function can be called
- due to a failed connection attempt, before being added to a bundle */
- if(bundle) {
- if(lock) {
- CONNCACHE_LOCK(data);
- }
- bundle_remove_conn(bundle, conn);
- if(bundle->num_connections == 0)
- conncache_remove_bundle(connc, bundle);
- conn->bundle = NULL; /* removed from it */
- if(connc) {
- connc->num_conn--;
- DEBUGF(infof(data, "The cache now contains %zu members",
- connc->num_conn));
- }
- if(lock) {
- CONNCACHE_UNLOCK(data);
- }
- }
+ if(lock)
+ CONNCACHE_LOCK(data);
+ connc_remove_conn(connc, conn);
+ if(lock)
+ CONNCACHE_UNLOCK(data);
+ if(connc)
+ DEBUGF(infof(data, "The cache now contains %zu members",
+ connc->num_conn));
}
/* This function iterates the entire connection cache and calls the function
up a cache!
*/
static struct connectdata *
-conncache_find_first_connection(struct conncache *connc)
+connc_find_first_connection(struct conncache *connc)
{
struct Curl_hash_iterator iter;
struct Curl_hash_element *he;
important that details from this (unrelated) disconnect does not
taint meta-data in the data handle. */
struct conncache *connc = data->state.conn_cache;
- Curl_disconnect(connc->closure_handle, conn_candidate,
- /* dead_connection */ FALSE);
+ connc_disconnect(NULL, conn_candidate, connc, TRUE);
}
}
return conn_candidate;
}
-void Curl_conncache_close_all_connections(struct conncache *connc)
+static void connc_shutdown_discard_all(struct conncache *connc)
+{
+ struct Curl_llist_element *e = connc->shutdowns.conn_list.head;
+ struct connectdata *conn;
+
+ if(!e)
+ return;
+
+  DEBUGF(infof(connc->closure_handle, "connc_shutdown_discard_all"));
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ connc->shutdowns.iter_locked = TRUE;
+ while(e) {
+ conn = e->ptr;
+ Curl_llist_remove(&connc->shutdowns.conn_list, e, NULL);
+ DEBUGF(infof(connc->closure_handle, "discard connection #%"
+ CURL_FORMAT_CURL_OFF_T, conn->connection_id));
+ connc_disconnect(NULL, conn, connc, FALSE);
+ e = connc->shutdowns.conn_list.head;
+ }
+ connc->shutdowns.iter_locked = FALSE;
+}
+
+static void connc_close_all(struct conncache *connc)
{
+ struct Curl_easy *data = connc->closure_handle;
struct connectdata *conn;
+ int timeout_ms = 0;
SIGPIPE_VARIABLE(pipe_st);
- if(!connc->closure_handle)
+
+ if(!data)
return;
- conn = conncache_find_first_connection(connc);
+ /* Move all connections to the shutdown list */
+ conn = connc_find_first_connection(connc);
while(conn) {
- sigpipe_ignore(connc->closure_handle, &pipe_st);
+ connc_remove_conn(connc, conn);
+ sigpipe_ignore(data, &pipe_st);
/* This will remove the connection from the cache */
connclose(conn, "kill all");
Curl_conncache_remove_conn(connc->closure_handle, conn, TRUE);
- Curl_disconnect(connc->closure_handle, conn, FALSE);
+ connc_discard_conn(connc, connc->closure_handle, conn, FALSE);
sigpipe_restore(&pipe_st);
- conn = conncache_find_first_connection(connc);
+ conn = connc_find_first_connection(connc);
}
- sigpipe_ignore(connc->closure_handle, &pipe_st);
+ /* Just for testing, run graceful shutdown */
+#ifdef DEBUGBUILD
+ {
+ char *p = getenv("CURL_GRACEFUL_SHUTDOWN");
+ if(p) {
+ long l = strtol(p, NULL, 10);
+ if(l > 0 && l < INT_MAX)
+ timeout_ms = (int)l;
+ }
+ }
+#endif
+ connc_shutdown_all(connc, timeout_ms);
+
+ /* discard all connections in the shutdown list */
+ connc_shutdown_discard_all(connc);
- Curl_hostcache_clean(connc->closure_handle,
- connc->closure_handle->dns.hostcache);
- Curl_close(&connc->closure_handle);
+ sigpipe_ignore(data, &pipe_st);
+ Curl_hostcache_clean(data, data->dns.hostcache);
+ Curl_close(&data);
sigpipe_restore(&pipe_st);
}
+void Curl_conncache_close_all_connections(struct conncache *connc)
+{
+ connc_close_all(connc);
+}
+
+static void connc_shutdown_discard_oldest(struct conncache *connc)
+{
+ struct Curl_llist_element *e;
+ struct connectdata *conn;
+ SIGPIPE_VARIABLE(pipe_st);
+
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ if(connc->shutdowns.iter_locked)
+ return;
+
+ e = connc->shutdowns.conn_list.head;
+ if(e) {
+ conn = e->ptr;
+ Curl_llist_remove(&connc->shutdowns.conn_list, e, NULL);
+ sigpipe_ignore(connc->closure_handle, &pipe_st);
+ connc_disconnect(NULL, conn, connc, FALSE);
+ sigpipe_restore(&pipe_st);
+ }
+}
+
+static void connc_discard_conn(struct conncache *connc,
+ struct Curl_easy *last_data,
+ struct connectdata *conn,
+ bool aborted)
+{
+ /* `last_data`, if present, is the transfer that last worked with
+ * the connection. It is present when the connection is being shut down
+ * via `Curl_conncache_discard_conn()`, e.g. when the transfer failed
+ * or does not allow connection reuse.
+ * Using the original handle is necessary for shutting down the protocol
+ * handler belonging to the connection. Protocols like 'file:' rely on
+ * being invoked to clean up their allocations in the easy handle.
+ * When a connection comes from the cache, the transfer is no longer
+ * there and we use the cache's own closure handle.
+ */
+ struct Curl_easy *data = last_data? last_data : connc->closure_handle;
+ bool done = FALSE;
+
+ DEBUGASSERT(connc);
+ DEBUGASSERT(!conn->bundle);
+
+ /*
+ * If this connection isn't marked to force-close, leave it open if there
+ * are other users of it
+ */
+ if(CONN_INUSE(conn) && !aborted) {
+ DEBUGF(infof(data, "[CCACHE] not discarding #%" CURL_FORMAT_CURL_OFF_T
+ " still in use by %zu transfers", conn->connection_id,
+ CONN_INUSE(conn)));
+ return;
+ }
+
+ /* treat the connection as aborted in CONNECT_ONLY situations, we do
+ * not know what the APP did with it. */
+ if(conn->connect_only)
+ aborted = TRUE;
+ conn->bits.aborted = aborted;
+
+  /* We do not shut down dead connections. The term 'dead' can be misleading
+ * here, as we also mark errored connections/transfers as 'dead'.
+ * If we do a shutdown for an aborted transfer, the server might think
+ * it was successful otherwise (for example an ftps: upload). This is
+ * not what we want. */
+ if(aborted)
+ done = TRUE;
+ else if(!done) {
+ /* Attempt to shutdown the connection right away. */
+ Curl_attach_connection(data, conn);
+ connc_run_conn_shutdown(data, conn, &done);
+ DEBUGF(infof(data, "[CCACHE] shutdown #%" CURL_FORMAT_CURL_OFF_T
+ ", done=%d",conn->connection_id, done));
+ Curl_detach_connection(data);
+ }
+
+ if(done) {
+ connc_disconnect(data, conn, connc, FALSE);
+ return;
+ }
+
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ if(connc->shutdowns.iter_locked) {
+ DEBUGF(infof(data, "[CCACHE] discarding #%" CURL_FORMAT_CURL_OFF_T
+ ", list locked", conn->connection_id));
+ connc_disconnect(data, conn, connc, FALSE);
+ return;
+ }
+
+ /* Add the connection to our shutdown list for non-blocking shutdown
+ * during multi processing. */
+  if(data->multi && data->multi->max_shutdown_connections > 0 &&
+     ((long)Curl_llist_count(&connc->shutdowns.conn_list) >=
+      data->multi->max_shutdown_connections)) {
+ DEBUGF(infof(data, "[CCACHE] discarding oldest shutdown connection "
+ "due to limit of %ld",
+ data->multi->max_shutdown_connections));
+ connc_shutdown_discard_oldest(connc);
+ }
+
+ if(data->multi && data->multi->socket_cb) {
+ DEBUGASSERT(connc == &data->multi->conn_cache);
+ if(connc_update_shutdown_ev(data->multi, data, conn)) {
+ DEBUGF(infof(data, "[CCACHE] update events for shutdown failed, "
+ "discarding #%" CURL_FORMAT_CURL_OFF_T,
+ conn->connection_id));
+ connc_disconnect(data, conn, connc, FALSE);
+ return;
+ }
+ }
+
+ Curl_llist_append(&connc->shutdowns.conn_list, conn, &conn->bundle_node);
+ DEBUGF(infof(data, "[CCACHE] added #%" CURL_FORMAT_CURL_OFF_T
+ " to shutdown list of length %zu", conn->connection_id,
+ Curl_llist_count(&connc->shutdowns.conn_list)));
+
+ /* Forget what this transfer last polled, the connection is ours now.
+ * If we do not clear this, the event handling for `data` will tell
+ * the callback to remove the connection socket after we return here. */
+ memset(&data->last_poll, 0, sizeof(data->last_poll));
+}
+
+void Curl_conncache_disconnect(struct Curl_easy *data,
+ struct connectdata *conn,
+ bool aborted)
+{
+ DEBUGASSERT(data);
+  /* Connection must no longer be in any connection cache */
+ DEBUGASSERT(!conn->bundle);
+
+ if(data->multi) {
+ /* Add it to the multi's conncache for shutdown handling */
+ infof(data, "%s connection #%" CURL_FORMAT_CURL_OFF_T,
+ aborted? "closing" : "shutting down", conn->connection_id);
+ connc_discard_conn(&data->multi->conn_cache, data, conn, aborted);
+ }
+ else {
+ /* No multi available. Make a best-effort shutdown + close */
+ infof(data, "closing connection #%" CURL_FORMAT_CURL_OFF_T,
+ conn->connection_id);
+ DEBUGASSERT(!conn->bundle);
+ connc_run_conn_shutdown_handler(data, conn);
+ connc_disconnect(data, conn, NULL, !aborted);
+ }
+}
+
+static void connc_run_conn_shutdown_handler(struct Curl_easy *data,
+ struct connectdata *conn)
+{
+ if(!conn->bits.shutdown_handler) {
+ if(conn->dns_entry) {
+ Curl_resolv_unlock(data, conn->dns_entry);
+ conn->dns_entry = NULL;
+ }
+
+ /* Cleanup NTLM connection-related data */
+ Curl_http_auth_cleanup_ntlm(conn);
+
+ /* Cleanup NEGOTIATE connection-related data */
+ Curl_http_auth_cleanup_negotiate(conn);
+
+ if(conn->handler && conn->handler->disconnect) {
+ /* This is set if protocol-specific cleanups should be made */
+ DEBUGF(infof(data, "connection #%" CURL_FORMAT_CURL_OFF_T
+ ", shutdown protocol handler (aborted=%d)",
+ conn->connection_id, conn->bits.aborted));
+ conn->handler->disconnect(data, conn, conn->bits.aborted);
+ }
+
+ /* possible left-overs from the async name resolvers */
+ Curl_resolver_cancel(data);
+
+ conn->bits.shutdown_handler = TRUE;
+ }
+}
+
+static void connc_run_conn_shutdown(struct Curl_easy *data,
+ struct connectdata *conn,
+ bool *done)
+{
+ CURLcode r1, r2;
+ bool done1, done2;
+
+ /* We expect to be attached when called */
+ DEBUGASSERT(data->conn == conn);
+
+ connc_run_conn_shutdown_handler(data, conn);
+
+ if(conn->bits.shutdown_filters) {
+ *done = TRUE;
+ return;
+ }
+
+ if(!conn->connect_only && Curl_conn_is_connected(conn, FIRSTSOCKET))
+ r1 = Curl_conn_shutdown(data, FIRSTSOCKET, &done1);
+ else {
+ r1 = CURLE_OK;
+ done1 = TRUE;
+ }
+
+ if(!conn->connect_only && Curl_conn_is_connected(conn, SECONDARYSOCKET))
+ r2 = Curl_conn_shutdown(data, SECONDARYSOCKET, &done2);
+ else {
+ r2 = CURLE_OK;
+ done2 = TRUE;
+ }
+
+ /* we are done when any failed or both report success */
+ *done = (r1 || r2 || (done1 && done2));
+ if(*done)
+ conn->bits.shutdown_filters = TRUE;
+}
+
+CURLcode Curl_conncache_add_pollfds(struct conncache *connc,
+ struct curl_pollfds *cpfds)
+{
+ CURLcode result = CURLE_OK;
+
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ connc->shutdowns.iter_locked = TRUE;
+ if(connc->shutdowns.conn_list.head) {
+ struct Curl_llist_element *e;
+ struct easy_pollset ps;
+ struct connectdata *conn;
+
+ for(e = connc->shutdowns.conn_list.head; e; e = e->next) {
+ conn = e->ptr;
+ memset(&ps, 0, sizeof(ps));
+ Curl_attach_connection(connc->closure_handle, conn);
+ Curl_conn_adjust_pollset(connc->closure_handle, &ps);
+ Curl_detach_connection(connc->closure_handle);
+
+ result = Curl_pollfds_add_ps(cpfds, &ps);
+ if(result) {
+ Curl_pollfds_cleanup(cpfds);
+ goto out;
+ }
+ }
+ }
+out:
+ connc->shutdowns.iter_locked = FALSE;
+ return result;
+}
+
+CURLcode Curl_conncache_add_waitfds(struct conncache *connc,
+ struct curl_waitfds *cwfds)
+{
+ CURLcode result = CURLE_OK;
+
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ connc->shutdowns.iter_locked = TRUE;
+ if(connc->shutdowns.conn_list.head) {
+ struct Curl_llist_element *e;
+ struct easy_pollset ps;
+ struct connectdata *conn;
+
+ for(e = connc->shutdowns.conn_list.head; e; e = e->next) {
+ conn = e->ptr;
+ memset(&ps, 0, sizeof(ps));
+ Curl_attach_connection(connc->closure_handle, conn);
+ Curl_conn_adjust_pollset(connc->closure_handle, &ps);
+ Curl_detach_connection(connc->closure_handle);
+
+ result = Curl_waitfds_add_ps(cwfds, &ps);
+ if(result)
+ goto out;
+ }
+ }
+out:
+ connc->shutdowns.iter_locked = FALSE;
+ return result;
+}
+
+static void connc_perform(struct conncache *connc)
+{
+ struct Curl_easy *data = connc->closure_handle;
+ struct Curl_llist_element *e = connc->shutdowns.conn_list.head;
+ struct Curl_llist_element *enext;
+ struct connectdata *conn;
+ bool done;
+
+ if(!e)
+ return;
+
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ DEBUGF(infof(data, "[CCACHE] perform, %zu connections being shutdown",
+ Curl_llist_count(&connc->shutdowns.conn_list)));
+ connc->shutdowns.iter_locked = TRUE;
+ while(e) {
+ enext = e->next;
+ conn = e->ptr;
+ Curl_attach_connection(data, conn);
+ connc_run_conn_shutdown(data, conn, &done);
+ DEBUGF(infof(data, "[CCACHE] shutdown #%" CURL_FORMAT_CURL_OFF_T
+ ", done=%d", conn->connection_id, done));
+ Curl_detach_connection(data);
+ if(done) {
+ Curl_llist_remove(&connc->shutdowns.conn_list, e, NULL);
+ connc_disconnect(NULL, conn, connc, FALSE);
+ }
+ e = enext;
+ }
+ connc->shutdowns.iter_locked = FALSE;
+}
+
+void Curl_conncache_multi_perform(struct Curl_multi *multi)
+{
+ connc_perform(&multi->conn_cache);
+}
+
+
+/*
+ * Disconnects the given connection. Note the connection may not be the
+ * primary connection, like when freeing room in the connection cache or
+ * killing off a dead, old connection.
+ *
+ * A connection needs an easy handle when closing down. We support passing
+ * one in separately since the connection being closed here is often already
+ * disassociated from an easy handle.
+ *
+ * This function MUST NOT reset state in the Curl_easy struct if that
+ * isn't strictly bound to the life-time of *this* particular connection.
+ *
+ */
+static void connc_disconnect(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct conncache *connc,
+ bool do_shutdown)
+{
+ bool done;
+
+ /* there must be a connection to close */
+ DEBUGASSERT(conn);
+ /* it must be removed from the connection cache */
+ DEBUGASSERT(!conn->bundle);
+ /* there must be an associated transfer */
+ DEBUGASSERT(data || connc);
+ if(!data)
+ data = connc->closure_handle;
+
+ /* the transfer must be detached from the connection */
+ DEBUGASSERT(data && !data->conn);
+
+ if(connc && connc->multi && connc->multi->socket_cb) {
+ unsigned int i;
+ for(i = 0; i < 2; ++i) {
+ if(CURL_SOCKET_BAD == conn->sock[i])
+ continue;
+ /* remove all connection's sockets from event handling */
+ connc->multi->in_callback = TRUE;
+ connc->multi->socket_cb(data, conn->sock[i], CURL_POLL_REMOVE,
+ connc->multi->socket_userp, NULL);
+ connc->multi->in_callback = FALSE;
+ }
+ }
+
+ Curl_attach_connection(data, conn);
+
+ connc_run_conn_shutdown_handler(data, conn);
+ if(do_shutdown) {
+ /* Make a last attempt to shutdown handlers and filters, if
+ * not done so already. */
+ connc_run_conn_shutdown(data, conn, &done);
+ }
+
+ if(connc)
+ DEBUGF(infof(data, "[CCACHE] closing #%" CURL_FORMAT_CURL_OFF_T,
+ conn->connection_id));
+ else
+ DEBUGF(infof(data, "closing connection #%" CURL_FORMAT_CURL_OFF_T,
+ conn->connection_id));
+ Curl_conn_close(data, SECONDARYSOCKET);
+ Curl_conn_close(data, FIRSTSOCKET);
+ Curl_detach_connection(data);
+
+ Curl_conn_free(data, conn);
+}
+
+
+static CURLcode connc_update_shutdown_ev(struct Curl_multi *multi,
+ struct Curl_easy *data,
+ struct connectdata *conn)
+{
+ struct easy_pollset ps;
+ unsigned int i;
+ int rc;
+
+ DEBUGASSERT(data);
+ DEBUGASSERT(multi);
+ DEBUGASSERT(multi->socket_cb);
+
+ memset(&ps, 0, sizeof(ps));
+ Curl_attach_connection(data, conn);
+ Curl_conn_adjust_pollset(data, &ps);
+ Curl_detach_connection(data);
+
+ if(!ps.num)
+ return CURLE_FAILED_INIT;
+
+ for(i = 0; i < ps.num; ++i) {
+ DEBUGF(infof(data, "[CCACHE] set socket=%" CURL_FORMAT_SOCKET_T
+ " events=%d on #%" CURL_FORMAT_CURL_OFF_T,
+ ps.sockets[i], ps.actions[i], conn->connection_id));
+ multi->in_callback = TRUE;
+ rc = multi->socket_cb(data, ps.sockets[i], ps.actions[i],
+ multi->socket_userp, NULL);
+ multi->in_callback = FALSE;
+ if(rc == -1)
+ return CURLE_FAILED_INIT;
+ }
+
+ return CURLE_OK;
+}
+
+void Curl_conncache_multi_socket(struct Curl_multi *multi,
+ curl_socket_t s, int ev_bitmask)
+{
+ struct conncache *connc = &multi->conn_cache;
+ struct Curl_easy *data = connc->closure_handle;
+ struct Curl_llist_element *e = connc->shutdowns.conn_list.head;
+ struct connectdata *conn;
+ bool done;
+
+ (void)ev_bitmask;
+ DEBUGASSERT(multi->socket_cb);
+ if(!e)
+ return;
+
+ connc->shutdowns.iter_locked = TRUE;
+ while(e) {
+ conn = e->ptr;
+ if(s == conn->sock[FIRSTSOCKET] || s == conn->sock[SECONDARYSOCKET]) {
+ Curl_attach_connection(data, conn);
+ connc_run_conn_shutdown(data, conn, &done);
+ DEBUGF(infof(data, "[CCACHE] shutdown #%" CURL_FORMAT_CURL_OFF_T
+ ", done=%d", conn->connection_id, done));
+ Curl_detach_connection(data);
+ if(done || connc_update_shutdown_ev(multi, data, conn)) {
+ Curl_llist_remove(&connc->shutdowns.conn_list, e, NULL);
+ connc_disconnect(NULL, conn, connc, FALSE);
+ }
+ break;
+ }
+ e = e->next;
+ }
+ connc->shutdowns.iter_locked = FALSE;
+}
+
+void Curl_conncache_multi_close_all(struct Curl_multi *multi)
+{
+ connc_close_all(&multi->conn_cache);
+}
+
+
+#define NUM_POLLS_ON_STACK 10
+
+static CURLcode connc_shutdown_wait(struct conncache *connc, int timeout_ms)
+{
+ struct pollfd a_few_on_stack[NUM_POLLS_ON_STACK];
+ struct curl_pollfds cpfds;
+ CURLcode result;
+
+ Curl_pollfds_init(&cpfds, a_few_on_stack, NUM_POLLS_ON_STACK);
+
+ result = Curl_conncache_add_pollfds(connc, &cpfds);
+ if(result)
+ goto out;
+
+ Curl_poll(cpfds.pfds, cpfds.n, CURLMIN(timeout_ms, 1000));
+
+out:
+ Curl_pollfds_cleanup(&cpfds);
+ return result;
+}
+
+static void connc_shutdown_all(struct conncache *connc, int timeout_ms)
+{
+ struct Curl_easy *data = connc->closure_handle;
+ struct connectdata *conn;
+ struct curltime started = Curl_now();
+
+ if(!data)
+ return;
+ (void)data;
+
+ DEBUGF(infof(data, "conncache shutdown all"));
+
+ /* Move all connections into the shutdown queue */
+ conn = connc_find_first_connection(connc);
+ while(conn) {
+ /* This will remove the connection from the cache */
+ DEBUGF(infof(data, "moving connection %" CURL_FORMAT_CURL_OFF_T
+ " to shutdown queue", conn->connection_id));
+ connc_remove_conn(connc, conn);
+ connc_discard_conn(connc, NULL, conn, FALSE);
+ conn = connc_find_first_connection(connc);
+ }
+
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ while(connc->shutdowns.conn_list.head) {
+ timediff_t timespent;
+ int remain_ms;
+
+ connc_perform(connc);
+
+ if(!connc->shutdowns.conn_list.head) {
+ DEBUGF(infof(data, "conncache shutdown ok"));
+ break;
+ }
+
+ /* wait for activity, timeout or "nothing" */
+ timespent = Curl_timediff(Curl_now(), started);
+ if(timespent >= (timediff_t)timeout_ms) {
+ DEBUGF(infof(data, "conncache shutdown %s",
+ (timeout_ms > 0)? "timeout" : "best effort done"));
+ break;
+ }
+
+ remain_ms = timeout_ms - (int)timespent;
+ if(connc_shutdown_wait(connc, remain_ms)) {
+ DEBUGF(infof(data, "conncache shutdown all, abort"));
+ break;
+ }
+ }
+
+  /* Due to errors/timeout, we might come here without being fully done. */
+ connc_shutdown_discard_all(connc);
+}
+
#if 0
/* Useful for debugging the connection cache */
void Curl_conncache_print(struct conncache *connc)
#include "timeval.h"
struct connectdata;
+struct curl_pollfds;
+struct curl_waitfds;
+struct Curl_multi;
+
+struct connshutdowns {
+ struct Curl_llist conn_list; /* The connectdata to shut down */
+ BIT(iter_locked); /* TRUE while iterating the list */
+};
struct conncache {
struct Curl_hash hash;
curl_off_t next_connection_id;
curl_off_t next_easy_id;
struct curltime last_cleanup;
+ struct connshutdowns shutdowns;
/* handle used for closing cached connections */
struct Curl_easy *closure_handle;
+ struct Curl_multi *multi; /* Optional, set if cache belongs to multi */
};
#define BUNDLE_NO_MULTIUSE -1
struct Curl_llist conn_list; /* The connectdata members of the bundle */
};
-/* returns 1 on error, 0 is fine */
-int Curl_conncache_init(struct conncache *, size_t size);
+/* Init the cache, pass multi only if cache is owned by it.
+ * returns 1 on error, 0 is fine.
+ */
+int Curl_conncache_init(struct conncache *,
+ struct Curl_multi *multi,
+ size_t size);
void Curl_conncache_destroy(struct conncache *connc);
/* return the correct bundle, to a host or a proxy */
void Curl_conncache_close_all_connections(struct conncache *connc);
void Curl_conncache_print(struct conncache *connc);
+/**
+ * Tear down the connection. If `aborted` is FALSE, the connection is
+ * shut down gracefully before being discarded. If the shutdown is not
+ * immediately complete, the connection is placed into the cache's
+ * shutdown queue.
+ */
+void Curl_conncache_disconnect(struct Curl_easy *data,
+ struct connectdata *conn,
+ bool aborted);
+
+/**
+ * Add sockets and POLLIN/OUT flags for connections handled by the cache.
+ */
+CURLcode Curl_conncache_add_pollfds(struct conncache *connc,
+ struct curl_pollfds *cpfds);
+CURLcode Curl_conncache_add_waitfds(struct conncache *connc,
+ struct curl_waitfds *cwfds);
+
+/**
+ * Perform maintenance on connections in the cache. Specifically,
+ * progress the shutdown of connections in the queue.
+ */
+void Curl_conncache_multi_perform(struct Curl_multi *multi);
+
+void Curl_conncache_multi_socket(struct Curl_multi *multi,
+ curl_socket_t s, int ev_bitmask);
+void Curl_conncache_multi_close_all(struct Curl_multi *multi);
+
#endif /* HEADER_CURL_CONNCACHE_H */
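The multi.c changes further down wire these entry points into the event loop. Schematically (a simplified sketch, not the actual multi code; error handling trimmed):

```c
/* Simplified sketch of how a multi handle is expected to drive the
 * conncache shutdown queue declared above. Illustrative only. */
static CURLMcode sketch_multi_iteration(struct Curl_multi *multi,
                                        struct curl_pollfds *cpfds)
{
  /* before waiting: also watch the sockets of connections that are
   * still being shut down gracefully */
  if(Curl_conncache_add_pollfds(&multi->conn_cache, cpfds))
    return CURLM_OUT_OF_MEMORY;

  /* ... poll and run the transfers as usual ... */

  /* afterwards: let the cache progress its pending shutdowns; fully
   * shut down connections are closed and removed from the queue */
  Curl_conncache_multi_perform(multi);
  return CURLM_OK;
}
```

In the `*_socket()` API, `Curl_conncache_multi_socket()` plays the same role for single-socket events, and `Curl_conncache_multi_close_all()` replaces the direct call to `Curl_conncache_close_all_connections()` when the multi handle is destroyed.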
BIT(rcvd_goaway);
BIT(sent_goaway);
BIT(enable_push);
- BIT(shutdown);
BIT(nw_out_blocked);
};
Curl_pollset_set(data, ps, sock, want_recv, want_send);
CF_DATA_RESTORE(cf, save);
}
- else if(ctx->sent_goaway && !ctx->shutdown) {
+ else if(ctx->sent_goaway && !cf->shutdown) {
/* shutdown in progress */
CF_DATA_SAVE(save, cf, data);
want_send = nghttp2_session_want_write(ctx->h2);
struct Curl_easy *data, bool *done)
{
struct cf_h2_ctx *ctx = cf->ctx;
+ struct cf_call_data save;
CURLcode result;
int rv;
- if(!cf->connected || !ctx->h2 || ctx->shutdown) {
+ if(!cf->connected || !ctx->h2 || cf->shutdown || ctx->conn_closed) {
*done = TRUE;
return CURLE_OK;
}
+ CF_DATA_SAVE(save, cf, data);
+
if(!ctx->sent_goaway) {
rv = nghttp2_submit_goaway(ctx->h2, NGHTTP2_FLAG_NONE,
ctx->local_max_sid, 0,
if(rv) {
failf(data, "nghttp2_submit_goaway() failed: %s(%d)",
nghttp2_strerror(rv), rv);
- return CURLE_SEND_ERROR;
+ result = CURLE_SEND_ERROR;
+ goto out;
}
ctx->sent_goaway = TRUE;
}
if(!result && nghttp2_session_want_read(ctx->h2))
result = h2_progress_ingress(cf, data, 0);
- *done = !result && !nghttp2_session_want_write(ctx->h2) &&
- !nghttp2_session_want_read(ctx->h2);
- ctx->shutdown = (result || *done);
+ *done = (ctx->conn_closed ||
+ (!result && !nghttp2_session_want_write(ctx->h2) &&
+ !nghttp2_session_want_read(ctx->h2)));
+
+out:
+ CF_DATA_RESTORE(cf, save);
+ cf->shutdown = (result || *done);
return result;
}
Curl_hash_init(&multi->proto_hash, 23,
Curl_hash_str, Curl_str_key_compare, ph_freeentry);
- if(Curl_conncache_init(&multi->conn_cache, chashsize))
+ if(Curl_conncache_init(&multi->conn_cache, multi, chashsize))
goto error;
Curl_llist_init(&multi->msglist, NULL);
struct Curl_easy *data;
struct curl_waitfds cwfds;
struct easy_pollset ps;
+ CURLMcode result = CURLM_OK;
if(!ufds)
return CURLM_BAD_FUNCTION_ARGUMENT;
memset(&ps, 0, sizeof(ps));
for(data = multi->easyp; data; data = data->next) {
multi_getsock(data, &ps);
- if(Curl_waitfds_add_ps(&cwfds, &ps))
- return CURLM_OUT_OF_MEMORY;
+ if(Curl_waitfds_add_ps(&cwfds, &ps)) {
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
+ }
}
+ if(Curl_conncache_add_waitfds(&multi->conn_cache, &cwfds)) {
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
+ }
+
+out:
if(fd_count)
*fd_count = cwfds.n;
- return CURLM_OK;
+ return result;
}
#ifdef USE_WINSOCK
struct pollfd a_few_on_stack[NUM_POLLS_ON_STACK];
struct curl_pollfds cpfds;
unsigned int curl_nfds = 0; /* how many pfds are for curl transfers */
+ CURLMcode result = CURLM_OK;
#ifdef USE_WINSOCK
WSANETWORKEVENTS wsa_events;
DEBUGASSERT(multi->wsa_event != WSA_INVALID_EVENT);
for(data = multi->easyp; data; data = data->next) {
multi_getsock(data, &ps);
if(Curl_pollfds_add_ps(&cpfds, &ps)) {
- Curl_pollfds_cleanup(&cpfds);
- return CURLM_OUT_OF_MEMORY;
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
}
}
+ if(Curl_conncache_add_pollfds(&multi->conn_cache, &cpfds)) {
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
+ }
+
curl_nfds = cpfds.n; /* what curl internally uses in cpfds */
/* Add external file descriptions from poll-like struct curl_waitfd */
for(i = 0; i < extra_nfds; i++) {
if(extra_fds[i].events & CURL_WAIT_POLLOUT)
events |= POLLOUT;
if(Curl_pollfds_add_sock(&cpfds, extra_fds[i].fd, events)) {
- Curl_pollfds_cleanup(&cpfds);
- return CURLM_OUT_OF_MEMORY;
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
}
}
}
if(mask) {
if(WSAEventSelect(cpfds.pfds[i].fd, multi->wsa_event, mask) != 0) {
- Curl_pollfds_cleanup(&cpfds);
- return CURLM_INTERNAL_ERROR;
+          result = CURLM_INTERNAL_ERROR;
+ goto out;
}
}
}
#ifndef USE_WINSOCK
if(use_wakeup && multi->wakeup_pair[0] != CURL_SOCKET_BAD) {
if(Curl_pollfds_add_sock(&cpfds, multi->wakeup_pair[0], POLLIN)) {
- Curl_pollfds_cleanup(&cpfds);
- return CURLM_OUT_OF_MEMORY;
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
}
}
#endif
pollrc = Curl_poll(cpfds.pfds, cpfds.n, timeout_ms); /* wait... */
#endif
if(pollrc < 0) {
- Curl_pollfds_cleanup(&cpfds);
- return CURLM_UNRECOVERABLE_POLL;
+ result = CURLM_UNRECOVERABLE_POLL;
+ goto out;
}
if(pollrc > 0) {
}
}
+out:
Curl_pollfds_cleanup(&cpfds);
- return CURLM_OK;
+ return result;
}
CURLMcode curl_multi_wait(struct Curl_multi *multi,
/* the current node might be unlinked in multi_runsingle(), get the next
pointer now */
struct Curl_easy *datanext = data->next;
+
if(data->set.no_signal != nosig) {
sigpipe_restore(&pipe_st);
sigpipe_ignore(data, &pipe_st);
result = multi_runsingle(multi, &now, data);
if(result)
returncode = result;
+
data = datanext; /* operate on next handle */
} while(data);
sigpipe_restore(&pipe_st);
}
+ Curl_conncache_multi_perform(multi);
+
/*
* Simply remove all expired timers from the splay since handles are dealt
* with unconditionally by this function and curl_multi_timeout() requires
}
/* Close all the connections in the connection cache */
- Curl_conncache_close_all_connections(&multi->conn_cache);
+ Curl_conncache_multi_close_all(multi);
sockhash_destroy(&multi->sockhash);
Curl_hash_destroy(&multi->proto_hash);
/* Fill in the 'current' struct with the state as it is now: what sockets to
supervise and for what actions */
multi_getsock(data, &cur_poll);
-
/* We have 0 .. N sockets already and we get to know about the 0 .. M
sockets we should have from now on. Detect the differences, remove no
longer supervised ones and add new ones */
if(s != CURL_SOCKET_TIMEOUT) {
struct Curl_sh_entry *entry = sh_getentry(&multi->sockhash, s);
- if(!entry)
+ if(!entry) {
/* Unmatched socket, we can't act on it but we ignore this fact. In
real-world tests it has been proved that libevent can in fact give
the application actions even though the socket was just previously
asked to get removed, so thus we better survive stray socket actions
and just move on. */
- ;
+ /* The socket might come from a connection that is being shut down
+ * by the multi's conncache. */
+ Curl_conncache_multi_socket(multi, s, ev_bitmask);
+ }
else {
struct Curl_hash_iterator iter;
struct Curl_hash_element *he;
break;
case CURLMOPT_MAX_TOTAL_CONNECTIONS:
multi->max_total_connections = va_arg(param, long);
+ /* for now, let this also decide the max number of connections
+ * in shutdown handling */
+    multi->max_shutdown_connections = multi->max_total_connections;
break;
/* options formerly used for pipelining */
case CURLMOPT_MAX_PIPELINE_LENGTH:
long max_total_connections; /* if >0, a fixed limit of the maximum number
of connections in total */
+ long max_shutdown_connections; /* if >0, a fixed limit of the maximum number
+ of connections in shutdown handling */
/* timer callback and user data pointer for the *socket() API */
curl_multi_timer_callback timer_cb;
#include <curl/curl.h>
#include "urldata.h"
+#include "connect.h"
#include "share.h"
#include "psl.h"
#include "vtls/vtls.h"
break;
case CURL_LOCK_DATA_CONNECT:
- if(Curl_conncache_init(&share->conn_cache, 103))
+ if(Curl_conncache_init(&share->conn_cache, NULL, 103))
res = CURLSHE_NOMEM;
break;
return result;
}
-static void conn_shutdown(struct Curl_easy *data)
-{
- DEBUGASSERT(data);
- infof(data, "Closing connection");
-
- /* possible left-overs from the async name resolvers */
- Curl_resolver_cancel(data);
-
- Curl_conn_close(data, SECONDARYSOCKET);
- Curl_conn_close(data, FIRSTSOCKET);
-}
-
-static void conn_free(struct Curl_easy *data, struct connectdata *conn)
+void Curl_conn_free(struct Curl_easy *data, struct connectdata *conn)
{
size_t i;
*
* This function MUST NOT reset state in the Curl_easy struct if that
* isn't strictly bound to the life-time of *this* particular connection.
- *
*/
-
void Curl_disconnect(struct Curl_easy *data,
- struct connectdata *conn, bool dead_connection)
+ struct connectdata *conn, bool aborted)
{
/* there must be a connection to close */
DEBUGASSERT(conn);
DEBUGASSERT(!data->conn);
DEBUGF(infof(data, "Curl_disconnect(conn #%"
- CURL_FORMAT_CURL_OFF_T ", dead=%d)",
- conn->connection_id, dead_connection));
+ CURL_FORMAT_CURL_OFF_T ", aborted=%d)",
+ conn->connection_id, aborted));
+
/*
* If this connection isn't marked to force-close, leave it open if there
* are other users of it
*/
- if(CONN_INUSE(conn) && !dead_connection) {
+ if(CONN_INUSE(conn) && !aborted) {
DEBUGF(infof(data, "Curl_disconnect when inuse: %zu", CONN_INUSE(conn)));
return;
}
Curl_http_auth_cleanup_negotiate(conn);
if(conn->connect_only)
- /* treat the connection as dead in CONNECT_ONLY situations */
- dead_connection = TRUE;
-
- /* temporarily attach the connection to this transfer handle for the
- disconnect and shutdown */
- Curl_attach_connection(data, conn);
-
- if(conn->handler && conn->handler->disconnect)
- /* This is set if protocol-specific cleanups should be made */
- conn->handler->disconnect(data, conn, dead_connection);
-
- conn_shutdown(data);
-
- /* detach it again */
- Curl_detach_connection(data);
+ /* treat the connection as aborted in CONNECT_ONLY situations */
+ aborted = TRUE;
- conn_free(data, conn);
+ Curl_conncache_disconnect(data, conn, aborted);
}
/*
* any time (HTTP/2 PING for example), the protocol handler needs
* to install its own `connection_check` callback.
*/
+ DEBUGF(infof(data, "connection has input pending, not reusable"));
dead = TRUE;
}
Curl_detach_connection(data);
/* connection previously removed from cache in prune_if_dead() */
- /* disconnect it */
- Curl_disconnect(data, pruned, TRUE);
+ /* disconnect it, do not treat as aborted */
+ Curl_disconnect(data, pruned, FALSE);
}
CONNCACHE_LOCK(data);
data->state.conn_cache->last_cleanup = now;
infof(data, "Multiplexed connection found");
}
else if(prune_if_dead(check, data)) {
- /* disconnect it */
- Curl_disconnect(data, check, TRUE);
+ /* disconnect it, do not treat as aborted */
+ Curl_disconnect(data, check, FALSE);
continue;
}
/* reuse init */
existing->bits.reuse = TRUE; /* yes, we're reusing here */
- conn_free(data, temp);
+ Curl_conn_free(data, temp);
}
/**
if(!connections_available) {
infof(data, "No connections available.");
- conn_free(data, conn);
+ Curl_conn_free(data, conn);
*in_connect = NULL;
result = CURLE_NO_CONNECTION_AVAILABLE;
CURLcode Curl_close(struct Curl_easy **datap); /* opposite of curl_open() */
CURLcode Curl_connect(struct Curl_easy *, bool *async, bool *protocol_connect);
void Curl_disconnect(struct Curl_easy *data,
- struct connectdata *, bool dead_connection);
+ struct connectdata *, bool aborted);
CURLcode Curl_setup_conn(struct Curl_easy *data,
bool *protocol_done);
+void Curl_conn_free(struct Curl_easy *data, struct connectdata *conn);
CURLcode Curl_parse_login_details(const char *login, const size_t len,
char **userptr, char **passwdptr,
char **optionsptr);
accept() */
BIT(parallel_connect); /* set TRUE when a parallel connect attempt has
started (happy eyeballs) */
+ BIT(aborted); /* connection was aborted, e.g. in unclean state */
+ BIT(shutdown_handler); /* connection shutdown: handler shut down */
+ BIT(shutdown_filters); /* connection shutdown: filters shut down */
};
struct hostname {
CURLcode result;
DEBUGASSERT(backend);
- if(!backend->active || connssl->shutdown) {
+ if(!backend->active || cf->shutdown) {
*done = TRUE;
return CURLE_OK;
}
else
CURL_TRC_CF(data, cf, "shutdown error: %d", result);
- connssl->shutdown = (result || *done);
+ cf->shutdown = (result || *done);
return result;
}
(struct bearssl_ssl_backend_data *)connssl->backend;
size_t i;
+ (void)data;
DEBUGASSERT(backend);
- if(backend->active) {
- if(!connssl->shutdown) {
- bool done;
- bearssl_shutdown(cf, data, TRUE, &done);
- }
- backend->active = FALSE;
- }
+ backend->active = FALSE;
if(backend->anchors) {
for(i = 0; i < backend->anchors_len; ++i)
free(backend->anchors[i].dn.data);
size_t i;
DEBUGASSERT(backend);
- if(!backend->gtls.session || connssl->shutdown) {
+ if(!backend->gtls.session || cf->shutdown) {
*done = TRUE;
goto out;
}
}
out:
- connssl->shutdown = (result || *done);
+ cf->shutdown = (result || *done);
return result;
}
DEBUGASSERT(backend);
CURL_TRC_CF(data, cf, "close");
if(backend->gtls.session) {
- if(!connssl->shutdown) {
- bool done;
- gtls_shutdown(cf, data, TRUE, &done);
- }
gnutls_deinit(backend->gtls.session);
backend->gtls.session = NULL;
}
DEBUGASSERT(backend);
- if(!backend->initialized || connssl->shutdown) {
+ if(!backend->initialized || cf->shutdown) {
*done = TRUE;
return CURLE_OK;
}
}
out:
- connssl->shutdown = (result || *done);
+ cf->shutdown = (result || *done);
return result;
}
struct mbed_ssl_backend_data *backend =
(struct mbed_ssl_backend_data *)connssl->backend;
+ (void)data;
DEBUGASSERT(backend);
if(backend->initialized) {
- if(!connssl->shutdown) {
- bool done;
- mbedtls_shutdown(cf, data, TRUE, &done);
- }
-
mbedtls_pk_free(&backend->pk);
mbedtls_x509_crt_free(&backend->clicert);
mbedtls_x509_crt_free(&backend->cacert);
char buf[1024];
int nread, err;
unsigned long sslerr;
+ size_t i;
DEBUGASSERT(octx);
- if(!octx->ssl || connssl->shutdown) {
+ if(!octx->ssl || cf->shutdown) {
*done = TRUE;
goto out;
}
/* We have not started the shutdown from our side yet. Check
* if the server already sent us one. */
ERR_clear_error();
- nread = SSL_read(octx->ssl, buf, (int)sizeof(buf));
+ for(i = 0; i < 10; ++i) {
+ nread = SSL_read(octx->ssl, buf, (int)sizeof(buf));
+ CURL_TRC_CF(data, cf, "SSL shutdown not sent, read -> %d", nread);
+ if(nread <= 0)
+ break;
+ }
err = SSL_get_error(octx->ssl, nread);
if(!nread && err == SSL_ERROR_ZERO_RETURN) {
bool input_pending;
/* Yes, it did. */
if(!send_shutdown) {
- connssl->shutdown = TRUE;
CURL_TRC_CF(data, cf, "SSL shutdown received, not sending");
+ *done = TRUE;
goto out;
}
else if(!cf->next->cft->is_alive(cf->next, data, &input_pending)) {
* seems not interested to see our close notify, so do not
* send it. We are done. */
connssl->peer_closed = TRUE;
- connssl->shutdown = TRUE;
CURL_TRC_CF(data, cf, "peer closed connection");
+ *done = TRUE;
goto out;
}
}
+ if(send_shutdown && SSL_shutdown(octx->ssl) == 1) {
+ CURL_TRC_CF(data, cf, "SSL shutdown finished");
+ *done = TRUE;
+ goto out;
+ }
}
- if(send_shutdown && SSL_shutdown(octx->ssl) == 1) {
- CURL_TRC_CF(data, cf, "SSL shutdown finished");
+ /* SSL should now have started the shutdown from our side. Since it
+ * was not complete, we are lacking the close notify from the server. */
+ for(i = 0; i < 10; ++i) {
+ ERR_clear_error();
+ nread = SSL_read(octx->ssl, buf, (int)sizeof(buf));
+ CURL_TRC_CF(data, cf, "SSL shutdown read -> %d", nread);
+ if(nread <= 0)
+ break;
+ }
+ if(SSL_get_shutdown(octx->ssl) & SSL_RECEIVED_SHUTDOWN) {
+ CURL_TRC_CF(data, cf, "SSL shutdown received, finished");
*done = TRUE;
goto out;
}
- else {
- size_t i;
- /* SSL should now have started the shutdown from our side. Since it
- * was not complete, we are lacking the close notify from the server. */
- for(i = 0; i < 10; ++i) {
- ERR_clear_error();
- nread = SSL_read(octx->ssl, buf, (int)sizeof(buf));
- if(nread <= 0)
- break;
- }
- err = SSL_get_error(octx->ssl, nread);
- switch(err) {
- case SSL_ERROR_ZERO_RETURN: /* no more data */
- CURL_TRC_CF(data, cf, "SSL shutdown received");
- *done = TRUE;
- break;
- case SSL_ERROR_NONE: /* just did not get anything */
- case SSL_ERROR_WANT_READ:
- /* SSL has send its notify and now wants to read the reply
- * from the server. We are not really interested in that. */
- CURL_TRC_CF(data, cf, "SSL shutdown sent, want receive");
- connssl->io_need = CURL_SSL_IO_NEED_RECV;
- break;
- case SSL_ERROR_WANT_WRITE:
- CURL_TRC_CF(data, cf, "SSL shutdown send blocked");
- connssl->io_need = CURL_SSL_IO_NEED_SEND;
- break;
- default:
- sslerr = ERR_get_error();
- CURL_TRC_CF(data, cf, "SSL shutdown, error: '%s', errno %d",
- (sslerr ?
- ossl_strerror(sslerr, buf, sizeof(buf)) :
- SSL_ERROR_to_str(err)),
- SOCKERRNO);
- result = CURLE_RECV_ERROR;
- break;
- }
+ err = SSL_get_error(octx->ssl, nread);
+ switch(err) {
+ case SSL_ERROR_ZERO_RETURN: /* no more data */
+ CURL_TRC_CF(data, cf, "SSL shutdown not received, but closed");
+ *done = TRUE;
+ break;
+ case SSL_ERROR_NONE: /* just did not get anything */
+ case SSL_ERROR_WANT_READ:
+    /* SSL has sent its notify and now wants to read the reply
+ * from the server. We are not really interested in that. */
+ CURL_TRC_CF(data, cf, "SSL shutdown sent, want receive");
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
+ break;
+ case SSL_ERROR_WANT_WRITE:
+ CURL_TRC_CF(data, cf, "SSL shutdown send blocked");
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
+ break;
+ default:
+ /* Server seems to have closed the connection without sending us
+ * a close notify. */
+ sslerr = ERR_get_error();
+ CURL_TRC_CF(data, cf, "SSL shutdown, ignore recv error: '%s', errno %d",
+ (sslerr ?
+ ossl_strerror(sslerr, buf, sizeof(buf)) :
+ SSL_ERROR_to_str(err)),
+ SOCKERRNO);
+ *done = TRUE;
+ result = CURLE_OK;
+ break;
}
out:
- connssl->shutdown = (result || *done);
+ cf->shutdown = (result || *done);
return result;
}
DEBUGASSERT(octx);
if(octx->ssl) {
- /* Send the TLS shutdown if have not done so already and are still
- * connected *and* if the peer did not already close the connection. */
- if(cf->connected && !connssl->shutdown &&
- cf->next && cf->next->connected && !connssl->peer_closed) {
- bool done;
- (void)ossl_shutdown(cf, data, TRUE, &done);
- }
-
SSL_free(octx->ssl);
octx->ssl = NULL;
}
size_t i;
DEBUGASSERT(backend);
- if(!backend->conn || connssl->shutdown) {
+ if(!backend->conn || cf->shutdown) {
*done = TRUE;
goto out;
}
}
out:
- connssl->shutdown = (result || *done);
+ cf->shutdown = (result || *done);
return result;
}
struct rustls_ssl_backend_data *backend =
(struct rustls_ssl_backend_data *)connssl->backend;
+ (void)data;
DEBUGASSERT(backend);
if(backend->conn) {
- /* Send the TLS shutdown if have not done so already and are still
- * connected *and* if the peer did not already close the connection. */
- if(cf->connected && !connssl->shutdown &&
- cf->next && cf->next->connected && !connssl->peer_closed) {
- bool done;
- (void)cr_shutdown(cf, data, TRUE, &done);
- }
-
rustls_connection_free(backend->conn);
backend->conn = NULL;
}
(struct schannel_ssl_backend_data *)connssl->backend;
CURLcode result = CURLE_OK;
- if(connssl->shutdown) {
+ if(cf->shutdown) {
*done = TRUE;
return CURLE_OK;
}
connssl->peer.hostname, connssl->peer.port);
}
- if(!backend->ctxt || connssl->shutdown) {
+ if(!backend->ctxt || cf->shutdown) {
*done = TRUE;
goto out;
}
}
out:
- connssl->shutdown = (result || *done);
+ cf->shutdown = (result || *done);
return result;
}
DEBUGASSERT(data);
DEBUGASSERT(backend);
- if(backend->cred && backend->ctxt &&
- cf->connected && !connssl->shutdown &&
- cf->next && cf->next->connected && !connssl->peer_closed) {
- bool done;
- (void)schannel_shutdown(cf, data, TRUE, &done);
- }
-
/* free SSPI Schannel API security context handle */
if(backend->ctxt) {
DEBUGF(infof(data, "schannel: clear security context handle"));
size_t i;
DEBUGASSERT(backend);
- if(!backend->ssl_ctx || connssl->shutdown) {
+ if(!backend->ssl_ctx || cf->shutdown) {
*done = TRUE;
goto out;
}
}
out:
- connssl->shutdown = (result || *done);
+ cf->shutdown = (result || *done);
return result;
}
if(backend->ssl_ctx) {
CURL_TRC_CF(data, cf, "close");
- if(cf->connected && !connssl->shutdown &&
- cf->next && cf->next->connected && !connssl->peer_closed) {
- bool done;
- (void)sectransp_shutdown(cf, data, TRUE, &done);
- }
-
#if CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS
if(SSLCreateContext)
CFRelease(backend->ssl_ctx);
struct Curl_easy *data,
bool *done)
{
- struct ssl_connect_data *connssl = cf->ctx;
- struct cf_call_data save;
CURLcode result = CURLE_OK;
*done = TRUE;
- if(!connssl->shutdown) {
+ if(!cf->shutdown) {
+ struct cf_call_data save;
+
CF_DATA_SAVE(save, cf, data);
result = Curl_ssl->shut_down(cf, data, TRUE, done);
CURL_TRC_CF(data, cf, "cf_shutdown -> %d, done=%d", result, *done);
CF_DATA_RESTORE(cf, save);
- connssl->shutdown = (result || *done);
+ cf->shutdown = (result || *done);
}
return result;
}
timediff_t timeout_ms;
int what, loop = 10;
- if(connssl->shutdown) {
+ if(cf->shutdown) {
*done = TRUE;
return CURLE_OK;
}
}
out:
CF_DATA_RESTORE(cf, save);
- connssl->shutdown = (result || *done);
+ cf->shutdown = (result || *done);
return result;
}
int io_need; /* TLS signals special SEND/RECV needs */
BIT(use_alpn); /* if ALPN shall be used in handshake */
BIT(peer_closed); /* peer has closed connection */
- BIT(shutdown); /* graceful close notify finished */
};
int nread, err;
DEBUGASSERT(wctx);
- if(!wctx->handle || connssl->shutdown) {
+ if(!wctx->handle || cf->shutdown) {
*done = TRUE;
goto out;
}
bool input_pending;
/* Yes, it did. */
if(!send_shutdown) {
- connssl->shutdown = TRUE;
CURL_TRC_CF(data, cf, "SSL shutdown received, not sending");
+ *done = TRUE;
goto out;
}
else if(!cf->next->cft->is_alive(cf->next, data, &input_pending)) {
      /* Server closed the connection after its close notify. It
* seems not interested to see our close notify, so do not
* send it. We are done. */
- connssl->peer_closed = TRUE;
- connssl->shutdown = TRUE;
CURL_TRC_CF(data, cf, "peer closed connection");
+ connssl->peer_closed = TRUE;
+ *done = TRUE;
goto out;
}
}
}
out:
- connssl->shutdown = (result || *done);
+ cf->shutdown = (result || *done);
return result;
}
DEBUGASSERT(backend);
if(backend->handle) {
- if(cf->connected && !connssl->shutdown &&
- cf->next && cf->next->connected && !connssl->peer_closed) {
- bool done;
- (void)wolfssl_shutdown(cf, data, TRUE, &done);
- }
wolfSSL_free(backend->handle);
backend->handle = NULL;
}
}
start = tvnow();
#ifdef DEBUGBUILD
+ if(getenv("CURL_FORBID_REUSE"))
+ (void)curl_easy_setopt(per->curl, CURLOPT_FORBID_REUSE, 1L);
+
if(global->test_event_based)
result = curl_easy_perform_ev(per->curl);
else
])
if env.has_vsftpd():
report.extend([
- f' VsFTPD: {env.vsftpd_version()}, ftp:{env.ftp_port}'
+ f' VsFTPD: {env.vsftpd_version()}, ftp:{env.ftp_port}, ftps:{env.ftps_port}'
])
return '\n'.join(report)
== Info: Connection #0 to host %HOSTIP left intact
== Info: Connection #0 to host %HOSTIP left intact
== Info: Connection #0 to host %HOSTIP left intact
-== Info: Closing connection
+== Info: shutting down connection #0
== Info: Connection #1 to host %HOSTIP left intact
</file>
<stripfile>
-$_ = '' if (($_ !~ /left intact/) && ($_ !~ /Closing connection/))
+$_ = '' if (($_ !~ /left intact/) && ($_ !~ /(closing|shutting down) connection #\d+/))
</stripfile>
</verify>
</testcase>
static size_t transfer_count = 1;
static struct transfer *transfers;
+static int forbid_reuse = 0;
static struct transfer *get_transfer_for_easy(CURL *easy)
{
curl_easy_setopt(hnd, CURLOPT_NOPROGRESS, 0L);
curl_easy_setopt(hnd, CURLOPT_XFERINFOFUNCTION, my_progress_cb);
curl_easy_setopt(hnd, CURLOPT_XFERINFODATA, t);
+ if(forbid_reuse)
+ curl_easy_setopt(hnd, CURLOPT_FORBID_REUSE, 1L);
/* please be verbose */
if(verbose) {
int http_version = CURL_HTTP_VERSION_2_0;
int ch;
- while((ch = getopt(argc, argv, "ahm:n:A:F:P:V:")) != -1) {
+ while((ch = getopt(argc, argv, "afhm:n:A:F:P:V:")) != -1) {
switch(ch) {
case 'h':
usage(NULL);
case 'a':
abort_paused = 1;
break;
+ case 'f':
+ forbid_reuse = 1;
+ break;
case 'm':
max_parallel = (size_t)strtol(optarg, NULL, 10);
break;
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#***************************************************************************
+# _ _ ____ _
+# Project ___| | | | _ \| |
+# / __| | | | |_) | |
+# | (__| |_| | _ <| |___
+# \___|\___/|_| \_\_____|
+#
+# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at https://curl.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+#
+# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+# KIND, either express or implied.
+#
+# SPDX-License-Identifier: curl
+#
+###########################################################################
+#
+import logging
+import re
+
+import pytest
+
+from testenv import Env, CurlClient, LocalClient
+
+
+log = logging.getLogger(__name__)
+
+
+class TestShutdown:
+
+    @pytest.fixture(autouse=True, scope='class')
+    def _class_scope(self, env, httpd, nghttpx):
+        if env.have_h3():
+            nghttpx.start_if_needed()
+        httpd.clear_extra_configs()
+        httpd.reload()
+        indir = httpd.docs_dir
+        env.make_data_file(indir=indir, fname="data-10k", fsize=10*1024)
+        env.make_data_file(indir=indir, fname="data-100k", fsize=100*1024)
+        env.make_data_file(indir=indir, fname="data-1m", fsize=1024*1024)
+
+ # check with `tcpdump` that we see curl TCP RST packets
+ @pytest.mark.skipif(condition=not Env.tcpdump(), reason="tcpdump not available")
+ @pytest.mark.parametrize("proto", ['http/1.1'])
+ def test_19_01_check_tcp_rst(self, env: Env, httpd, repeat, proto):
+ if env.ci_run:
+ pytest.skip("seems not to work in CI")
+ curl = CurlClient(env=env)
+ url = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-1]'
+ r = curl.http_download(urls=[url], alpn_proto=proto, with_tcpdump=True, extra_args=[
+ '--parallel'
+ ])
+ r.check_response(http_status=200, count=2)
+ assert r.tcpdump
+        assert len(r.tcpdump.stats) != 0, f'Expected TCP RST packets: {r.tcpdump.stderr}'
+
+ # check with `tcpdump` that we do NOT see TCP RST when CURL_GRACEFUL_SHUTDOWN set
+ @pytest.mark.skipif(condition=not Env.tcpdump(), reason="tcpdump not available")
+ @pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
+ def test_19_02_check_shutdown(self, env: Env, httpd, repeat, proto):
+ if not env.curl_is_debug():
+ pytest.skip('only works for curl debug builds')
+ curl = CurlClient(env=env, run_env={
+ 'CURL_GRACEFUL_SHUTDOWN': '2000',
+ 'CURL_DEBUG': 'ssl'
+ })
+ url = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-1]'
+ r = curl.http_download(urls=[url], alpn_proto=proto, with_tcpdump=True, extra_args=[
+ '--parallel'
+ ])
+ r.check_response(http_status=200, count=2)
+ assert r.tcpdump
+        assert len(r.tcpdump.stats) == 0, 'Unexpected TCP RST packets'
+
+ # run downloads where the server closes the connection after each request
+ @pytest.mark.parametrize("proto", ['http/1.1'])
+ def test_19_03_shutdown_by_server(self, env: Env, httpd, repeat, proto):
+ if not env.curl_is_debug():
+ pytest.skip('only works for curl debug builds')
+ count = 10
+ curl = CurlClient(env=env, run_env={
+ 'CURL_GRACEFUL_SHUTDOWN': '2000',
+ 'CURL_DEBUG': 'ssl'
+ })
+ url = f'https://{env.authority_for(env.domain1, proto)}/curltest/tweak/?'\
+ f'id=[0-{count-1}]&with_cl&close'
+ r = curl.http_download(urls=[url], alpn_proto=proto)
+ r.check_response(http_status=200, count=count)
+ shutdowns = [l for l in r.trace_lines if re.match(r'.*CCACHE\] shutdown #\d+, done=1', l)]
+ assert len(shutdowns) == count, f'{shutdowns}'
+
+ # run downloads with CURLOPT_FORBID_REUSE set, meaning *we* close
+ # the connection after each request
+ @pytest.mark.parametrize("proto", ['http/1.1'])
+ def test_19_04_shutdown_by_curl(self, env: Env, httpd, proto, repeat):
+ if not env.curl_is_debug():
+ pytest.skip('only works for curl debug builds')
+ count = 10
+ docname = 'data.json'
+ url = f'https://localhost:{env.https_port}/{docname}'
+ client = LocalClient(name='h2-download', env=env, run_env={
+ 'CURL_GRACEFUL_SHUTDOWN': '2000',
+ 'CURL_DEBUG': 'ssl'
+ })
+ if not client.exists():
+ pytest.skip(f'example client not built: {client.name}')
+ r = client.run(args=[
+ '-n', f'{count}', '-f', '-V', proto, url
+ ])
+ r.check_exit_code(0)
+ shutdowns = [l for l in r.trace_lines if re.match(r'.*CCACHE\] shutdown #\d+, done=1', l)]
+ assert len(shutdowns) == count, f'{shutdowns}'
+
+ # run event-based downloads with CURLOPT_FORBID_REUSE set, meaning *we* close
+ # the connection after each request
+ @pytest.mark.parametrize("proto", ['http/1.1'])
+ def test_19_05_event_shutdown_by_server(self, env: Env, httpd, proto, repeat):
+ if not env.curl_is_debug():
+ pytest.skip('only works for curl debug builds')
+ count = 10
+ curl = CurlClient(env=env, run_env={
+ # forbid connection reuse to trigger shutdowns after transfer
+ 'CURL_FORBID_REUSE': '1',
+ # make socket receives block 50% of the time to delay shutdown
+ 'CURL_DBG_SOCK_RBLOCK': '50',
+ 'CURL_DEBUG': 'ssl'
+ })
+ url = f'https://{env.authority_for(env.domain1, proto)}/curltest/tweak/?'\
+ f'id=[0-{count-1}]&with_cl&'
+ r = curl.http_download(urls=[url], alpn_proto=proto, extra_args=[
+ '--test-event'
+ ])
+ r.check_response(http_status=200, count=count)
+ # check that we closed all connections
+ closings = [l for l in r.trace_lines if re.match(r'.*CCACHE\] closing #\d+', l)]
+ assert len(closings) == count, f'{closings}'
+ # check that all connection sockets were removed from event
+ removes = [l for l in r.trace_lines if re.match(r'.*socket cb: socket \d+ REMOVED', l)]
+ assert len(removes) == count, f'{removes}'
+
+
if os.path.exists(path):
return os.remove(path)
+ # check with `tcpdump` if curl causes any TCP RST packets
+ @pytest.mark.skipif(condition=not Env.tcpdump(), reason="tcpdump not available")
+ def test_30_06_shutdownh_download(self, env: Env, vsftpd: VsFTPD, repeat):
+ docname = 'data-1k'
+ curl = CurlClient(env=env)
+ count = 1
+ url = f'ftp://{env.ftp_domain}:{vsftpd.port}/{docname}?[0-{count-1}]'
+ r = curl.ftp_get(urls=[url], with_stats=True, with_tcpdump=True)
+ r.check_stats(count=count, http_status=226)
+ assert r.tcpdump
+        assert len(r.tcpdump.stats) == 0, f'Unexpected TCP RST packets: {r.tcpdump.stats}'
+
+ # check with `tcpdump` if curl causes any TCP RST packets
+ @pytest.mark.skipif(condition=not Env.tcpdump(), reason="tcpdump not available")
+ def test_30_07_shutdownh_upload(self, env: Env, vsftpd: VsFTPD, repeat):
+ docname = 'upload-1k'
+ curl = CurlClient(env=env)
+ srcfile = os.path.join(env.gen_dir, docname)
+ dstfile = os.path.join(vsftpd.docs_dir, docname)
+ self._rmf(dstfile)
+ count = 1
+ url = f'ftp://{env.ftp_domain}:{vsftpd.port}/'
+ r = curl.ftp_upload(urls=[url], fupload=f'{srcfile}', with_stats=True, with_tcpdump=True)
+ r.check_stats(count=count, http_status=226)
+ assert r.tcpdump
+        assert len(r.tcpdump.stats) == 0, f'Unexpected TCP RST packets: {r.tcpdump.stats}'
+
def check_downloads(self, client, srcfile: str, count: int,
complete: bool = True):
for i in range(count):
if os.path.exists(path):
return os.remove(path)
+ # check with `tcpdump` if curl causes any TCP RST packets
+ @pytest.mark.skipif(condition=not Env.tcpdump(), reason="tcpdump not available")
+ def test_31_06_shutdownh_download(self, env: Env, vsftpds: VsFTPD, repeat):
+ docname = 'data-1k'
+ curl = CurlClient(env=env)
+ count = 1
+ url = f'ftp://{env.ftp_domain}:{vsftpds.port}/{docname}?[0-{count-1}]'
+ r = curl.ftp_ssl_get(urls=[url], with_stats=True, with_tcpdump=True)
+ r.check_stats(count=count, http_status=226)
+        # vsftpd closes the control connection without a clean shutdown,
+        # disregard RST packets sent from its port towards curl
+        assert len(r.tcpdump.stats_excluding(src_port=env.ftps_port)) == 0, \
+            f'Unexpected TCP RST packets: {r.tcpdump.stats}'
+
+ # check with `tcpdump` if curl causes any TCP RST packets
+ @pytest.mark.skipif(condition=not Env.tcpdump(), reason="tcpdump not available")
+ def test_31_07_shutdownh_upload(self, env: Env, vsftpds: VsFTPD, repeat):
+ docname = 'upload-1k'
+ curl = CurlClient(env=env)
+ srcfile = os.path.join(env.gen_dir, docname)
+ dstfile = os.path.join(vsftpds.docs_dir, docname)
+ self._rmf(dstfile)
+ count = 1
+ url = f'ftp://{env.ftp_domain}:{vsftpds.port}/'
+ r = curl.ftp_ssl_upload(urls=[url], fupload=f'{srcfile}', with_stats=True, with_tcpdump=True)
+ r.check_stats(count=count, http_status=226)
+        # vsftpd closes the control connection without a clean shutdown,
+        # disregard RST packets sent from its port towards curl
+        assert len(r.tcpdump.stats_excluding(src_port=env.ftps_port)) == 0, \
+            f'Unexpected TCP RST packets: {r.tcpdump.stats}'
+
def check_downloads(self, client, srcfile: str, count: int,
complete: bool = True):
for i in range(count):
import json
import logging
import os
+import sys
+import time
+from threading import Thread
+
import psutil
import re
import shutil
f'stats={self.stats}]'
+class RunTcpDump:
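+    # runs `tcpdump` on the loopback interface in a background thread and
+    # records any TCP RST packets seen while a test is running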
+
+ def __init__(self, env, run_dir):
+ self._env = env
+ self._run_dir = run_dir
+ self._proc = None
+ self._stdoutfile = os.path.join(self._run_dir, 'tcpdump.out')
+ self._stderrfile = os.path.join(self._run_dir, 'tcpdump.err')
+
+ @property
+ def stats(self) -> Optional[List[str]]:
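+        # all captured lines that report a TCP RST between loopback addresses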
+ if self._proc:
+ raise Exception('tcpdump still running')
+ lines = []
+ for l in open(self._stdoutfile).readlines():
+ if re.match(r'.* IP 127\.0\.0\.1\.\d+ [<>] 127\.0\.0\.1\.\d+:.*', l):
+ lines.append(l)
+ return lines
+
+ def stats_excluding(self, src_port) -> Optional[List[str]]:
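+        # like stats, but ignore RST packets originating from `src_port`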
+ if self._proc:
+ raise Exception('tcpdump still running')
+ lines = []
+ for l in self.stats:
+ if not re.match(r'.* IP 127\.0\.0\.1\.' + str(src_port) + ' >.*', l):
+ lines.append(l)
+ return lines
+
+ @property
+ def stderr(self) -> List[str]:
+ if self._proc:
+ raise Exception('tcpdump still running')
+        return open(self._stderrfile).readlines()
+
+ def sample(self):
+        # pick the loopback interface name; not sure how to make this
+        # detection reliable for all platforms
+        local_if = 'lo0' if sys.platform.startswith('darwin') else 'lo'
+ try:
+ tcpdump = self._env.tcpdump()
+ if tcpdump is None:
+ raise Exception('tcpdump not available')
+ # look with tcpdump for TCP RST packets which indicate
+ # we did not shut down connections cleanly
+ args = []
+ # at least on Linux, we need root permissions to run tcpdump
+ if sys.platform.startswith('linux'):
+ args.append('sudo')
+ args.extend([
+ tcpdump, '-i', local_if, '-n', 'tcp[tcpflags] & (tcp-rst)!=0'
+ ])
+ with open(self._stdoutfile, 'w') as cout:
+ with open(self._stderrfile, 'w') as cerr:
+ self._proc = subprocess.Popen(args, stdout=cout, stderr=cerr,
+ text=True, cwd=self._run_dir,
+ shell=False)
+ assert self._proc
+ assert self._proc.returncode is None
+ while self._proc:
+ try:
+ self._proc.wait(timeout=1)
+ except subprocess.TimeoutExpired:
+ pass
+ except Exception as e:
+ log.error(f'Tcpdump: {e}')
+
+ def start(self):
+ def do_sample():
+ self.sample()
+ t = Thread(target=do_sample)
+ t.start()
+
+ def finish(self):
+ if self._proc:
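+            # give tcpdump a moment to record packets arriving right at the end of the run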
+ time.sleep(1)
+ self._proc.terminate()
+ self._proc = None
+
+
class ExecResult:
def __init__(self, args: List[str], exit_code: int,
duration: Optional[timedelta] = None,
with_stats: bool = False,
exception: Optional[str] = None,
- profile: Optional[RunProfile] = None):
+ profile: Optional[RunProfile] = None,
+ tcpdump: Optional[RunTcpDump] = None):
self._args = args
self._exit_code = exit_code
self._exception = exception
self._stdout = stdout
self._stderr = stderr
self._profile = profile
+ self._tcpdump = tcpdump
self._duration = duration if duration is not None else timedelta()
self._response = None
self._responses = []
def profile(self) -> Optional[RunProfile]:
return self._profile
+ @property
+ def tcpdump(self) -> Optional[RunTcpDump]:
+ return self._tcpdump
+
@property
def response(self) -> Optional[Dict]:
return self._response
'h3': '--http3-only',
}
- def __init__(self, env: Env, run_dir: Optional[str] = None,
- timeout: Optional[float] = None, silent: bool = False):
+ def __init__(self, env: Env,
+ run_dir: Optional[str] = None,
+ timeout: Optional[float] = None,
+ silent: bool = False,
+ run_env: Optional[Dict[str, str]] = None):
self.env = env
self._timeout = timeout if timeout else env.test_timeout
self._curl = os.environ['CURL'] if 'CURL' in os.environ else env.curl
self._headerfile = f'{self._run_dir}/curl.headers'
self._log_path = f'{self._run_dir}/curl.log'
self._silent = silent
+ self._run_env = run_env
self._rmrf(self._run_dir)
self._mkpath(self._run_dir)
alpn_proto: Optional[str] = None,
def_tracing: bool = True,
with_stats: bool = False,
- with_profile: bool = False):
+ with_profile: bool = False,
+ with_tcpdump: bool = False):
return self._raw(url, options=extra_args,
with_stats=with_stats,
alpn_proto=alpn_proto,
def_tracing=def_tracing,
- with_profile=with_profile)
+ with_profile=with_profile,
+ with_tcpdump=with_tcpdump)
def http_download(self, urls: List[str],
alpn_proto: Optional[str] = None,
with_stats: bool = True,
with_headers: bool = False,
with_profile: bool = False,
+ with_tcpdump: bool = False,
no_save: bool = False,
extra_args: List[str] = None):
if extra_args is None:
return self._raw(urls, alpn_proto=alpn_proto, options=extra_args,
with_stats=with_stats,
with_headers=with_headers,
- with_profile=with_profile)
+ with_profile=with_profile,
+ with_tcpdump=with_tcpdump)
def http_upload(self, urls: List[str], data: str,
alpn_proto: Optional[str] = None,
with_stats: bool = True,
with_headers: bool = False,
with_profile: bool = False,
+ with_tcpdump: bool = False,
extra_args: Optional[List[str]] = None):
if extra_args is None:
extra_args = []
return self._raw(urls, alpn_proto=alpn_proto, options=extra_args,
with_stats=with_stats,
with_headers=with_headers,
- with_profile=with_profile)
+ with_profile=with_profile,
+ with_tcpdump=with_tcpdump)
def http_delete(self, urls: List[str],
alpn_proto: Optional[str] = None,
def ftp_get(self, urls: List[str],
with_stats: bool = True,
with_profile: bool = False,
+ with_tcpdump: bool = False,
no_save: bool = False,
extra_args: List[str] = None):
if extra_args is None:
return self._raw(urls, options=extra_args,
with_stats=with_stats,
with_headers=False,
- with_profile=with_profile)
+ with_profile=with_profile,
+ with_tcpdump=with_tcpdump)
def ftp_ssl_get(self, urls: List[str],
with_stats: bool = True,
with_profile: bool = False,
+ with_tcpdump: bool = False,
no_save: bool = False,
extra_args: List[str] = None):
if extra_args is None:
])
return self.ftp_get(urls=urls, with_stats=with_stats,
with_profile=with_profile, no_save=no_save,
+ with_tcpdump=with_tcpdump,
extra_args=extra_args)
def ftp_upload(self, urls: List[str], fupload,
with_stats: bool = True,
with_profile: bool = False,
+ with_tcpdump: bool = False,
extra_args: List[str] = None):
if extra_args is None:
extra_args = []
return self._raw(urls, options=extra_args,
with_stats=with_stats,
with_headers=False,
- with_profile=with_profile)
+ with_profile=with_profile,
+ with_tcpdump=with_tcpdump)
def ftp_ssl_upload(self, urls: List[str], fupload,
with_stats: bool = True,
with_profile: bool = False,
+ with_tcpdump: bool = False,
extra_args: List[str] = None):
if extra_args is None:
extra_args = []
])
return self.ftp_upload(urls=urls, fupload=fupload,
with_stats=with_stats, with_profile=with_profile,
+ with_tcpdump=with_tcpdump,
extra_args=extra_args)
def response_file(self, idx: int):
my_args.extend(args)
return self._run(args=my_args, with_stats=with_stats, with_profile=with_profile)
- def _run(self, args, intext='', with_stats: bool = False, with_profile: bool = True):
+ def _run(self, args, intext='', with_stats: bool = False,
+ with_profile: bool = True, with_tcpdump: bool = False):
self._rmf(self._stdoutfile)
self._rmf(self._stderrfile)
self._rmf(self._headerfile)
- started_at = datetime.now()
exception = None
profile = None
+ tcpdump = None
started_at = datetime.now()
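+        # optionally capture TCP RST packets on the loopback interface during the run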
+ if with_tcpdump:
+ tcpdump = RunTcpDump(self.env, self._run_dir)
+ tcpdump.start()
try:
with open(self._stdoutfile, 'w') as cout:
with open(self._stderrfile, 'w') as cerr:
if self._timeout else None
log.info(f'starting: {args}')
p = subprocess.Popen(args, stderr=cerr, stdout=cout,
- cwd=self._run_dir, shell=False)
+ cwd=self._run_dir, shell=False,
+ env=self._run_env)
profile = RunProfile(p.pid, started_at, self._run_dir)
if intext is not None and False:
p.communicate(input=intext.encode(), timeout=1)
p = subprocess.run(args, stderr=cerr, stdout=cout,
cwd=self._run_dir, shell=False,
input=intext.encode() if intext else None,
- timeout=self._timeout)
+ timeout=self._timeout,
+ env=self._run_env)
exitcode = p.returncode
except subprocess.TimeoutExpired:
now = datetime.now()
f'(configured {self._timeout}s): {args}')
exitcode = -1
exception = 'TimeoutExpired'
+ if tcpdump:
+ tcpdump.finish()
coutput = open(self._stdoutfile).readlines()
cerrput = open(self._stderrfile).readlines()
return ExecResult(args=args, exit_code=exitcode, exception=exception,
stdout=coutput, stderr=cerrput,
duration=datetime.now() - started_at,
with_stats=with_stats,
- profile=profile)
+ profile=profile, tcpdump=tcpdump)
def _raw(self, urls, intext='', timeout=None, options=None, insecure=False,
alpn_proto: Optional[str] = None,
with_stats=False,
with_headers=True,
def_tracing=True,
- with_profile=False):
+ with_profile=False,
+ with_tcpdump=False):
args = self._complete_args(
urls=urls, timeout=timeout, options=options, insecure=insecure,
alpn_proto=alpn_proto, force_resolve=force_resolve,
with_headers=with_headers, def_tracing=def_tracing)
r = self._run(args, intext=intext, with_stats=with_stats,
- with_profile=with_profile)
+ with_profile=with_profile, with_tcpdump=with_tcpdump)
if r.exit_code == 0 and with_headers:
self._parse_headerfile(self._headerfile, r=r)
if r.json:
import logging
import os
import re
+import shutil
import socket
import subprocess
import sys
except Exception as e:
self.vsftpd = None
+ self._tcpdump = shutil.which('tcpdump')
+
@property
def httpd_version(self):
if self._httpd_version is None and self.apxs is not None:
def vsftpd_version(self):
return self._vsftpd_version
+ @property
+    def tcpdump(self) -> Optional[str]:
+ return self._tcpdump
+
class Env:
def vsftpd_version() -> str:
return Env.CONFIG.vsftpd_version
+ @staticmethod
+ def tcpdump() -> Optional[str]:
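+        # full path of the tcpdump executable, or None if not installed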
+        return Env.CONFIG.tcpdump
+
def __init__(self, pytestconfig=None):
self._verbose = pytestconfig.option.verbose \
if pytestconfig is not None else 0
int i, chunks = 3, error_bucket = 1;
size_t chunk_size = sizeof(buffer);
const char *request_id = "none";
- apr_time_t delay = 0, chunk_delay = 0;
+ apr_time_t delay = 0, chunk_delay = 0, close_delay = 0;
apr_array_header_t *args = NULL;
int http_status = 200;
apr_status_t error = APR_SUCCESS, body_error = APR_SUCCESS;
+ int close_conn = 0, with_cl = 0;
if(strcmp(r->handler, "curltest-tweak")) {
return DECLINED;
continue;
}
}
+ else if(!strcmp("close_delay", arg)) {
+ rv = duration_parse(&close_delay, val, "s");
+ if(APR_SUCCESS == rv) {
+ continue;
+ }
+ }
+ }
+ else if(!strcmp("close", arg)) {
+ /* we are asked to close the connection */
+ close_conn = 1;
+ continue;
+ }
+ else if(!strcmp("with_cl", arg)) {
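+      /* respond with a Content-Length header instead of chunked encoding */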
+ with_cl = 1;
+ continue;
}
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "query parameter not "
"understood: '%s' in %s",
ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "error_handler: processing "
"request, %s", r->args? r->args : "(no args)");
r->status = http_status;
- r->clength = -1;
- r->chunked = (r->proto_num >= HTTP_VERSION(1,1));
+ r->clength = with_cl? (chunks * chunk_size) : -1;
+ r->chunked = (r->proto_num >= HTTP_VERSION(1,1)) && !with_cl;
apr_table_setn(r->headers_out, "request-id", request_id);
- apr_table_unset(r->headers_out, "Content-Length");
+ if(r->clength >= 0) {
+ apr_table_set(r->headers_out, "Content-Length",
+ apr_ltoa(r->pool, (long)r->clength));
+ }
+ else
+ apr_table_unset(r->headers_out, "Content-Length");
/* Discourage content-encodings */
apr_table_unset(r->headers_out, "Content-Encoding");
apr_table_setn(r->subprocess_env, "no-brotli", "1");
"error_handler: response passed");
cleanup:
+ if(close_conn) {
+ if(close_delay) {
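+      /* flush the response produced so far before delaying the close */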
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(r->output_filters, bb);
+ apr_brigade_cleanup(bb);
+ apr_sleep(close_delay);
+ }
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
- "error_handler: request cleanup, r->status=%d, aborted=%d",
- r->status, c->aborted);
+ "error_handler: request cleanup, r->status=%d, aborted=%d, "
+ "close=%d", r->status, c->aborted, close_conn);
if(rv == APR_SUCCESS) {
return OK;
}