}
/**
- * job walking the matching addr infos, createing a sub-cfilter with the
+ * job walking the matching addr infos, creating a sub-cfilter with the
* provided method `cf_create` and running setup/connect on it.
*/
struct eyeballer {
/**
* Create a cfilter for making an "ip" connection to the
- * given address, using paramters from `conn`. The "ip" connection
+ * given address, using parameters from `conn`. The "ip" connection
* can be a TCP socket, a UDP socket or even a QUIC connection.
*
* It MUST use only the supplied `ai` for its connection attempt.
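For orientation: the eyeballer drives curl's "happy eyeballs" connect, walking the resolved address list and keeping the first attempt that succeeds. A minimal stand-alone sketch of that walk, using plain Python sockets instead of cfilters (the function name, timeout value, and the sequential try order are illustrative only; curl races attempts with a stagger delay):

    import socket

    ATTEMPT_TIMEOUT = 2.0  # illustrative value, not a curl constant

    def connect_to_first(host, port):
        """Try each resolved address in turn and return the first
        socket that connects, honoring the contract above: each
        attempt uses only its own addrinfo entry."""
        last_err = None
        for family, socktype, proto, _, addr in socket.getaddrinfo(
                host, port, type=socket.SOCK_STREAM):
            s = socket.socket(family, socktype, proto)
            s.settimeout(ATTEMPT_TIMEOUT)
            try:
                s.connect(addr)  # only this one `ai`, as required
                return s
            except OSError as e:
                last_err = e
                s.close()
        raise last_err or OSError("no address worked")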
conn->bits.do_more = FALSE;
(void)curlx_nonblock(s, TRUE); /* enable non-blocking */
- /* Replace any filter on SECONDARY with one listeing on this socket */
+ /* Replace any filter on SECONDARY with one listening on this socket */
result = Curl_conn_tcp_accepted_set(data, conn, SECONDARYSOCKET, &s);
if(result)
return result;
/* store which command was sent */
ftpc->count1 = fcmd;
- /* Replace any filter on SECONDARY with one listeing on this socket */
+ /* Replace any filter on SECONDARY with one listening on this socket */
result = Curl_conn_tcp_listen_set(data, conn, SECONDARYSOCKET, &portsock);
if(result)
goto out;
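Both FTP hunks above concern the data connection on SECONDARYSOCKET: for active transfers curl first installs a filter on a listening socket (advertised via PORT/EPRT) and, once the server connects back, swaps it for a filter on the accepted socket. A rough stand-alone sketch of that listen-then-accept lifecycle with plain Python sockets (the helper names are invented; curl does this via Curl_conn_tcp_listen_set and Curl_conn_tcp_accepted_set):

    import socket

    def open_data_listener():
        """Bind an ephemeral port for the server to connect back to,
        i.e. what the client advertises in PORT/EPRT."""
        ls = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ls.bind(("0.0.0.0", 0))
        ls.listen(1)
        return ls

    def accept_data_connection(ls, timeout=30.0):
        """The accepted socket replaces the listener as the data
        connection, mirroring the filter swap in the hunks above."""
        ls.settimeout(timeout)
        data_sock, _peer = ls.accept()
        ls.close()  # only one data connection per transfer
        return data_sock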
r.check_responses(count=count, exp_status=200)
assert len(r.stats) == count, f'{r.stats}'
# reload will shut down the connection gracefully with GOAWAY
- # we expect to see a second connetion opened afterwards
+ # we expect to see a second connection opened afterwards
assert r.total_connects == 2
for idx, s in enumerate(r.stats):
if s['num_connects'] > 0:
r.check_responses(count=count, exp_status=200)
assert len(r.stats) == count, f'{r.stats}'
# reload will shut down the connection gracefully with GOAWAY
- # we expect to see a second connetion opened afterwards
+ # we expect to see a second connection opened afterwards
assert r.total_connects == 2
for idx, s in enumerate(r.stats):
if s['num_connects'] > 0:
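The two identical hunks fix the same comment in two test variants. The assertion next to them leans on curl's per-transfer statistics: every transfer reports num_connects, and their sum shows whether the GOAWAY really forced exactly one reconnect. An illustrative version of that check (the stats shape is assumed to be a list of per-transfer dicts, as in the tests; other fields are omitted):

    def total_connects(stats):
        """Sum newly opened connections across all transfers."""
        return sum(s['num_connects'] for s in stats)

    # transfers 0-4 share connection 1; after the GOAWAY, transfer 5
    # opens connection 2 and transfers 6-9 reuse it
    stats = [{'num_connects': 1}] + [{'num_connects': 0}] * 4 \
          + [{'num_connects': 1}] + [{'num_connects': 0}] * 4
    assert total_connects(stats) == 2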
r.check_responses(count=1, exp_status=200)
# download 50 files in 100 chunks a 100 bytes with 10ms delay between
- # prepend 100 file requests to warm up connection procesing limits
+ # prepend 100 file requests to warm up connection processing limits
# (Apache2 increases # of parallel processed requests after successes)
@pytest.mark.parametrize("proto", ['h2', 'h3'])
def test_04_02_100_100_10(self, env: Env, httpd, nghttpx, repeat, proto):
f'avg time of transfer: {t_avg} [{i_min}={t_min}, {i_max}={t_max}]'
# download 50 files in 1000 chunks a 10 bytes with 1ms delay between
- # prepend 100 file requests to warm up connection procesing limits
+ # prepend 100 file requests to warm up connection processing limits
# (Apache2 increases # of parallel processed requests after successes)
@pytest.mark.parametrize("proto", ['h2', 'h3'])
def test_04_03_1000_10_1(self, env: Env, httpd, nghttpx, repeat, proto):
f'avg time of transfer: {t_avg} [{i_min}={t_min}, {i_max}={t_max}]'
# download 50 files in 10000 chunks a 1 byte with 10us delay between
- # prepend 100 file requests to warm up connection procesing limits
+ # prepend 100 file requests to warm up connection processing limits
# (Apache2 increases # of parallel processed requests after successes)
@pytest.mark.parametrize("proto", ['h2', 'h3'])
def test_04_04_1000_10_1(self, env: Env, httpd, nghttpx, repeat, proto):
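All three 04_xx tests share one pattern: the server dribbles each response out as many small chunks with a pause in between, stressing the client's stream handling, while the 100 prepended warm-up requests merely coax Apache2 into raising its request parallelism. A stand-alone sketch of such a stuttering producer (the helper name and the numbers in the usage lines are illustrative; the real tests generate this server-side):

    import time

    def stuttered_chunks(total_chunks, chunk_size, delay_s):
        """Yield total_chunks payloads of chunk_size bytes, sleeping
        delay_s between them - e.g. 100 chunks of 100 bytes with a
        10ms delay for test_04_02."""
        for _ in range(total_chunks):
            yield b'x' * chunk_size
            time.sleep(delay_s)

    # small, fast demo values; test_04_02 uses (100, 100, 0.010)
    body = b''.join(stuttered_chunks(10, 100, 0.001))
    assert len(body) == 10 * 100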