---WWW::Curl
---NET::Curl
---Curl Corporation
-\bmanpages[^./&:-]:man pages
-\bmanpage[^si./&:-]:man page
+\bmanpages[^./;=&{:-]:man pages
+\bmanpage[^si./;=&{:-]:man page
valgrind.supp \
$(TESTSCRIPTS)
-# we have two variables here to make sure DIST_SUBDIRS won't get 'unit'
+# we have two variables here to make sure DIST_SUBDIRS does not get 'unit'
# added twice as then targets such as 'distclean' misbehave and try to
# do things twice in that subdir at times (and thus fails).
if BUILD_UNITTESTS
TEST_COMMON =
if CROSSCOMPILING
-TEST = @echo "NOTICE: we can't run the tests when cross-compiling!"
+TEST = @echo "NOTICE: we cannot run the tests when cross-compiling!"
PYTEST = $(TEST)
else # if not cross-compiling:
TEST_T = -a -w -t -j20
TEST_E = -a -w -e
-# ~<keyword> means that it will run all tests matching the keyword, but will
-# ignore their results (since these ones are likely to fail for no good reason)
+# ~<keyword> means that it runs all tests matching the keyword, but ignores
+# their results (since these ones are likely to fail for no good reason)
TEST_NF = -a -w -p ~flaky ~timing-dependent
# special target for CI use
sub allversions {
my ($file) = @_;
open(A, "<$file") ||
- die "can't open the versions file $file\n";
+ die "cannot open the versions file $file\n";
my $before = 1;
my $relcount;
while(<A>) {
} elsif(! -f "$CAPREFIX-ca.cacert" ||
! -f "$CAPREFIX-ca.key") {
- if($OPENSSL eq basename($OPENSSL)) { # has no dir component
+ if($OPENSSL eq basename($OPENSSL)) { # has no directory component
# find openssl in PATH
my $found = 0;
foreach(File::Spec->path()) {
countryName = "Country Name"
countryName_value = NN
organizationName = "Organization Name"
-organizationName_value = Edel Curl Arctic Illudium Research Cloud
+organizationName_value = Edel curl Arctic Illudium Research Cloud
commonName = "Common Name"
commonName_value = Northern Nowhere Trust Anchor
countryName = "Country Name is Northern Nowhere"
countryName_value = NN
organizationName = "Organization Name"
-organizationName_value = Edel Curl Arctic Illudium Research Cloud
+organizationName_value = Edel curl Arctic Illudium Research Cloud
commonName = "Common Name"
commonName_value = localhost
countryName = "Country Name is Northern Nowhere"
countryName_value = NN
organizationName = "Organization Name"
-organizationName_value = Edel Curl Arctic Illudium Research Cloud
+organizationName_value = Edel curl Arctic Illudium Research Cloud
commonName = "Common Name"
commonName_value = localhost
countryName = "Country Name is Northern Nowhere"
countryName_value = NN
organizationName = "Organization Name"
-organizationName_value = Edel Curl Arctic Illudium Research Cloud
+organizationName_value = Edel curl Arctic Illudium Research Cloud
commonName = "Common Name"
commonName_value = localhost.nn
countryName = "Country Name is Northern Nowhere"
countryName_value = NN
organizationName = "Organization Name"
-organizationName_value = Edel Curl Arctic Illudium Research Cloud
+organizationName_value = Edel curl Arctic Illudium Research Cloud
commonName = "Common Name"
commonName_value = localhost.nn
countryName = "Country Name is Northern Nowhere"
countryName_value = NN
organizationName = "Organization Name"
-organizationName_value = Edel Curl Arctic Illudium Research Cloud
+organizationName_value = Edel curl Arctic Illudium Research Cloud
commonName = "Common Name"
commonName_value = localhost.nn
countryName = "Country Name is Northern Nowhere"
countryName_value = NN
organizationName = "Organization Name"
-organizationName_value = Edel Curl Arctic Illudium Research Cloud
+organizationName_value = Edel curl Arctic Illudium Research Cloud
commonName = "Common Name"
commonName_value = localhost
countryName = "Country Name is Northern Nowhere"
countryName_value = NN
organizationName = "Organization Name"
-organizationName_value = Edel Curl Arctic Illudium Research Cloud
+organizationName_value = Edel curl Arctic Illudium Research Cloud
commonName = "Common Name"
commonName_value = localhost
LC_ALL=C.UTF-8
</setenv>
<name>
-HTTP over proxy with malformatted IDN host name
+HTTP over proxy with malformatted IDN hostname
</name>
-# This host name contains an invalid UTF-8 byte sequence that can't be
+# This hostname contains an invalid UTF-8 byte sequence that cannot be
# converted into an IDN name
<stdin>
url = "http://invalid-utf8-%hex[%e2%90]hex%.local/page/%TESTNUMBER"
LC_ALL=C.UTF-8
</setenv>
<name>
-HTTP over proxy with too long IDN host name
+HTTP over proxy with too long IDN hostname
</name>
<command>
http://too-long-IDN-name-c%hex[%c3%bc]hex%rl-r%hex[%c3%bc]hex%le%hex[%c3%9f]hex%-la-la-la-dee-da-flooby-nooby.local/page/%TESTNUMBER -x %HOSTIP:%NOLISTENPORT
ftp://%HOSTIP:%FTPPORT/%TESTNUMBER -T %LOGDIR/upload%TESTNUMBER -C -
</command>
<file name="%LOGDIR/upload%TESTNUMBER">
-this is the *****cr@p******** that we're gonna upload
+this is the *****cr@p******** that we are gonna upload
worx?
</file>
QUIT
</protocol>
<upload>
-cr@p******** that we're gonna upload
+cr@p******** that we are gonna upload
worx?
</upload>
ftp://%HOSTIP:%FTPPORT/%TESTNUMBER -T %LOGDIR/upload%TESTNUMBER -C -
</command>
<file name="%LOGDIR/upload%TESTNUMBER">
-this is the *****cr@p******** that we're gonna upload
+this is the *****cr@p******** that we are gonna upload
worx?
</file>
QUIT
</protocol>
<upload>
-this is the *****cr@p******** that we're gonna upload
+this is the *****cr@p******** that we are gonna upload
worx?
</upload>
# Verify data after the test has been "shot"
<verify>
-# curl doesn't do a HEAD request on the remote file so it has no idea whether
+# curl does not do a HEAD request on the remote file so it has no idea whether
# it can skip part of the file or not. Instead, it sends the entire file.
<protocol crlf="headers">
PUT /%TESTNUMBER HTTP/1.1
#
# Verify data after the test has been "shot"
<verify>
-# The server doesn't implement CONNECT for ftp, so this must be a failure test
+# The server does not implement CONNECT for ftp, so this must be a failure test
<errorcode>
56
</errorcode>
http://%HOSTIP:%HTTPPORT/bzz/%TESTNUMBER -T - -0
</command>
<stdin>
-this data can't be sent
+this data cannot be sent
</stdin>
</client>
http
</server>
<name>
-NO_PROXY test, with user name in URL
+NO_PROXY test, with username in URL
</name>
<setenv>
PWD
PRET RETR %TESTNUMBER
</protocol>
-# we expect that the server doesn't understand PRET
+# we expect that the server does not understand PRET
<errorcode>
84
</errorcode>
FTP PASV upload resume
</name>
<command>
-ftp://%HOSTIP:%FTPPORT/%TESTNUMBER -T %LOGDIR/upload%TESTNUMBER -C 40
+ftp://%HOSTIP:%FTPPORT/%TESTNUMBER -T %LOGDIR/upload%TESTNUMBER -C 41
</command>
<file name="%LOGDIR/upload%TESTNUMBER">
-this is the *****crap******** that we're gonna upload
+this is the *****crap******** that we are gonna upload
worx?
</file>
# Client-side
<client>
<name>
-Manpage syntax checks
+Man page syntax checks
</name>
<command type="perl">
// CPP comment ?
- /* comment doesn't end
+ /* comment does not end
</file>
</client>
smtp
</server>
<name>
-SMTP multipart with file name escaping
+SMTP multipart with filename escaping
</name>
<stdin crlf="yes">
From: different
s/^EPRT \|1\|(\S*)/EPRT \|1\|/
</strippart>
-# This test doesn't send a QUIT because the main state machine in multi.c
+# This test does not send a QUIT because the main state machine in multi.c
# triggers the timeout and sets the CURLE_OPERATION_TIMEDOUT error (28) for
# which the FTP disconnect code generically has to assume could mean the
# control the connection and thus it cannot send any command.
http
</server>
<name>
-HTTP cookie domains tailmatching the host name
+HTTP cookie domains tailmatching the hostname
</name>
<command>
http://example.fake/c/%TESTNUMBER http://bexample.fake/c/%TESTNUMBER -b %LOGDIR/injar%TESTNUMBER -x %HOSTIP:%HTTPPORT
</keywords>
</info>
-# This test is very similar to 1216, only that it sets the cookies from the
+# This test is similar to 1216, only that it sets the cookies from the
# first site instead of reading from a file
<reply>
<data crlf="headers">
digest
</features>
<name>
-HTTP with Digest authorization with user name needing escape
+HTTP with Digest authorization with username needing escape
</name>
<command>
http://%5cuser%22:password@%HOSTIP:%HTTPPORT/%TESTNUMBER --digest
# Server-side
<reply>
<servercmd>
-# Assuming there's nothing listening on port 1
+# Assuming there is nothing listening on port 1
REPLY EPSV 229 Entering Passive Mode (|||1|)
</servercmd>
<data>
http
</server>
<name>
-URL with 1000+ letter user name + password
+URL with 1000+ letter username + password
</name>
<command>
"%repeat[1000 x A]%:%repeat[1002 x B]%@%HOSTIP:%HTTPPORT/%TESTNUMBER"
<servercmd>
writedelay: 2000
</servercmd>
-# ~1200 bytes (so that they don't fit in two 512 byte chunks)
+# ~1200 bytes (so that they do not fit in two 512 byte chunks)
<data nocheck="yes">
012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
</data>
http
</server>
<name>
-URL with '#' at end of host name instead of '/'
+URL with '#' at end of hostname instead of '/'
</name>
<command>
--proxy http://%HOSTIP:%HTTPPORT http://test.remote.haxx.se.%TESTNUMBER:%HTTPPORT#@127.0.0.1/tricked.html no-scheme-url.com.%TESTNUMBER:%HTTPPORT#@127.127.127.127/again.html
http
</features>
<name>
-HTTP URL with space in host name
+HTTP URL with space in hostname
</name>
<command>
-g "http://127.0.0.1 www.example.com/we/want/%TESTNUMBER"
http
</server>
<name>
-HTTP with --resolve and same host name using different cases
+HTTP with --resolve and same hostname using different cases
</name>
<command>
--resolve MiXeDcAsE.cOm:%HTTPPORT:%HOSTIP http://MiXeDcAsE.cOm:%HTTPPORT/%TESTNUMBER http://mixedcase.com:%HTTPPORT/%TESTNUMBER0001
http
</server>
-# make sure there's no Expect: 100-continue when there's no file to send!
+# make sure there is no Expect: 100-continue when there is no file to send!
<name>
HTTP with zero size file PUT
</name>
http
</server>
<name>
-Reusing HTTP proxy connection for two different host names
+Reusing HTTP proxy connection for two different hostnames
</name>
<command>
--proxy http://%HOSTIP:%HTTPPORT http://test.remote.haxx.se.%TESTNUMBER:8990/ http://different.remote.haxx.se.%TESTNUMBER:8990
#
# Verify data after the test has been "shot"
<verify>
-# Couldn't resolve proxy name
+# Could not resolve proxy name
<errorcode>
5
</errorcode>
LC_ALL=C.UTF-8
</setenv>
<name>
-Redirect following to UTF-8 IDN host name
+Redirect following to UTF-8 IDN hostname
</name>
<command>
#
# Verify data after the test has been "shot"
<verify>
-# TFTP file name too long
+# TFTP filename too long
<errorcode>
71
</errorcode>
Content-Type: text/html
Funny-head: yesyes
-These data aren't actually sent to the client
+These data are not actually sent to the client
</data>
</reply>
socks5unix
</server>
<name>
-HTTP GET with host name using SOCKS5h via Unix sockets
+HTTP GET with hostname using SOCKS5h via Unix sockets
</name>
<command>
http://this.is.a.host.name:%HTTPPORT/%TESTNUMBER --proxy socks5h://localhost%SOCKSUNIXPATH
socks5unix
</server>
<name>
-HTTPS GET with host name using SOCKS5h via Unix sockets
+HTTPS GET with hostname using SOCKS5h via Unix sockets
</name>
<command>
https://this.is.a.host.name:%HTTPSPORT/%TESTNUMBER --insecure --proxy socks5h://localhost%SOCKSUNIXPATH
#
# Verify data after the test has been "shot"
<verify>
-# Couldn't resolve host name
+# Could not resolve hostname
<errorcode>
6
</errorcode>
#
# Verify data after the test has been "shot"
<verify>
-# Couldn't resolve host name
+# Could not resolve hostname
<errorcode>
6
</errorcode>
file
</server>
<name>
-file:// don't overwrite self with --skip-existing
+file:// do not overwrite self with --skip-existing
</name>
<command option="no-output">
file://localhost%FILE_PWD/%LOGDIR/test%TESTNUMBER.txt -o %LOGDIR/test%TESTNUMBER.txt --skip-existing
Server: swsclose
Content-Type: text/html
-This contains a response code >= 400, so curl shouldn't display this. Even
-though it's a response code that triggers authentication, we're not using
+This contains a response code >= 400, so curl should not display this. Even
+though it is a response code that triggers authentication, we are not using
authentication so we should still fail.
</data>
</reply>
return failure immediately from progress callback
</name>
-# this server/host won't be used for real
+# this server/host will not be used for real
<command>
http://%HOSTIP:%HTTPPORT/%TESTNUMBER
</command>
</info>
<reply>
-# Close the connection after the first request but don't tell the client to do
+# Close the connection after the first request but do not tell the client to do
# so! When starting the second request it'll detect a dead connection and must
# not clean the DNS entries added manually.
<data1>
Server: swsclose
Content-Type: text/html
-This contains a response code >= 400, so curl shouldn't display this. Even
-though it's a response code that triggers authentication, we're not using
+This contains a response code >= 400, so curl should not display this. Even
+though it is a response code that triggers authentication, we are not using
authentication so we should still fail.
</data>
</reply>
lib%TESTNUMBER
</tool>
-# this MUST use a host name that doesn't resolve
+# this MUST use a hostname that does not resolve
<command>
imap://non-existing-host.haxx.se:%IMAPPORT/%TESTNUMBER
</command>
verify api is protected against calls from callbacks
</name>
-# this server/host won't be used for real
+# this server/host will not be used for real
<command>
http://%HOSTIP:%HTTPPORT/%TESTNUMBER
</command>
</tool>
<name>
-Remove easy handle in pending connections doesn't leave dangling entry
+Remove easy handle in pending connections does not leave dangling entry
</name>
<command>
hostname.invalid
https
</killserver>
<name>
-Cookies set over HTTP can't override secure ones
+Cookies set over HTTP cannot override secure ones
</name>
<command>
--insecure https://%HOSTIP:%HTTPSPORT/%TESTNUMBER0001 -L -c %LOGDIR/jar%TESTNUMBER.txt -H "Host: www.example.com" http://%HOSTIP:%HTTPPORT/%TESTNUMBER0002 -L -c %LOGDIR/jar%TESTNUMBER.txt -H "Host: www.example.com"
lib1553
</tool>
-# it is important this uses a host name that resolves successfully
+# it is important this uses a hostname that resolves successfully
<command>
imap://localhost:%IMAPPORT/%TESTNUMBER
</command>
lib%TESTNUMBER
</tool>
<name>
-HTTP request, remove handle while resolving, don't block
+HTTP request, remove handle while resolving, do not block
</name>
<command>
# Verify data after the test has been "shot"
<verify>
-# This doesn't send QUIT because of known bug:
+# This does not send QUIT because of known bug:
# "7.8 Premature transfer end but healthy control channel"
<protocol crlf="yes">
USER anonymous
Server: swsclose
Content-Type: text/html
-Even though it's the response code that triggers authentication, we're
-using NTLM and the server isn't, so we should fail. We know the server
-isn't because there's no Proxy-Authorization: NTLM header
+Even though it is the response code that triggers authentication, we are
+using NTLM and the server is not, so we should fail. We know the server
+is not because there is no Proxy-Authorization: NTLM header
</data1001>
</reply>
<verify>
# The second CONNECT will be made to the dynamic port number the FTP server
-# opens for us, so we can't compare with a known pre-existing number!
+# opens for us, so we cannot compare with a known pre-existing number!
<strippart>
s/((https.proxy):(\d+))/$2:12345/
</strippart>
<verify>
# The second and third CONNECT will be made to the dynamic port number the FTP
-# server opens for us, so we can't compare with known pre-existing numbers!
+# server opens for us, so we cannot compare with known pre-existing numbers!
<strippart>
s/((https.proxy):(\d+))/$2:12345/
LC_ALL=C.UTF-8
</setenv>
<name>
-HTTP over proxy with IDN host name
+HTTP over proxy with IDN hostname
</name>
<command>
http://www.%hex[%c3%a5%c3%a4%c3%b6]hex%.se/page/%TESTNUMBER -x %HOSTIP:%HTTPPORT http://www.gro%hex[%c3%9f]hex%e.de/page/%TESTNUMBER
http
</server>
<name>
-HTTP redirect with bad host name separation and slash in parameters
+HTTP redirect with bad hostname separation and slash in parameters
</name>
<command>
http://%HOSTIP:%HTTPPORT?oh=what-weird=test/%TESTNUMBER -L
http
</server>
<name>
-HTTP GET with resume and redirect (to a page that doesn't resume)
+HTTP GET with resume and redirect (to a page that does not resume)
</name>
<command>
http://%HOSTIP:%HTTPPORT/%TESTNUMBER -C 50 -L
# Verify data after the test has been "shot"
<verify>
-# 7 CURLE_COULDNT_CONNECT (expected since there's nothing listening there)
+# 7 CURLE_COULDNT_CONNECT (expected since there is nothing listening there)
# 42 CURLE_ABORTED_BY_CALLBACK
<errorcode>
42
# Client-side
<client>
-# require HTTP too as otherwise CURLOPT_POST doesn't exist
+# require HTTP too as otherwise CURLOPT_POST does not exist
<features>
mqtt
http
http
</features>
<name>
-attempt connect to non-existing host name
+attempt connect to non-existing hostname
</name>
<command>
--ipv4 non-existing-host.haxx.se.
<reply>
<!-- Alternate the order that Basic and Digest headers appear in responses to
-ensure that the order doesn't matter. -->
+ensure that the order does not matter. -->
<!-- First request has Basic auth, wrong password -->
<data100 crlf="headers">
<reply>
<!-- Alternate the order that Basic and NTLM headers appear in responses to
-ensure that the order doesn't matter. -->
+ensure that the order does not matter. -->
<!-- First request has Basic auth, wrong password -->
<data100 crlf="headers">
<reply>
<!-- Alternate the order that Basic and Digest headers appear in responses to
-ensure that the order doesn't matter. -->
+ensure that the order does not matter. -->
<!-- First request has Digest auth, wrong password -->
<data100 crlf="headers">
Explanation for the duplicate 400 requests:
- libcurl doesn't detect that a given Digest password is wrong already on the
+ libcurl does not detect that a given Digest password is wrong already on the
first 401 response (as the data400 gives). libcurl will instead consider the
new response just as a duplicate and it sends another and detects the auth
problem on the second 401 response!
<reply>
<!-- Alternate the order that Digest and NTLM headers appear in responses to
-ensure that the order doesn't matter. -->
+ensure that the order does not matter. -->
<!-- First request has Digest auth, wrong password -->
<data100 crlf="headers">
<reply>
<!-- Alternate the order that Basic and NTLM headers appear in responses to
-ensure that the order doesn't matter. -->
+ensure that the order does not matter. -->
<!-- First request has NTLM auth, wrong password -->
<data100 crlf="headers">
<reply>
<!-- Alternate the order that Digest and NTLM headers appear in responses to
-ensure that the order doesn't matter. -->
+ensure that the order does not matter. -->
<!--
Explanation for the duplicate 400 requests:
- libcurl doesn't detect that a given Digest password is wrong already on the
+ libcurl does not detect that a given Digest password is wrong already on the
first 401 response (as the data400 gives). libcurl will instead consider the
new response just as a duplicate and it sends another and detects the auth
problem on the second 401 response!
LC_ALL=C.UTF-8
</setenv>
<name>
-Connection reuse with IDN host name
+Connection reuse with IDN hostname
</name>
<command>
LC_ALL=C.UTF-8
</setenv>
<name>
-Connection reuse with IDN host name over HTTP proxy
+Connection reuse with IDN hostname over HTTP proxy
</name>
<command>
override-dns
</features>
<name>
-HTTP GET with host name
+HTTP GET with hostname
</name>
<setenv>
CURL_DNS_SERVER=127.0.0.1:%DNSPORT
override-dns
</features>
<name>
-HTTP GET with bad host name
+HTTP GET with bad hostname
</name>
<setenv>
CURL_DNS_SERVER=127.0.0.1:%DNSPORT
override-dns
</features>
<name>
-Get three URLs with bad host name - cache
+Get three URLs with bad hostname - cache
</name>
<setenv>
CURL_DNS_SERVER=127.0.0.1:%DNSPORT
mqtt
</server>
<name>
-MQTT with very long user name
+MQTT with 64 KiB long username
</name>
<file name="%LOGDIR/input%TESTNUMBER">
user = %repeat[65536 x a]%:fakepasswd
</client>
#
-# PONG with no data and the 32 bit mask
+# PONG with no data and the 32-bit mask
#
<verify>
<protocol crlf="yes" nonewline="yes" nocheck="yes">
</client>
#
-# PONG with no data and the 32 bit mask
+# PONG with no data and the 32-bit mask
#
<verify>
<protocol crlf="headers" nocheck="yes">
ftp://%HOSTIP:%FTPPORT/%TESTNUMBER -T %LOGDIR/upload%TESTNUMBER -C -
</command>
<file name="%LOGDIR/upload%TESTNUMBER">
-this is the *****crap******** that we're gonna upload
+this is the *****crap******** that we are gonna upload
worx?
</file>
QUIT
</protocol>
<upload>
-this is the *****crap******** that we're gonna upload
+this is the *****crap******** that we are gonna upload
worx?
</upload>
HTTP/1.0 404 BAD BOY
Content-Type: text/html
-This silly page doesn't reaaaaaly exist so you should not get it.
+This silly page does not reaaaaaly exist so you should not get it.
</data>
</reply>
</info>
# Server-side
<reply>
-# The stupid test server doesn't response anything at all until the full
+# The stupid test server does not respond with anything at all until the full
# request has been sent, and then of course the full POST has already been
# sent!
<data>
</info>
# Server-side
<reply>
-# The stupid test server doesn't response anything at all until the full
+# The stupid test server does not respond with anything at all until the full
# request has been sent, and then of course the full POST has already been
# sent!
<data>
<name>
GET a directory using file://
</name>
-<!-- doesn't work on win32, see #6379 -->
+<!-- does not work on win32, see #6379 -->
<features>
!win32
</features>
Content-Type: text/html
Funny-head: yesyes
-These data aren't actually sent to the client
+These data are not actually sent to the client
</data>
</reply>
<name>
GET a directory using file://
</name>
-<!-- doesn't work on win32, see #6379 -->
+<!-- does not work on win32, see #6379 -->
<features>
!win32
</features>
http
</server>
-# we're actually more interested in any compression support but this is the
+# we are actually more interested in any compression support but this is the
# best we can do right now
<features>
libz
http
</server>
<name>
-HTTP with cookie using host name 'moo'
+HTTP with cookie using hostname 'moo'
</name>
<command>
-x http://%HOSTIP:%HTTPPORT http://moo/we/want/%TESTNUMBER -b none http://moo/we/want/%TESTNUMBER0002
ftp
</server>
<name>
-FTP range download when SIZE doesn't work
+FTP range download when SIZE does not work
</name>
<command>
ftp://%HOSTIP:%FTPPORT/%TESTNUMBER --range 3-6
HTTP/1.0 404 BAD BOY swsclose
Content-Type: text/html
-This silly page doesn't reaaaaaly exist so you should not get it.
+This silly page does not reaaaaaly exist so you should not get it.
</data>
</reply>
</info>
# Server-side
<reply>
-# 417 means the server didn't like the Expect header
+# 417 means the server did not like the Expect header
<data>
HTTP/1.1 417 BAD swsbounce
Date: Tue, 09 Nov 2010 14:49:00 GMT
HTTP/1.0 404 BAD BOY swsclose
Content-Type: text/html
-This silly page doesn't reaaaaaly exist so you should not get it.
+This silly page does not reaaaaaly exist so you should not get it.
</data>
</reply>
http
</server>
<name>
-Empty user name provided in URL
+Empty username provided in URL
</name>
<command>
http://:example@%HOSTIP:%HTTPPORT/%TESTNUMBER
ftp
</server>
<name>
-pick netrc password based on user name in URL
+pick netrc password based on username in URL
</name>
<command>
<command>
-4 http://curlmachine.localhost:%HTTPPORT/%TESTNUMBER
</command>
-# Ensure that we're running on localhost
+# Ensure that we are running on localhost
</client>
#
http
</server>
<name>
-65536 bytes long host name in URL
+65536 bytes long hostname in URL
</name>
<file name="%LOGDIR/input%TESTNUM">
url = http://%repeat[65536 x a]%/399
https
</server>
<name>
-HTTPS GET with very long request header
+HTTPS GET with 49 KB long request header
</name>
# 14 characters repeated 3500 times makes 49000 bytes
<file name="%LOGDIR/file%TESTNUMBER">
http
</server>
<name>
-alt-svc using host name with trailing dot in URL
+alt-svc using hostname with trailing dot in URL
</name>
<setenv>
# make Debug-curl accept Alt-Svc over plain HTTP
http
</server>
<name>
-alt-svc using host name with trailing dot on host from file
+alt-svc using hostname with trailing dot on host from file
</name>
<setenv>
# make Debug-curl accept Alt-Svc over plain HTTP
</file>
<name>
-HSTS with trailing-dot host name in URL but none in hsts file
+HSTS with trailing-dot hostname in URL but none in hsts file
</name>
<command>
-x http://%HOSTIP:%HTTPPORT http://this.hsts.example./%TESTNUMBER --hsts %LOGDIR/input%TESTNUMBER -w '%{url_effective}\n'
</file>
<name>
-HSTS with no t-dot host name in URL but t-dot in file
+HSTS with no t-dot hostname in URL but t-dot in file
</name>
<command>
-x http://%HOSTIP:%HTTPPORT http://this.hsts.example/%TESTNUMBER --hsts %LOGDIR/input%TESTNUMBER -w '%{url_effective}\n'
VALUE2=curl
</setenv>
<name>
-variable expand the file name with --expand-output
+variable expand the filename with --expand-output
</name>
<file name="%LOGDIR/cmd">
--variable %FUNVALUE
<server>
http
</server>
-# don't run this with the threaded-resolver or c-ares since the events might
+# do not run this with the threaded-resolver or c-ares since the events might
# trigger in a different order!
<features>
!threaded-resolver
http
</server>
<name>
-multi interface get with non-existing host name
+multi interface get with non-existing hostname
</name>
<tool>
lib%TESTNUMBER
# This test case previously tested an overflow case ("2094 Nov 6 =>
# 2147483647") for 32-bit time_t, but since some systems have 64-bit time_t and
-# handles this (returning 3939840000), and some 64-bit time_t systems don't
-# handle this and return -1 for this, it turned very tricky to write a fine
-# test case and thus it is now removed until we have a way to write test cases
+# handles this (returning 3939840000), and some 64-bit time_t systems do not
+# handle this and return -1 for this, it turned tricky to write a fine test
+# case and thus it is now removed until we have a way to write test cases
# for this kind of things.
</verify>
</command>
<file name="%LOGDIR/upload%TESTNUMBER">
Moooooooooooo
-don't upload this
+do not upload this
</file>
</client>
# Verify data after the test has been "shot"
#
-# There's no MTDM in the protocol here since this code doesn't ask for the
+# There is no MDTM in the protocol here since this code does not ask for the
# time/date of the file
<verify>
<protocol crlf="yes">
# Verify data after the test has been "shot"
#
-# There's no MTDM in the protocol here since this code doesn't ask for the
+# There is no MDTM in the protocol here since this code does not ask for the
# time/date of the file
<verify>
<stdout>
Content-Type: multipart/form-data; boundary=----------------------------%CR
%CR
------------------------------%CR
-Content-Disposition: form-data; name="sendfile alternative"; filename="file name 2"%CR
+Content-Disposition: form-data; name="sendfile alternative"; filename="filename 2 "%CR
%CR
this is what we post to the silly web server
%CR
FTP a type=A URL and CURLOPT_PORT set
</name>
-# note that we need quotes around the URL below to make sure the shell doesn't
+# note that we need quotes around the URL below to make sure the shell does not
# treat the semicolon as a separator!
<command>
'ftp://%HOSTIP:23456/%TESTNUMBER;type=A' %FTPPORT
# Verify data after the test has been "shot"
#
-# There's no MTDM in the protocol here since this code doesn't ask for the
+# There is no MDTM in the protocol here since this code does not ask for the
# time/date of the file
<verify>
<protocol crlf="yes">
<setenv>
ftp_proxy=http://%HOSTIP:%HTTPPORT/
</setenv>
-# note that we need quotes around the URL below to make sure the shell doesn't
+# note that we need quotes around the URL below to make sure the shell does not
# treat the semicolon as a separator!
<command>
"ftp://%HOSTIP:23456/%TESTNUMBER;type=A" %FTPPORT
</tool>
<name>
-small chunked HTTP POSTs with digest auth. and progress callback
+small chunked HTTP POSTs with digest auth and progress callback
</name>
<command>
http://%HOSTIP:%HTTPPORT/%TESTNUMBER %LOGDIR/ip%TESTNUMBER
</name>
# The command here uses 'localhost' just to make sure that curl_multi_perform
-# won't reach too far in the first invoke. When using c-ares at least, the
-# name resolve will cause it to return rather quickly and thus we could trigger
-# the problem we're looking to verify.
+# does not reach too far in the first invoke. When using c-ares at least, the
+# name resolve causes it to return rather quickly and thus we could trigger
+# the problem we are looking to verify.
<command>
sftp://localhost:%SSHPORT%SFTP_PWD/%LOGDIR/upload%TESTNUMBER.txt %USER: %LOGDIR/server/curl_client_key.pub %LOGDIR/server/curl_client_key
</command>
<testcase>
#
# This test is exactly like 525 but the server rejects the EPRT command.
-# Written up to make sure that there's nothing in the multi interface
+# Written up to make sure that there is nothing in the multi interface
# active connection case that differs between PORT and EPRT use
#
<info>
lib525
</tool>
<name>
-FTP PORT upload using multi interface, EPRT doesn't work
+FTP PORT upload using multi interface, EPRT does not work
</name>
<command>
ftp://%HOSTIP:%FTPPORT/path/%TESTNUMBER %LOGDIR/upload%TESTNUMBER
Content-Type: multipart/form-data; boundary=----------------------------%CR
%CR
------------------------------%CR
-Content-Disposition: form-data; name="sendfile alternative"; filename="file name 2"%CR
+Content-Disposition: form-data; name="sendfile alternative"; filename="filename 2 "%CR
%CR
dummy
%CR
%CR
8a%CR
------------------------------%CR
-Content-Disposition: form-data; name="sendfile alternative"; filename="file name 2"%CR
+Content-Disposition: form-data; name="sendfile alternative"; filename="filename 2 "%CR
%CR
d%CR
1%CR
<command>
https://localhost:%HTTPSPORT/%TESTNUMBER %CERTDIR/certs/test-ca.crt
</command>
-# Ensure that we're running on localhost because we're checking the host name
+# Ensure that we are running on localhost because we are checking the hostname
<precheck>
%LIBTESTS lib%TESTNUMBER check
</precheck>
proxy
</features>
<name>
-SOCKS5 proxy with too long user name
+SOCKS5 proxy with too long username
</name>
# it should never connect to the target server
SOCKS5 proxy auth
</name>
-# target a port that won't work without the SOCKS magic
+# target a port that does not work without the SOCKS magic
<command>
http://%HOSTIP:1/%TESTNUMBER -x socks5://uz3r:p4ssworm@%HOSTIP:%SOCKSPORT
</command>
socks5
</server>
<name>
-HTTP GET with host name using SOCKS5h
+HTTP GET with hostname using SOCKS5h
</name>
<command>
http://this.is.a.host.name:%HTTPPORT/%TESTNUMBER --proxy socks5h://%HOSTIP:%SOCKSPORT
</server>
<setenv>
#
-# Set a home that doesn't have a ".ipfs" folder. %PWD should be good.
+# Set a home that does not have a ".ipfs" folder. %PWD should be good.
# This is to prevent the automatic gateway detection from finding a gateway file in your home folder.
HOME=%PWD
</setenv>
</client>
#
-# Verify with no gateway url and no auto detection
+# Verify with no gateway URL and no auto detection
<verify>
<errorcode>
37
socks4
</server>
<name>
-SOCKS4 with very long proxy user name
+SOCKS4 with long proxy username
</name>
<command>
http://fake --limit-rate 1 -x socks4a://%repeat[1015 x a]%@%HOSTIP:%SOCKSPORT
http
</server>
<name>
-IPNS path and query args for gateway and IPFS url (malformed gateway url)
+IPNS path and query args for gateway and IPFS URL (malformed gateway URL)
</name>
<command>
--ipfs-gateway "http://%HOSTIP:%HTTPPORT/some/path?biz=baz" "ipns://fancy.tld/a/b?foo=bar&aaa=bbb"
http
</server>
<name>
-SOCKS5-hostname with max length credentials and max host name length
+SOCKS5-hostname with max length credentials and max hostname length
</name>
-# target a port that won't work without the SOCKS magic
+# target a port that does not work without the SOCKS magic
<command>
http://%repeat[254 x c]%:%HTTPPORT -x socks5h://%repeat[255 x a]%:%repeat[255 x b]%@%HOSTIP:%SOCKSPORT
</command>
http
</server>
<name>
---retry and -f on a HTTP 404 response
+--retry and -f on an HTTP 404 response
</name>
<command>
http://%HOSTIP:%HTTPPORT/%TESTNUMBER -f --retry 1
http
</server>
<name>
-HTTP with NTLM with too long user name
+HTTP with NTLM with too long username
</name>
<command>
http://%HOSTIP:%HTTPPORT/%TESTNUMBER -u testuser%repeat[1100 x A]%:testpass --ntlm
imap
</server>
<name>
-IMAP doesn't perform SELECT if reusing the same mailbox
+IMAP does not perform SELECT if reusing the same mailbox
</name>
<command>
'imap://%HOSTIP:%IMAPPORT/%TESTNUMBER/;MAILINDEX=123/;SECTION=1' 'imap://%HOSTIP:%IMAPPORT/%TESTNUMBER/;MAILINDEX=456/;SECTION=2.3' -u user:secret
imap
</server>
<name>
-IMAP custom request doesn't check continuation data
+IMAP custom request does not check continuation data
</name>
<command>
imap://%HOSTIP:%IMAPPORT/%TESTNUMBER/ -u user:secret -X 'FETCH 123 BODY[1]'
<command>
'imap://%HOSTIP:%IMAPPORT/%TESTNUMBER/;MAILINDEX=1' -u user --oauth2-bearer mF_9.B5f-4.1JqM
</command>
-# The protocol section doesn't support ways of specifying the raw data in the
+# The protocol section does not support ways of specifying the raw data in the
# base64 encoded message so we must assert this
</client>
#
# Verify data after the test has been "shot". Note that the command line
-# will write both responses into the same file name so only the second
-# survives
+# writes both responses into the same filename so only the second survives
#
<verify>
<file name="%LOGDIR/dumpit%TESTNUMBER-#2.dump" crlf="headers">
###########################################################################
# This script is intended for developers to test some internals of the
-# runtests.pl harness. Don't try to use this unless you know what you're
+# runtests.pl harness. Do not try to use this unless you know what you are
# doing!
# An example command-line that starts a test http server for test 11 and waits
# for the user before stopping it:
# ./devtest.pl --verbose serverfortest https echo "Started https" protoport https preprocess 11 pause echo Stopping stopservers echo Done
-# curl can connect to the server while it's running like this:
+# curl can connect to the server while it is running like this:
# curl -vkL https://localhost:<protoport>/11
use strict;
# Generate a "proto-ipv6" version of each protocol to match the
# IPv6 <server> name and a "proto-unix" to match the variant which
- # uses Unix domain sockets. This works even if support isn't
+ # uses Unix domain sockets. This works even if support is not
# compiled in because the <features> test will fail.
push @protocols, map(("$_-ipv6", "$_-unix"), @protocols);
parser.add_argument("--verbose", action="store", type=int, default=0,
help="verbose output")
parser.add_argument("--pidfile", action="store",
- help="file name for the PID")
+ help="filename for the PID")
parser.add_argument("--logfile", action="store",
- help="file name for the log")
+ help="filename for the log")
parser.add_argument("--srcdir", action="store", help="test directory")
parser.add_argument("--id", action="store", help="server ID")
parser.add_argument("--ipv4", action="store_true", default=0,
#
# Run some tests against servers we know to support ECH (CF, defo.ie, etc.).
-# as well as some we know don't do ECH but have an HTTPS RR, and finally some
+# as well as some we know do not do ECH but have an HTTPS RR, and finally some
# for which neither is the case.
# TODO: Translate this into something that approximates a valid curl test:-)
# set -x
-# Exit with an error if there's an active ech stanza in ~/.curlrc
-# as that'd likely skew some results (e.g. turning a fail into a
-# success or vice versa)
+# Exit with an error if there is an active ech stanza in ~/.curlrc
+# as that would likely skew some results (e.g. turning a fail into
+# a success or vice versa)
: "${CURL_CFG_FILE=$HOME/.curlrc}"
active_ech=$(grep ech "$CURL_CFG_FILE" | grep -v "#.*ech")
if [[ "$active_ech" != "" ]]; then
# Variables that can be over-ridden from environment
#
-# Top of curl test tree, assume we're there
+# Top of curl test tree, assume we are there
: "${CTOP:=.}"
# Place to put test log output
mkdir -p "$LTOP"
fi
if [ ! -d "$LTOP" ]; then
- echo "Can't see $LTOP for logs - exiting"
+ echo "Cannot see $LTOP for logs - exiting"
exit 1
fi
logfile=$LTOP/${BINNAME}_$NOW.log
echo "Running $0 at $NOW" >> "$logfile"
echo "Running $0 at $NOW"
-# check we have the binaries needed and which TLS library we'll be using
+# check we have the binaries needed and which TLS library we will be using
if [ -f "$OSSL"/libssl.so ]; then
have_ossl="yes"
fi
if ((wolf_cnt == 1)); then
using_wolf="yes"
# for some reason curl+wolfSSL dislikes certs that are ok
- # for browsers, so we'll test using "insecure" mode (-k)
- # but that's ok here as we're only interested in ECH testing
+ # for browsers, so we will test using "insecure" mode (-k)
+ # but that is ok here as we are only interested in ECH testing
CURL_PARAMS+=(-k)
fi
# check if we have dig and it knows https or not
if [[ $digout != "1 . "* ]]; then
digout=$($digcmd -t TYPE65 defo.ie)
if [[ $digout == "1 . "* ]]; then
- # we're good
+ # we are good
have_presout="yes"
fi
else
fi
# Check if ports other than 443 are blocked from this
-# vantage point (I run tests in a n/w where that's
+# vantage point (I run tests in a n/w where that is
# sadly true sometimes;-)
# echo "Checking if ports other than 443 are maybe blocked"
not443testurl="https://draft-13.esni.defo.ie:9413/"
echo "ports != 443 blocked: $have_portsblocked"
if [[ "$have_curl" == "no" ]]; then
- echo "Can't proceed without curl - exiting"
+ echo "Cannot proceed without curl - exiting"
exit 32
fi
continue
fi
if [[ "$host" == "cloudflare-ech.com" ]]; then
- echo "Skipping $host as they've blocked PN override"
+ echo "Skipping $host as they have blocked PN override"
continue
fi
path=${ech_targets[$targ]}
echo "" >> "$logfile"
done
-# Check various command line options, if we're good so far
+# Check various command line options, if we are good so far
if [[ "$using_ossl" == "yes" && "$allgood" == "yes" ]]; then
- # use this test URL as it'll tell us if things worked
+ # use this test URL as it will tell us if things worked
turl="https://defo.ie/ech-check.php"
echo "cli_test with $turl"
echo "cli_test with $turl" >> "$logfile"
fi # skip
-# Check combinations of command line options, if we're good so far
-# Most of this only works for OpenSSL, which is ok, as we're checking
+# Check combinations of command line options, if we are good so far
+# Most of this only works for OpenSSL, which is ok, as we are checking
# the argument handling here, not the ECH protocol
if [[ "$using_ossl" == "yes" && "$allgood" == "yes" ]]; then
# ech can be hard, true, grease or false
cli_test "$turl" 1 1 --ech true --ech pn:"$goodpn"
[ "$allgood" != "yes" ] && echo "$LINENO"
- # a target URL that doesn't support ECH
+ # a target URL that does not support ECH
turl="https://tcd.ie"
echo "cli_test with $turl"
echo "cli_test with $turl" >> "$logfile"
- # the params below don't matter much here as we'll fail anyway
+ # the params below do not matter much here as we will fail anyway
echconfiglist=$(get_ech_configlist defo.ie)
goodecl=$echconfiglist
badecl="$goodecl"
echo "NOT all good, log in $logfile"
fi
-# send a mail to root (will be fwd'd) but just once every 24 hours
+# send a mail to root (will be forwarded) but just once every 24 hours
# 'cause we only really need "new" news
itsnews="yes"
age_of_news=0
if [ -f "$LTOP"/bad_runs ]; then
age_of_news=$(fileage "$LTOP"/bad_runs)
- # only consider news "new" if we haven't mailed today
+ # only consider news "new" if we have not mailed today
if ((age_of_news < 24*3600)); then
itsnews="no"
fi
# protocols simultaneously.
#
# It is meant to exercise curl, it is not meant to be a fully working
-# or even very standard compliant server.
+# or even overly standards-compliant server.
#
# You may optionally specify port on the command line, otherwise it'll
# default to port 8921.
my $listenaddr = '127.0.0.1'; # default address for listener port
#**********************************************************************
-# global vars used for file names
+# global vars used for filenames
#
-my $PORTFILE="ftpserver.port"; # server port file name
+my $PORTFILE="ftpserver.port"; # server port filename
my $portfile; # server port file path
-my $pidfile; # server pid file name
+my $pidfile; # server pid filename
my $mainsockf_pidfile; # pid file for primary connection sockfilt process
my $mainsockf_logfile; # log file for primary connection sockfilt process
my $datasockf_pidfile; # pid file for secondary connection sockfilt process
#
my $ctrldelay; # set if server should throttle ctrl stream
my $datadelay; # set if server should throttle data stream
-my $retrweirdo; # set if ftp server should use RETRWEIRDO
-my $retrnosize; # set if ftp server should use RETRNOSIZE
-my $retrsize; # set if ftp server should use RETRSIZE
-my $pasvbadip; # set if ftp server should use PASVBADIP
-my $nosave; # set if ftp server should not save uploaded data
-my $nodataconn; # set if ftp srvr doesn't establish or accepts data channel
-my $nodataconn425; # set if ftp srvr doesn't establish data ch and replies 425
-my $nodataconn421; # set if ftp srvr doesn't establish data ch and replies 421
-my $nodataconn150; # set if ftp srvr doesn't establish data ch and replies 150
+my $retrweirdo; # set if FTP server should use RETRWEIRDO
+my $retrnosize; # set if FTP server should use RETRNOSIZE
+my $retrsize; # set if FTP server should use RETRSIZE
+my $pasvbadip; # set if FTP server should use PASVBADIP
+my $nosave; # set if FTP server should not save uploaded data
+my $nodataconn; # set if FTP srvr does not establish or accept data channel
+my $nodataconn425; # set if FTP srvr does not establish data ch and replies 425
+my $nodataconn421; # set if FTP srvr does not establish data ch and replies 421
+my $nodataconn150; # set if FTP srvr does not establish data ch and replies 150
my $storeresp;
my $postfetch;
my @capabilities; # set if server supports capability commands
my %delayreply; #
#**********************************************************************
-# global variables for to test ftp wildcardmatching or other test that
+# global variables to test FTP wildcard matching or other tests that
# need flexible LIST responses.. and corresponding files.
# $ftptargetdir is keeping the fake "name" of LIST directory.
#
my $ftptargetdir="";
#**********************************************************************
-# global variables used when running a ftp server to keep state info
+# global variables used when running an FTP server to keep state info
# relative to the secondary or data sockfilt process. Values of these
# variables should only be modified using datasockf_state() sub, given
# that they are closely related and relationship is a bit awkward.
my @data;
# TODO: Get the IP address of the client connection to use in the
- # EHLO response when the client doesn't specify one but for now use
+ # EHLO response when the client does not specify one but for now use
# 127.0.0.1
if(!$client) {
$client = "[127.0.0.1]";
my ($client) = @_;
# TODO: Get the IP address of the client connection to use in the HELO
- # response when the client doesn't specify one but for now use 127.0.0.1
+ # response when the client does not specify one but for now use 127.0.0.1
if(!$client) {
$client = "[127.0.0.1]";
}
}
}
- # this server doesn't "validate" MAIL FROM addresses
+ # this server does not "validate" MAIL FROM addresses
if(length($from)) {
my @found;
my $valid = 1;
}
elsif($state eq 'PASSIVE_NODATACONN') {
# Data sockfilter bound port without listening,
- # client won't be able to establish data connection.
+ # client will not be able to establish data connection.
$datasockf_state = $state;
$datasockf_mode = 'passive';
$datasockf_runs = 'yes';
@auth_mechs = split(/ /, $1);
}
elsif($_ =~ /NOSAVE/) {
- # don't actually store the file we upload - to be used when
+ # do not actually store the file we upload - to be used when
# uploading insanely huge amounts
$nosave = 1;
logmsg "FTPD: NOSAVE prevents saving of uploaded data\n";
$check = 0;
}
- # only perform this if we're not faking a reply
+ # only perform this if we are not faking a reply
my $func = $commandfunc{uc($FTPCMD)};
if($func) {
&$func($FTPARG, $FTPCMD);
use Memoize;
my @xml; # test data file contents
-my $xmlfile; # test data file name
+my $xmlfile; # test data filename
my $warning=0;
my $trace=0;
# Normalize the part function arguments for proper caching. This includes the
-# file name in the arguments since that is an implied parameter that affects the
+# filename in the arguments since that is an implied parameter that affects the
# return value. Any error messages will only be displayed the first time, but
# those are disabled by default anyway, so should never been seen outside
# development.
def main():
parser = argparse.ArgumentParser(prog='scorecard', description="""
- Run a range of tests to give a scorecard for a HTTP protocol
+ Run a range of tests to give a scorecard for an HTTP protocol
'h3' or 'h2' implementation in curl.
""")
parser.add_argument("-v", "--verbose", action='count', default=1,
if proto == 'h3' and env.curl_uses_lib('quiche'):
pytest.skip("quiche fails from 16k onwards")
curl = CurlClient(env=env)
- # url is longer than 'url_len'
+ # 'url' is longer than 'url_len'
url = f'https://{env.authority_for(env.domain1, proto)}/data.json?{"x"*(url_junk)}'
r = curl.http_download(urls=[url], alpn_proto=proto)
if url_junk <= 1024:
@pytest.mark.skipif(condition=not Env.curl_is_debug(), reason="needs curl debug")
@pytest.mark.skipif(condition=not Env.curl_is_verbose(), reason="needs curl verbose strings")
def test_10_10_reuse_proxy(self, env: Env, httpd, nghttpx_fwd, tunnel):
- # url twice via https: proxy separated with '--next', will reuse
+ # URL twice via https: proxy separated with '--next', will reuse
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2')
if env.curl_uses_lib('mbedtls') and \
else:
assert djson['SSL_SESSION_RESUMED'] == exp_resumed, f'{i}: {djson}\n{r.dump_logs()}'
- # use host name with trailing dot, verify handshake
+ # use hostname with trailing dot, verify handshake
@pytest.mark.parametrize("proto", Env.http_protos())
def test_17_03_trailing_dot(self, env: Env, proto, httpd, nghttpx):
curl = CurlClient(env=env)
# the SNI the server received is without trailing dot
assert r.json['SSL_TLS_SNI'] == env.domain1, f'{r.json}'
- # use host name with double trailing dot, verify handshake
+ # use hostname with double trailing dot, verify handshake
@pytest.mark.parametrize("proto", Env.http_protos())
def test_17_04_double_dot(self, env: Env, proto, httpd, nghttpx):
curl = CurlClient(env=env)
reused_session = True
assert reused_session, f'{r}\n{r.dump_logs()}'
- # use host name server has no certificate for
+ # use hostname server has no certificate for
@pytest.mark.parametrize("proto", Env.http_protos())
def test_17_11_wrong_host(self, env: Env, proto, httpd, nghttpx):
curl = CurlClient(env=env)
r = curl.http_get(url=url, alpn_proto=proto)
assert r.exit_code == 60, f'{r}'
- # use host name server has no cert for with --insecure
+ # use hostname server has no cert for with --insecure
@pytest.mark.parametrize("proto", Env.http_protos())
def test_17_12_insecure(self, env: Env, proto, httpd, nghttpx):
curl = CurlClient(env=env)
def _write_config(self):
domain1 = self.env.domain1
creds1 = self.env.get_credentials(domain1)
- assert creds1 # convince pytype this isn't None
+ assert creds1 # convince pytype this is not None
domain2 = self.env.domain2
creds2 = self.env.get_credentials(domain2)
- assert creds2 # convince pytype this isn't None
+ assert creds2 # convince pytype this is not None
self._mkpath(self._docs_dir)
self._mkpath(self._tmp_dir)
with open(os.path.join(self._docs_dir, 'data.json'), 'w') as fd:
raise Exception(f'{env.apxs} failed to query libexecdir: {p}')
self._mods_dir = p.stdout.strip()
if self._mods_dir is None:
- raise Exception('apache modules dir cannot be found')
+ raise Exception('apache modules directory cannot be found')
if not os.path.exists(self._mods_dir):
- raise Exception(f'apache modules dir does not exist: {self._mods_dir}')
+ raise Exception(f'apache modules directory does not exist: {self._mods_dir}')
self._maybe_running = False
self.ports = {}
self._rmf(self._error_log)
domain1 = self.env.domain1
domain1brotli = self.env.domain1brotli
creds1 = self.env.get_credentials(self._domain1_cred_name)
- assert creds1 # convince pytype this isn't None
+ assert creds1 # convince pytype this is not None
self._loaded_domain1_cred_name = self._domain1_cred_name
domain2 = self.env.domain2
creds2 = self.env.get_credentials(domain2)
- assert creds2 # convince pytype this isn't None
+ assert creds2 # convince pytype this is not None
exp_domain = self.env.expired_domain
exp_creds = self.env.get_credentials(exp_domain)
- assert exp_creds # convince pytype this isn't None
+ assert exp_creds # convince pytype this is not None
proxy_domain = self.env.proxy_domain
proxy_creds = self.env.get_credentials(proxy_domain)
- assert proxy_creds # convince pytype this isn't None
+ assert proxy_creds # convince pytype this is not None
self._mkpath(self._conf_dir)
self._mkpath(self._docs_dir)
self._mkpath(self._logs_dir)
AP_DECLARE_MODULE(curltest) =
{
STANDARD20_MODULE_STUFF,
- NULL, /* func to create per dir config */
- NULL, /* func to merge per dir config */
- NULL, /* func to create per server config */
- NULL, /* func to merge per server config */
+ NULL, /* func to create per-directory config */
+ NULL, /* func to merge per-directory config */
+ NULL, /* func to create per-server config */
+ NULL, /* func to merge per-server config */
NULL, /* command handlers */
curltest_hooks,
#ifdef AP_MODULE_FLAG_NONE
}
}
else if(!strcmp("id", arg)) {
- /* just an id for repeated requests with curl's url globbing */
+ /* just an id for repeated requests with curl's URL globbing */
request_id = val;
continue;
}
*s = '\0';
val = s + 1;
if(!strcmp("id", arg)) {
- /* just an id for repeated requests with curl's url globbing */
+ /* just an id for repeated requests with curl's URL globbing */
request_id = val;
continue;
}
*s = '\0';
val = s + 1;
if(!strcmp("id", arg)) {
- /* just an id for repeated requests with curl's url globbing */
+ /* just an id for repeated requests with curl's URL globbing */
request_id = val;
continue;
}
if self._process:
self.stop()
creds = self.env.get_credentials(self._cred_name)
- assert creds # convince pytype this isn't None
+ assert creds # convince pytype this is not None
self._loaded_cred_name = self._cred_name
args = [self._cmd, f'--frontend=*,{self._port};tls']
if self.supports_h3():
if self._process:
self.stop()
creds = self.env.get_credentials(self._cred_name)
- assert creds # convince pytype this isn't None
+ assert creds # convince pytype this is not None
self._loaded_cred_name = self._cred_name
args = [
self._cmd,
]
if self._with_ssl:
creds = self.env.get_credentials(self.domain)
- assert creds # convince pytype this isn't None
+ assert creds # convince pytype this is not None
conf.extend([
'ssl_enable=YES',
'debug_ssl=YES',
curl_mfprintf(stderr, "%s\n", msg);
curl_mfprintf(stderr,
"usage: [options] url\n"
- " download a url with following options:\n"
+ " download a URL with following options:\n"
" -a abort paused transfer\n"
" -m number max parallel downloads\n"
" -e use TLS early data when possible\n"
curl_mfprintf(stderr, "%s\n", msg);
curl_mfprintf(stderr,
"usage: [options] url\n"
- " upload to a url with following options:\n"
+ " upload to a URL with following options:\n"
" -a abort paused transfer\n"
" -e use TLS earlydata\n"
" -m number max parallel uploads\n"
/* if CURL_MEMDEBUG is set, this starts memory tracking message logging */
env = getenv("CURL_MEMDEBUG");
if(env) {
- /* use the value as file name */
+ /* use the value as filename */
curl_dbg_memdebug(env);
}
/* if CURL_MEMLIMIT is set, this enables fail-on-alloc-number-N feature */
#define CURL_DISABLE_DEPRECATION
/* Now include the curl_setup.h file from libcurl's private libdir (the source
- version, but that might include "curl_config.h" from the build dir so we
- need both of them in the include path), so that we get good in-depth
- knowledge about the system we're building this on */
+ version, but that might include "curl_config.h" from the build directory so
+ we need both of them in the include path), so that we get good in-depth
+ knowledge about the system we are building this on */
#include "curl_setup.h"
#include <curl/curl.h>
CURLFORM_COPYCONTENTS, "content", CURLFORM_END);
t1308_fail_unless(rc == 0, "curl_formadd returned error");
- /* after the first curl_formadd when there's a single entry, both pointers
+ /* after the first curl_formadd when there is a single entry, both pointers
should point to the same struct */
t1308_fail_unless(post == last, "post and last weren't the same");
/* add the individual transfer */
curl_multi_add_handle(multi, curl);
- /* set the options (I left out a few, you'll get the point anyway) */
+ /* set the options (I left out a few, you get the point anyway) */
curl_easy_setopt(curl, CURLOPT_URL, URL);
curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE_LARGE, testDataSize);
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, testData);
CURLU_DEFAULT_SCHEME, 0, CURLUE_BAD_IPV6},
{"http://[ab.be]/x", "",
CURLU_DEFAULT_SCHEME, 0, CURLUE_BAD_IPV6},
- /* URL without host name */
+ /* URL without hostname */
{"http://a:b@/x", "",
CURLU_DEFAULT_SCHEME, 0, CURLUE_NO_HOST},
{"boing:80",
{"mailto:infobot@example.com?body=send%20current-issue", "", 0, 0,
CURLUE_UNSUPPORTED_SCHEME},
{"about:80", "https://about:80/", CURLU_DEFAULT_SCHEME, 0, CURLUE_OK},
- /* percent encoded host names */
+ /* percent encoded hostnames */
{"http://example.com%40127.0.0.1/", "", 0, 0, CURLUE_BAD_HOSTNAME},
{"http://example.com%21127.0.0.1/", "", 0, 0, CURLUE_BAD_HOSTNAME},
{"http://example.com%3f127.0.0.1/", "", 0, 0, CURLUE_BAD_HOSTNAME},
0, /* set */
CURLUE_OK, CURLUE_BAD_HOSTNAME},
{"https://example.com/",
- "host=0xff,", /* '++' there's no automatic URL decode when setting this
+ "host=0xff,", /* '++' there is no automatic URL decode when setting this
part */
"https://0xff/",
0, /* get */
"https://example.com/",
0, CURLU_NON_SUPPORT_SCHEME, CURLUE_OK, CURLUE_BAD_SCHEME},
{"https://example.com/",
- /* Set a 41 bytes scheme. That's too long so the old scheme remains set. */
+ /* Set a 41 bytes scheme. That is too long so the old scheme remains set. */
"scheme=bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbc,",
"https://example.com/",
0, CURLU_NON_SUPPORT_SCHEME, CURLUE_OK, CURLUE_BAD_SCHEME},
"scheme=https,user= @:,host=foobar,",
"https://%20%20%20%40%3A@foobar/",
0, CURLU_URLENCODE, CURLUE_OK, CURLUE_OK},
- /* Setting a host name with spaces is not OK: */
+ /* Setting a hostname with spaces is not OK: */
{NULL,
"scheme=https,host= ,path= ,user= ,password= ,query= ,fragment= ,",
"[nothing]",
if(!result)
tid_valid = true;
else {
- curl_mfprintf(stderr, "%s:%d Couldn't create thread, errno %d\n",
+ curl_mfprintf(stderr, "%s:%d Could not create thread, errno %d\n",
__FILE__, __LINE__, result);
goto test_cleanup;
}
return t1565_test_failure;
}
-#else /* without pthread, this test doesn't work */
+#else /* without pthread, this test does not work */
static CURLcode test_lib1565(const char *URL)
{
(void)URL;
* only tests whichever resolver curl is actually built with.
*/
-/* We're willing to wait a very generous two seconds for the removal. This is
+/* We are willing to wait a generous two seconds for the removal. This is
as low as we can go while still easily supporting SIGALRM timing for the
- non-threaded blocking resolver. It doesn't matter that much because when
+ non-threaded blocking resolver. It does not matter that much because when
the test passes, we never wait this long. We set it much higher via
the default TEST_HANG_TIMEOUT to avoid issues when running on overloaded
CI machines. */
blocks. */
timeout = TEST_HANG_TIMEOUT * 2;
else {
- /* If we can't set the DNS server, presume that we are configured to use a
- resolver that can't be cancelled (i.e. the threaded resolver or the
+ /* If we cannot set the DNS server, presume that we are configured to use
+ a resolver that cannot be cancelled (i.e. the threaded resolver or the
non-threaded blocking resolver). So, we just test that the
curl_multi_remove_handle() call does finish well within our test
timeout.
- But, it is very unlikely that the resolver request will take any time at
- all because we haven't been able to configure the resolver to use an
+ But, it is unlikely that the resolver request will take any time at
+ all because we have not been able to configure the resolver to use an
non-responsive DNS server. At least we exercise the flow.
*/
curl_mfprintf(stderr,
}
/* Setting a timeout on the request should ensure that even if we have to
- wait for the resolver during curl_multi_remove_handle(), it won't take
+ wait for the resolver during curl_multi_remove_handle(), it will not take
longer than this, because the resolver request inherits its timeout from
this. */
easy_setopt(curl, CURLOPT_TIMEOUT_MS, timeout);
curl_mfprintf(stderr, "curl_multi_remove_handle() succeeded\n");
/* Fail the test if it took too long to remove. This happens after the fact,
- and says "it seems that it would have run forever", which isn't true, but
- it's close enough, and simple to do. */
+ and says "it seems that it would have run forever", which is not true, but
+ it is close enough, and simple to do. */
abort_on_test_timeout();
test_cleanup:
easy_setopt(curl, CURLOPT_URL, URL);
easy_setopt(curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_IFMODSINCE);
- /* Some TIMEVALUE; it doesn't matter. */
+ /* Some TIMEVALUE; it does not matter. */
easy_setopt(curl, CURLOPT_TIMEVALUE, 1566210680L);
header = curl_slist_append(NULL, "If-Modified-Since:");
}
res = CURLE_OK; /* reset for next use */
- /* print the used url */
+ /* print the used URL */
curl_url_get(curlu, CURLUPART_URL, &url_after, 0);
curl_mfprintf(stderr, "curlu now: <%s>\n", url_after);
curl_free(url_after);
"curl_easy_perform returned %d: <%s>, <%s>\n",
res, curl_easy_strerror(res), error_buffer);
- /* print url */
+ /* print URL */
curl_url_get(curlu, CURLUPART_URL, &url_after, 0);
curl_mfprintf(stderr, "curlu now: <%s>\n", url_after);
"curl_easy_perform returned %d: <%s>, <%s>\n",
res, curl_easy_strerror(res), error_buffer);
- /* print the used url */
+ /* print the used URL */
if(!curl_easy_getinfo(curl, CURLINFO_EFFECTIVE_URL, &url_after))
curl_mprintf("Effective URL: %s\n", url_after);
curl_easy_reset(curl);
- /* using the same file name for the alt-svc cache, this clobbers the
+ /* using the same filename for the alt-svc cache, this clobbers the
content just written from the 'curldupe' handle */
curl_easy_cleanup(curl);
}
curl_easy_option_by_id(o->id);
if(ename->id != o->id) {
- curl_mprintf("name lookup id %d doesn't match %d\n",
+ curl_mprintf("name lookup id %d does not match %d\n",
ename->id, o->id);
}
else if(eid->id != o->id) {
- curl_mprintf("ID lookup %d doesn't match %d\n",
+ curl_mprintf("ID lookup %d does not match %d\n",
ename->id, o->id);
}
}
if(!c) {
- /* We're going to drive the transfer using multi interface here,
+ /* We are going to drive the transfer using multi interface here,
because we want to stop during the middle. */
m = curl_multi_add_handle(multi, curl);
/* ignores any content */
easy_setopt(curl, CURLOPT_WRITEFUNCTION, t1940_write_cb);
- /* if there's a proxy set, use it */
+ /* if there is a proxy set, use it */
if(libtest_arg2 && *libtest_arg2) {
easy_setopt(curl, CURLOPT_PROXY, libtest_arg2);
easy_setopt(curl, CURLOPT_HTTPPROXYTUNNEL, 1L);
/* ignores any content */
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, t1945_write_cb);
- /* if there's a proxy set, use it */
+ /* if there is a proxy set, use it */
if(libtest_arg2 && *libtest_arg2) {
curl_easy_setopt(curl, CURLOPT_PROXY, libtest_arg2);
curl_easy_setopt(curl, CURLOPT_HTTPPROXYTUNNEL, 1L);
int maxfd = -99;
bool found_new_socket = FALSE;
- /* Start a new handle if we aren't at the max */
+ /* Start a new handle if we are not at the max */
if(state == ReadyForNewHandle) {
easy_init(ntlm_curls[num_handles]);
interval.tv_sec = 0;
interval.tv_usec = 5000;
- /* if there's no timeout and we get here on the last handle, we may
+ /* if there is no timeout and we get here on the last handle, we may
already have read the last part of the stream so waiting makes no
sense */
if(!running && num_handles == MAX_EASY_HANDLES) {
break;
}
- /* checking case when we don't have enough space for waitfds */
+ /* checking case when we do not have enough space for waitfds */
mc = curl_multi_waitfds(multi, ufds1, fd_count - 1, &fd_count_chk);
if(mc != CURLM_OUT_OF_MEMORY) {
if(fd_count_chk < fd_count) {
curl_mfprintf(stderr,
"curl_multi_waitfds() should return the amount of fds "
- "needed if enough isn't passed in (%u vs. %u).\n",
+ "needed if enough is not passed in (%u vs. %u).\n",
fd_count_chk, fd_count);
res = TEST_ERR_FAILURE;
break;
if(fd_count_chk < fd_count) {
curl_mfprintf(stderr,
"curl_multi_waitfds() should return the amount of fds "
- "needed if enough isn't passed in (%u vs. %u).\n",
+ "needed if enough is not passed in (%u vs. %u).\n",
fd_count_chk, fd_count);
res = TEST_ERR_FAILURE;
break;
results[i] = CURL_LAST; /* initialize with invalid value */
th = CreateThread(NULL, 0, t3026_run_thread, &results[i], 0, NULL);
if(!th) {
- curl_mfprintf(stderr, "%s:%d Couldn't create thread, "
+ curl_mfprintf(stderr, "%s:%d Could not create thread, "
"GetLastError 0x%08lx\n",
__FILE__, __LINE__, GetLastError());
tid_count = i;
results[i] = CURL_LAST; /* initialize with invalid value */
res = pthread_create(&tids[i], NULL, t3026_run_thread, &results[i]);
if(res) {
- curl_mfprintf(stderr, "%s:%d Couldn't create thread, errno %d\n",
+ curl_mfprintf(stderr, "%s:%d Could not create thread, errno %d\n",
__FILE__, __LINE__, res);
tid_count = i;
test_failure = TEST_ERR_MAJOR_BAD;
return test_failure;
}
-#else /* without pthread or Windows, this test doesn't work */
+#else /* without pthread or Windows, this test does not work */
static CURLcode test_lib3026(const char *URL)
{
curl_version_info_data *ver;
return TEST_ERR_MAJOR_BAD;
}
- /* Set the HTTPS url to retrieve. */
+ /* Set the HTTPS URL to retrieve. */
test_setopt(curl, CURLOPT_URL, URL);
/* Capture certificate information */
/* get the file size of the local file */
hd = fstat(fileno(hd_src), &file_info);
if(hd == -1) {
- /* can't open file, bail out */
+ /* cannot open file, bail out */
curl_mfprintf(stderr, "fstat() failed with error (%d) %s\n",
errno, curlx_strerror(errno, errbuf, sizeof(errbuf)));
curl_mfprintf(stderr, "Error opening file '%s'\n", libtest_arg2);
test_setopt(curl, CURLOPT_INFILESIZE_LARGE,
(curl_off_t)file_info.st_size);
- /* Now run off and do what you've been told! */
+ /* Now run off and do what you have been told! */
res = curl_easy_perform(curl);
test_cleanup:
code = curl_easy_perform(curl);
if(code) {
int i = 0;
- curl_mfprintf(stderr, "perform url '%s' repeat %d failed, curlcode %d\n",
+ curl_mfprintf(stderr, "perform URL '%s' repeat %d failed, curlcode %d\n",
tdata->url, i, code);
}
curl_mfprintf(stderr, "raising soft limit up to OPEN_MAX\n");
rl.rlim_cur = OPEN_MAX;
if(setrlimit(RLIMIT_NOFILE, &rl) != 0) {
- /* on failure don't abort just issue a warning */
+ /* on failure do not abort just issue a warning */
t518_store_errmsg("setrlimit() failed", errno);
curl_mfprintf(stderr, "%s\n", t518_msgbuff);
t518_msgbuff[0] = '\0';
curl_mfprintf(stderr, "raising soft limit up to hard limit\n");
rl.rlim_cur = rl.rlim_max;
if(setrlimit(RLIMIT_NOFILE, &rl) != 0) {
- /* on failure don't abort just issue a warning */
+ /* on failure do not abort just issue a warning */
t518_store_errmsg("setrlimit() failed", errno);
curl_mfprintf(stderr, "%s\n", t518_msgbuff);
t518_msgbuff[0] = '\0';
t518_num_open.rlim_max = NUM_OPEN;
- /* verify that we won't overflow size_t in malloc() */
+ /* verify that we do not overflow size_t in malloc() */
if((size_t)(t518_num_open.rlim_max) > ((size_t)-1) / sizeof(*t518_testfd)) {
tutil_rlim2str(strbuff1, sizeof(strbuff1), t518_num_open.rlim_max);
/* get the file size of the local file */
hd = fstat(fileno(hd_src), &file_info);
if(hd == -1) {
- /* can't open file, bail out */
+ /* cannot open file, bail out */
curl_mfprintf(stderr, "fstat() failed with error (%d) %s\n",
errno, curlx_strerror(errno, errbuf, sizeof(errbuf)));
curl_mfprintf(stderr, "Error opening file '%s'\n", libtest_arg2);
CURLMcode result = curl_multi_socket_action(multi, s, evBitmask,
&numhandles);
if(result != CURLM_OK) {
- curl_mfprintf(stderr, "%s Curl error on %s (%i) %s\n",
+ curl_mfprintf(stderr, "%s curl error on %s (%i) %s\n",
t530_tag(), info, result, curl_multi_strerror(result));
}
return result;
if(timeout.tv_sec != (time_t)-1 &&
t530_getMicroSecondTimeout(&timeout) == 0) {
- /* Curl's timer has elapsed. */
+ /* curl's timer has elapsed. */
if(socket_action(multi, CURL_SOCKET_TIMEOUT, 0, "timeout")) {
res = TEST_ERR_BAD_TIMEOUT;
goto test_cleanup;
curl_mfprintf(stderr, "raising soft limit up to OPEN_MAX\n");
rl.rlim_cur = OPEN_MAX;
if(setrlimit(RLIMIT_NOFILE, &rl) != 0) {
- /* on failure don't abort just issue a warning */
+ /* on failure do not abort just issue a warning */
t537_store_errmsg("setrlimit() failed", errno);
curl_mfprintf(stderr, "%s\n", t537_msgbuff);
t537_msgbuff[0] = '\0';
curl_mfprintf(stderr, "raising soft limit up to hard limit\n");
rl.rlim_cur = rl.rlim_max;
if(setrlimit(RLIMIT_NOFILE, &rl) != 0) {
- /* on failure don't abort just issue a warning */
+ /* on failure do not abort just issue a warning */
t537_store_errmsg("setrlimit() failed", errno);
curl_mfprintf(stderr, "%s\n", t537_msgbuff);
t537_msgbuff[0] = '\0';
* test 537 is all about testing libcurl functionality
* when the system has nearly exhausted the number of
* available file descriptors. Test 537 will try to run
- * with a very small number of file descriptors available.
+ * with a small number of file descriptors available.
* This implies that any file descriptor which is open
* when the test runs will have a number in the high range
* of whatever the system supports.
t537_num_open.rlim_max = nitems;
}
- /* verify that we won't overflow size_t in malloc() */
+ /* verify that we do not overflow size_t in malloc() */
if((size_t)(t537_num_open.rlim_max) > ((size_t)-1) / sizeof(*t537_testfd)) {
tutil_rlim2str(strbuff1, sizeof(strbuff1), t537_num_open.rlim_max);
curl_mfprintf(stderr, "shrinking array for %s file descriptors\n",
strbuff);
- /* we don't care if we can't shrink it */
+ /* we do not care if we cannot shrink it */
tmpfd = realloc(t537_testfd,
sizeof(*t537_testfd) * (size_t)(t537_num_open.rlim_max));
* argv1 = URL
* argv2 = proxy
* argv3 = proxyuser:password
- * argv4 = host name to use for the custom Host: header
+ * argv4 = hostname to use for the custom Host: header
*/
#include "first.h"
/* get the file size of the local file */
hd = fstat(fileno(hd_src), &file_info);
if(hd == -1) {
- /* can't open file, bail out */
+ /* cannot open file, bail out */
curl_mfprintf(stderr, "fstat() failed with error (%d) %s\n",
errno, curlx_strerror(errno, errbuf, sizeof(errbuf)));
curl_mfprintf(stderr, "Error opening file '%s'\n", libtest_arg2);
/* now specify which file to upload */
test_setopt(curl, CURLOPT_READDATA, hd_src);
- /* Now run off and do what you've been told! */
+ /* Now run off and do what you have been told! */
res = curl_easy_perform(curl);
if(res)
goto test_cleanup;
/* specify target */
test_setopt(curl, CURLOPT_URL, URL);
- /* Now run off and do what you've been told! */
+ /* Now run off and do what you have been told! */
res = curl_easy_perform(curl);
test_cleanup:
CURLFORM_COPYNAME, "sendfile alternative",
CURLFORM_STREAM, &pooh,
CURLFORM_CONTENTLEN, (curl_off_t)pooh.sizeleft,
- CURLFORM_FILENAME, "file name 2",
+ CURLFORM_FILENAME, "filename 2",
CURLFORM_END);
}
curl_msnprintf(buf, sizeof(buf), "%.*f", 0, 9.2987654);
errors += string_check(buf, "9");
- /* very large precisions easily turn into system specific outputs so we only
+ /* large precisions easily turn into system specific outputs so we only
check the output buffer length here as we know the internal limit */
curl_msnprintf(buf, sizeof(buf), "%.*f", (1 << 30), 9.2987654);
/* specify target */
test_setopt(curl, CURLOPT_URL, URL);
- /* Now run off and do what you've been told! */
+ /* Now run off and do what you have been told! */
res = curl_easy_perform(curl);
test_cleanup:
sdp = curlx_open(libtest_arg2, O_RDONLY);
if(sdp == -1) {
- curl_mfprintf(stderr, "can't open %s\n", libtest_arg2);
+ curl_mfprintf(stderr, "cannot open %s\n", libtest_arg2);
res = TEST_ERR_MAJOR_BAD;
goto test_cleanup;
}
sdpf = curlx_fopen(libtest_arg2, "rb");
if(!sdpf) {
- curl_mfprintf(stderr, "can't fopen %s\n", libtest_arg2);
+ curl_mfprintf(stderr, "cannot fopen %s\n", libtest_arg2);
res = TEST_ERR_MAJOR_BAD;
goto test_cleanup;
}
FILE *idfile = curlx_fopen(libtest_arg2, "wb");
if(!idfile) {
- curl_mfprintf(stderr, "couldn't open the Session ID File\n");
+ curl_mfprintf(stderr, "could not open the Session ID File\n");
return TEST_ERR_MAJOR_BAD;
}
FILE *protofile = curlx_fopen(libtest_arg2, "wb");
if(!protofile) {
- curl_mfprintf(stderr, "Couldn't open the protocol dump file\n");
+ curl_mfprintf(stderr, "Could not open the protocol dump file\n");
return TEST_ERR_MAJOR_BAD;
}
/* PUT style GET_PARAMETERS */
params = curlx_open(libtest_arg2, O_RDONLY);
if(params == -1) {
- curl_mfprintf(stderr, "can't open %s\n", libtest_arg2);
+ curl_mfprintf(stderr, "cannot open %s\n", libtest_arg2);
res = TEST_ERR_MAJOR_BAD;
goto test_cleanup;
}
paramsf = curlx_fopen(libtest_arg2, "rb");
if(!paramsf) {
- curl_mfprintf(stderr, "can't fopen %s\n", libtest_arg2);
+ curl_mfprintf(stderr, "cannot fopen %s\n", libtest_arg2);
res = TEST_ERR_MAJOR_BAD;
goto test_cleanup;
}
if(moo)
curlx_fclose(moo);
else
- curl_mfprintf(stderr, "Progress: end UL, can't open %s\n", libtest_arg2);
+ curl_mfprintf(stderr, "Progress: end UL, cannot open %s\n", libtest_arg2);
started = FALSE;
}
if(moo)
curlx_fclose(moo);
else
- curl_mfprintf(stderr, "Progress: start UL, can't open %s\n",
+ curl_mfprintf(stderr, "Progress: start UL, cannot open %s\n",
libtest_arg2);
started = TRUE;
}
sockets->max_count = 20;
}
else if(sockets->count >= sockets->max_count) {
- /* this can't happen in normal cases */
+ /* this cannot happen in normal cases */
curl_mfprintf(stderr, "too many file handles error\n");
exit(2);
}
CURLMcode result = curl_multi_socket_action(multi, s, evBitmask,
&numhandles);
if(result != CURLM_OK) {
- curl_mfprintf(stderr, "Curl error on %s (%i) %s\n",
+ curl_mfprintf(stderr, "curl error on %s (%i) %s\n",
info, result, curl_multi_strerror(result));
}
}
/* get the file size of the local file */
hd = fstat(fileno(hd_src), &file_info);
if(hd == -1) {
- /* can't open file, bail out */
+ /* cannot open file, bail out */
curl_mfprintf(stderr, "fstat() failed with error (%d) %s\n",
errno, curlx_strerror(errno, errbuf, sizeof(errbuf)));
curl_mfprintf(stderr, "Error opening file '%s'\n", libtest_arg2);
if(timeout.tv_sec != (time_t)-1 &&
t582_getMicroSecondTimeout(&timeout) == 0) {
- /* Curl's timer has elapsed. */
+ /* curl's timer has elapsed. */
notifyCurl(multi, CURL_SOCKET_TIMEOUT, 0, "timeout");
}
code = curl_easy_perform(curl);
if(code != CURLE_OK) {
int i = 0;
- curl_mfprintf(stderr, "perform url '%s' repeat %d failed, curlcode %d\n",
+ curl_mfprintf(stderr, "perform URL '%s' repeat %d failed, curlcode %d\n",
tdata->url, i, code);
}
res = curl_mime_data_cb(part, datasize, t643_read_cb,
NULL, NULL, &pooh);
if(!res)
- res = curl_mime_filename(part, "file name 2");
+ res = curl_mime_filename(part, "filename 2");
}
if(res)
goto test_cleanup;
}
- /* this doesn't set the PATH part */
+ /* this does not set the PATH part */
if(curl_url_set(urlp, CURLUPART_HOST, "www.example.com", 0) ||
curl_url_set(urlp, CURLUPART_SCHEME, "http", 0) ||
curl_url_set(urlp, CURLUPART_PORT, "80", 0)) {
curl_mprintf("CURLINFO_HTTPAUTH_USED did not say NTLM\n");
}
- /* set a new URL for the second, so that we don't restart NTLM */
+ /* set a new URL for the second, so that we do not restart NTLM */
test_setopt(curl, CURLOPT_URL, libtest_arg2);
} while(!res && ++count < 2);
CURLMcode result = curl_multi_socket_action(multi, s, evBitmask,
&numhandles);
if(result != CURLM_OK) {
- curl_mfprintf(stderr, "%s Curl error on %s (%i) %s\n",
+ curl_mfprintf(stderr, "%s curl error on %s (%i) %s\n",
t758_tag(), info, result, curl_multi_strerror(result));
}
return result;
if(timeout.tv_sec != (time_t)-1 &&
t758_getMicroSecondTimeout(&timeout) == 0) {
- /* Curl's timer has elapsed. */
+ /* curl's timer has elapsed. */
if(t758_saction(multi, CURL_SOCKET_TIMEOUT, 0, "timeout")) {
res = TEST_ERR_BAD_TIMEOUT;
goto test_cleanup;
;
if(!$ARGV[0]) {
- die "missing target file name";
+ die "missing target filename";
}
use File::Temp qw/ :mktemp /;
# Read the output of curl --version
my $curl_protocols="";
-open(CURL, "$ARGV[1]") || die "Can't get curl $what list\n";
+open(CURL, "$ARGV[1]") || die "Cannot get curl $what list\n";
while(<CURL>) {
$curl_protocols = $_ if(/$what:/i);
}
# Read the output of curl-config
my @curl_config;
-open(CURLCONFIG, "sh $ARGV[0] --$what|") || die "Can't get curl-config $what list\n";
+open(CURLCONFIG, "sh $ARGV[0] --$what|") || die "Cannot get curl-config $what list\n";
while(<CURLCONFIG>) {
chomp;
$_ = lc($_) if($what eq "protocols"); # accept uppercase protocols in curl-config
my $what=$ARGV[2];
# Read the output of curl --version
-open(CURL, "$ARGV[1]") || die "Can't open curl --version list in $ARGV[1]\n";
+open(CURL, "$ARGV[1]") || die "Cannot open curl --version list in $ARGV[1]\n";
$_ = <CURL>;
chomp;
/libcurl\/([\.\d]+((-DEV)|(-rc\d)|(-\d+))?)/;
my $curlconfigversion;
# Read the output of curl-config --version/--vernum
-open(CURLCONFIG, "sh $ARGV[0] --$what|") || die "Can't get curl-config --$what list\n";
+open(CURLCONFIG, "sh $ARGV[0] --$what|") || die "Cannot get curl-config --$what list\n";
$_ = <CURLCONFIG>;
chomp;
my $filever=$_;
$curlconfigversion = "illegal value";
}
- # Strip off the -DEV and -rc suffixes from the curl version if they're there
+ # Strip off the -DEV and -rc suffixes from the curl version if they are there
$version =~ s/-\w*$//;
}
close CURLCONFIG;
exit 3;
}
if(!open(CURL, "$ARGV[0] -s --engine list|")) {
- print "Can't get SSL engine list\n";
+ print "Cannot get SSL engine list\n";
exit 2;
}
while(<CURL>) {
use Time::Local;
if($#ARGV < 1) {
- print "Usage: $0 prepare|postprocess dir [logfile]\n";
+ print "Usage: $0 prepare|postprocess directory [logfile]\n";
exit 1;
}
curl_mfprintf(stream, "%c",
((ptr[i + c] >= 0x20) && (ptr[i + c] < 0x80)) ?
ptr[i + c] : '.');
- /* check again for 0D0A, to avoid an extra \n if it's at width */
+ /* check again for 0D0A, to avoid an extra \n if it is at width */
if(nohex && (i + c + 2 < size) && ptr[i + c + 1] == 0x0D &&
ptr[i + c + 2] == 0x0A) {
i += (c + 3 - width);
}
secs = epoch_offset + tv.tv_sec;
/* !checksrc! disable BANNEDFUNC 1 */
- now = localtime(&secs); /* not thread safe but we don't care */
+ now = localtime(&secs); /* not thread safe but we do not care */
curl_msnprintf(timebuf, sizeof(timebuf), "%02d:%02d:%02d.%06ld ",
now->tm_hour, now->tm_min, now->tm_sec, (long)tv.tv_usec);
}
#include "memdebug.h"
-/* build request url */
+/* build request URL */
char *tutil_suburl(const char *base, int i)
{
return curl_maprintf("%s%.4d", base, i);
***************************************************************************/
#include "first.h"
-/* build request url */
+/* build request URL */
char *tutil_suburl(const char *base, int i);
#ifdef HAVE_SYS_RESOURCE_H
$addr = $_;
$size = $sizeataddr{$addr};
if($size > 0) {
- print "At $addr, there's $size bytes.\n";
+ print "At $addr, there is $size bytes.\n";
print " allocated by ".$getmem{$addr}."\n";
}
}
"""
buffer = bytearray()
- # If we keep receiving negotiation sequences, we won't fill the buffer.
+ # If we keep receiving negotiation sequences, we will not fill the buffer.
# Keep looping while we can, and until we have something to give back
# to the caller.
while len(buffer) == 0:
log.debug("Client can do")
self.state = self.DO
elif byte_int == NegTokens.DONT:
- # Client is indicating they can't do an option
- log.debug("Client can't do")
+ # Client is indicating they cannot do an option
+ log.debug("Client cannot do")
self.state = self.DONT
else:
# Received an unexpected byte. Stop negotiations
parser.add_argument("--verbose", action="store", type=int, default=0,
help="verbose output")
parser.add_argument("--pidfile", action="store",
- help="file name for the PID")
+ help="filename for the PID")
parser.add_argument("--logfile", action="store",
- help="file name for the log")
+ help="filename for the log")
parser.add_argument("--srcdir", action="store", help="test directory")
parser.add_argument("--id", action="store", help="server ID")
parser.add_argument("--ipv4", action="store_true", default=0,
# Forward slashes are simpler processed in Perl, do not require extra escaping
# for shell (unlike back slashes) and accepted by Windows native programs, so
# all functions return paths with only forward slashes.
-# All returned paths don't contain any duplicated slashes, only single slashes
+# All returned paths do not contain any duplicated slashes, only single slashes
# are used as directory separators on output.
# On non-Windows platforms functions acts as transparent wrappers for similar
# Perl's functions or return unmodified string (depending on functionality),
#######################################################################
# Quote an argument for passing safely to a Bourne shell
-# This does the same thing as String::ShellQuote but doesn't need a package.
+# This does the same thing as String::ShellQuote but does not need a package.
#
sub shell_quote {
my ($s)=@_;
$SIG{INT} = 'IGNORE';
$SIG{TERM} = 'IGNORE';
eval {
- # some msys2 perl versions don't define SIGUSR1, also missing from Win32 Perl
+ # some msys2 perl versions do not define SIGUSR1, also missing from Win32 Perl
$SIG{USR1} = 'IGNORE';
};
# handle IPC calls
event_loop();
- # Can't rely on logmsg here in case it's buffered
+ # Cannot rely on logmsg here in case it is buffered
print "Runner $thisrunnerid exiting\n" if($verbose);
# To reach this point, either the controller has sent
# Create our pid directory
mkdir("$LOGDIR/$PIDDIR", 0777);
- # Don't create a separate process
+ # Do not create a separate process
$thisrunnerid = "integrated";
}
delete $ENV{'CURL_MEMLIMIT'} if($ENV{'CURL_MEMLIMIT'});
if(-r "core") {
- # there's core file present now!
+ # there is core file present now!
logmsg " core dumped\n";
$dumped_core = 1;
$fail = 2;
}
}
- # verify that it returns a proper error code, doesn't leak memory
- # and doesn't core dump
+ # verify that it returns a proper error code, does not leak memory
+ # and does not core dump
if(($ret & 255) || ($ret >> 8) >= 128) {
logmsg " system() returned $ret\n";
$fail=1;
chomp $dis[0] if($dis[0]);
if($dis[0] eq "test-duphandle") {
# marked to not run with duphandle
- logmsg " $testnum: IGNORED: Can't run test-duphandle\n";
+ logmsg " $testnum: IGNORED: Cannot run test-duphandle\n";
return (-1, 0, 0, "", "", 0);
}
}
if(!$dumped_core) {
if(-r "core") {
- # there's core file present now!
+ # there is core file present now!
$dumped_core = 1;
}
}
logmsg "postcheck $cmd\n" if($verbose);
my $rc = runclient("$cmd");
# Must run the postcheck command in torture mode in order
- # to clean up, but the result can't be relied upon.
+ # to clean up, but the result cannot be relied upon.
if($rc != 0 && !$torture) {
logmsg " $testnum: postcheck FAILED\n";
return -1;
# Get the name of the function from the reference
my $cv = svref_2object($funcref);
my $gv = $cv->GV;
- # Prepend the name to the function arguments so it's marshalled along with them
+ # Prepend the name to the function arguments so it is marshalled along with them
unshift @_, $gv->NAME;
# Marshall the arguments into a flat string
my $margs = freeze \@_;
my $resarrayref = thaw $buf;
# First argument is runner ID
- # TODO: remove this; it's unneeded since it's passed in
+ # TODO: remove this; it is unneeded since it is passed in
unshift @$resarrayref, $runnerid;
return @$resarrayref;
}
###################################################################
# Returns runner ID if a response from an async call is ready or error
# First value is ready, second is error, however an error case shows up
-# as ready in Linux, so you can't trust it.
+# as ready in Linux, so you cannot trust it.
# argument is 0 for nonblocking, undef for blocking, anything else for timeout
# Called by controller
sub runnerar_ready {
my $e_in = $r_in;
if(select(my $r_out=$r_in, undef, my $e_out=$e_in, $blocking) >= 1) {
for my $fd (0..$maxfileno) {
- # Return an error condition first in case it's both
+ # Return an error condition first in case it is both
if(vec($e_out, $fd, 1)) {
return (undef, $idbyfileno{$fd});
}
# to check the remote system's PATH, and the places in the code where
# the curl binary is read directly to determine its type also need to be
# fixed. As long as the -g option is never given, and the -n is always
-# given, this won't be a problem.
+# given, this will not be a problem.
use strict;
use warnings;
}
#######################################################################
-# Stop buffering log messages, but don't touch them
+# Stop buffering log messages, but do not touch them
sub singletest_unbufferlogs {
undef $singletest_bufferedrunner;
}
}
eval {
- # some msys2 perl versions don't define SIGUSR1
+ # some msys2 perl versions do not define SIGUSR1
$SIG{USR1} = \&catch_usr1;
};
$SIG{PIPE} = 'IGNORE'; # these errors are captured in the read/write calls
delete $ENV{uc($proxy)} if($ENV{uc($proxy)});
}
-# make sure we don't get affected by other variables that control our
+# make sure we do not get affected by other variables that control our
# behavior
delete $ENV{'SSL_CERT_DIR'} if($ENV{'SSL_CERT_DIR'});
my $disttests = "";
sub get_disttests {
# If a non-default $TESTDIR is being used there may not be any
- # Makefile.am in which case there's nothing to do.
+ # Makefile.am in which case there is nothing to do.
open(my $dh, "<", "$TESTDIR/Makefile.am") or return;
while(<$dh>) {
chomp $_;
# Get all files
opendir(my $dh, $dir) ||
- return 0; # can't open dir
+ return 0; # cannot open dir
while($file = readdir($dh)) {
- # Don't clear the $PIDDIR or $LOCKDIR since those need to live beyond
+ # Do not clear the $PIDDIR or $LOCKDIR since those need to live beyond
# one test
if(($file !~ /^(\.|\.\.)\z/) &&
"$file" ne $PIDDIR && "$file" ne $LOCKDIR) {
# Generate a "proto-ipv6" version of each protocol to match the
# IPv6 <server> name and a "proto-unix" to match the variant which
- # uses Unix domain sockets. This works even if support isn't
+ # uses Unix domain sockets. This works even if support is not
# compiled in because the <features> test will fail.
push @protocols, map(("$_-ipv6", "$_-unix"), @protocols);
displaylogcontent("$curlverout");
logmsg "contents of $curlvererr: \n";
displaylogcontent("$curlvererr");
- die "couldn't get curl's version";
+ die "could not get curl's version";
}
if(-r "../lib/curl_config.h") {
if($torture) {
if(!$feature{"TrackMemory"}) {
- die "can't run torture tests since curl was built without ".
+ die "cannot run torture tests since curl was built without ".
"TrackMemory feature (--enable-curldebug)";
}
if($feature{"threaded-resolver"} && !$valgrind) {
- die "can't run torture tests since curl was built with the ".
- "threaded resolver, and we aren't running with valgrind";
+ die "cannot run torture tests since curl was built with the ".
+ "threaded resolver, and we are not running with valgrind";
}
}
$AZURE_RUN_ID = azure_create_test_run($ACURL);
logmsg "Azure Run ID: $AZURE_RUN_ID\n" if($verbose);
}
- # Appveyor doesn't require anything here
+ # Appveyor does not require anything here
}
if(azure_check_environment() && $AZURE_RUN_ID) {
$AZURE_RUN_ID = azure_update_test_run($ACURL, $AZURE_RUN_ID);
}
- # Appveyor doesn't require anything here
+ # Appveyor does not require anything here
}
# Verify that this test case should be run
sub singletest_shouldrun {
my $testnum = $_[0];
- my $why; # why the test won't be run
+ my $why; # why the test will not be run
my $errorreturncode = 1; # 1 means normal error, 2 means ignored error
my @what; # what features are needed
if(loadtest("${TESTDIR}/test${testnum}", 1)) {
if($verbose) {
# this is not a test
- logmsg "RUN: $testnum doesn't look like a test case\n";
+ logmsg "RUN: $testnum does not look like a test case\n";
}
$why = "no test";
}
my ($testnum, $why) = @_;
if($why && !$listonly) {
- # there's a problem, count it as "skipped"
+ # there is a problem, count it as "skipped"
$skipped{$why}++;
$teststat[$testnum]=$why; # store reason for this test case
return -1;
}
- # At this point we've committed to run this test
+ # At this point we have committed to run this test
logmsg sprintf("test %04d...", $testnum) if(!$automakestyle);
# name of the test
# Verify the sent request
my @out = loadarray("$logdir/$SERVERIN");
- # check if there's any attributes on the verify/protocol section
+ # check if there is any attributes on the verify/protocol section
my %hash = getpartattr("verify", "protocol");
if($hash{'nonewline'}) {
my @proxyprot = getpart("verify", "proxy");
if(@proxyprot) {
# Verify the sent proxy request
- # check if there's any attributes on the verify/protocol section
+ # check if there is any attributes on the verify/protocol section
my %hash = getpartattr("verify", "proxy");
if($hash{'nonewline'}) {
for my $partsuffix (('', '1', '2', '3', '4')) {
my @outfile=getpart("verify", "file".$partsuffix);
if(@outfile || partexists("verify", "file".$partsuffix) ) {
- # we're supposed to verify a dynamically generated file!
+ # we are supposed to verify a dynamically generated file!
my %hash = getpartattr("verify", "file".$partsuffix);
my $filename=$hash{'name'};
my @dnsd = getpart("verify", "dns");
if(@dnsd) {
- # we're supposed to verify a dynamically generated file!
+ # we are supposed to verify a dynamically generated file!
my %hash = getpartattr("verify", "dns");
my $hostname=$hash{'host'};
###################################################################
# Restore environment variables that were modified in a previous run.
# Test definition may instruct to (un)set environment vars.
- # This is done this early so that leftover variables don't affect
+ # This is done this early so that leftover variables do not affect
# starting servers or CI registration.
# restore_test_env(1);
# use this path to a curl used to verify servers
# Particularly useful when you introduce a crashing bug somewhere in
- # the development version as then it won't be able to run any tests
- # since it can't verify the servers!
+ # the development version as then it will not be able to run any tests
+ # since it cannot verify the servers!
$VCURL=shell_quote($ARGV[1]);
shift @ARGV;
# load additional reasons to skip tests
shift @ARGV;
my $exclude_file = $ARGV[0];
- open(my $fd, "<", $exclude_file) or die "Couldn't open '$exclude_file': $!";
+ open(my $fd, "<", $exclude_file) or die "Could not open '$exclude_file': $!";
while(my $line = <$fd>) {
next if($line =~ /^#/);
chomp $line;
# since valgrind 2.1.x, '--tool' option is mandatory
# use it, if it is supported by the version installed on the system
- # (this happened in 2003, so we could probably don't need to care about
+ # (this happened in 2003, so we could probably do not need to care about
# that old version any longer and just delete this check)
runclient("valgrind --help 2>&1 | grep -- --tool >$dev_null 2>&1");
if(($? >> 8)) {
close($curlh);
# valgrind 3 renamed the --logfile option to --log-file!!!
- # (this happened in 2005, so we could probably don't need to care about
+ # (this happened in 2005, so we could probably do not need to care about
# that old version any longer and just delete this check)
my $ver=join(' ', runclientoutput("valgrind --version"));
# cut off all but digits and dots
# clear and create logging directory:
#
-# TODO: figure how to get around this. This dir is needed for checksystemfeatures()
+# TODO: figure how to get around this. This directory is needed for checksystemfeatures()
# Maybe create & use & delete a temporary directory in that function
cleardir($LOGDIR);
mkdir($LOGDIR, 0777);
if($TESTCASES eq "all") {
# Get all commands and find out their test numbers
- opendir(DIR, $TESTDIR) || die "can't opendir $TESTDIR: $!";
+ opendir(DIR, $TESTDIR) || die "cannot opendir $TESTDIR: $!";
my @cmds = grep { /^test([0-9]+)$/ && -f "$TESTDIR/$_" } readdir(DIR);
closedir(DIR);
my ($runnerid, $testnum)=@_;
my $logdir = getrunnerlogdir($runnerid);
opendir(DIR, "$logdir") ||
- die "can't open dir: $!";
+ die "cannot open dir: $!";
my @logs = readdir(DIR);
closedir(DIR);
- logmsg "== Contents of files in the $logdir/ dir after test $testnum\n";
+ logmsg "== Contents of files in the $logdir/ directory after test $testnum\n";
foreach my $log (sort @logs) {
if($log =~ /\.(\.|)$/) {
next; # skip "." and ".."
next; # skip valgrindNnn of other tests
}
if(($log =~ /^test$testnum$/)) {
- next; # skip test$testnum since it can be very big
+ next; # skip test$testnum since it can be big
}
logmsg "=== Start of file $log\n";
displaylogcontent("$logdir/$log");
}
}
- # See if we've completed all the tests
+ # See if we have completed all the tests
if(!scalar(%runnersrunning)) {
# No runners are running; we must be done
scalar(@runtests) && die 'Internal error: still have tests to run';
}
# See if a test runner needs attention
- # If we could be running more tests, don't wait so we can schedule a new
+ # If we could be running more tests, do not wait so we can schedule a new
# one immediately. If all runners are busy, wait a fraction of a second
# for one to finish so we can still loop around to check the abort flag.
my $runnerwait = scalar(@runnersidle) && scalar(@runtests) ? 0.1 : 1.0;
next;
}
- $total++; # number of tests we've run
+ $total++; # number of tests we have run
$executed++;
if($error>0) {
$endwaitcnt += $runnerwait;
if($endwaitcnt >= 10) {
# Once all tests have been scheduled on a runner at the end of a test
- # run, we just wait for their results to come in. If we're still
+ # run, we just wait for their results to come in. If we are still
# waiting after a couple of minutes ($endwaitcnt multiplied by
- # $runnerwait, plus $jobs because that number won't time out), display
+ # $runnerwait, plus $jobs because that number will not time out), display
# the same test runner status as we give with a SIGUSR1. This will
# likely point to a single test that has hung.
logmsg "Hmmm, the tests are taking a while to finish. Here is the status:\n";
}
# Kill the runners
-# There is a race condition here since we don't know exactly when the runners
-# have each finished shutting themselves down, but we're about to exit so it
-# doesn't make much difference.
+# There is a race condition here since we do not know exactly when the runners
+# have each finished shutting themselves down, but we are about to exit so it
+# does not make much difference.
foreach my $runnerid (values %runnerids) {
runnerac_shutdown($runnerid);
sleep 0; # give runner a context switch so it can shut itself down
$ver_minor = $2;
}
elsif($verstr =~ /^sslVersion.*fips *= *yes/) {
- # the fips option causes an error if stunnel doesn't support it
+ # the fips option causes an error if stunnel does not support it
$fips_support = 1;
last
}
print $stunconf "verifyChain = yes\n";
}
if($fips_support) {
- # disable fips in case OpenSSL doesn't support it
+ # disable fips in case OpenSSL does not support it
print $stunconf "fips = no\n";
}
if(!$tstunnel_windows) {
# Put an "exec" in front of the command so that the child process
# keeps this child's process ID by being tied to the spawned shell.
- exec("exec $cmd") || die "Can't exec() $cmd: $!";
+ exec("exec $cmd") || die "Cannot exec() $cmd: $!";
# exec() will create a new process, but ties the existence of the
# new process to the parent waiting perl.exe and sh.exe processes.
bool bind_only);
/* global variables */
-static const char *srcpath = "."; /* pointing to the test dir */
+static const char *srcpath = "."; /* pointing to the test directory */
static const char *pidname = NULL;
static const char *portname = NULL; /* none by default */
static const char *serverlogfile = NULL;
}
if(*buffer != '\n') {
/*
- * We didn't find a new line so the last byte must be a
+ * We did not find a new line so the last byte must be a
* '\0' character inserted by fgets() which we should not
* count.
*/
memcpy(topic, &buffer[4], topic_len);
topic[topic_len] = 0;
- /* there's a QoS byte (two bits) after the topic */
+ /* there is a QoS byte (two bits) after the topic */
logmsg("SUBSCRIBE to '%s' [%d]", topic, packet_id);
stream = test2fopen(testno, logdir);
error = errno;
logmsg("fopen() failed with error (%d) %s",
error, curlx_strerror(error, errbuf, sizeof(errbuf)));
- logmsg("Couldn't open test file %ld", testno);
+ logmsg("Could not open test file %ld", testno);
goto end;
}
error = getpart(&data, &datalen, "reply", "data", stream);
FD_ZERO(&fds_write);
FD_ZERO(&fds_err);
- /* there's always a socket to wait for */
+ /* there is always a socket to wait for */
#ifdef __DJGPP__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warith-conversion"
/* Check that the system has IPv6 enabled before checking the resolver */
curl_socket_t s = socket(PF_INET6, SOCK_DGRAM, 0);
if(s == CURL_SOCKET_BAD)
- /* an IPv6 address was requested and we can't get/use one */
+ /* an IPv6 address was requested and we cannot get/use one */
rc = -1;
else {
sclose(s);
#endif
if(rc)
- printf("Resolving %s '%s' didn't work\n", ipv_inuse, host);
+ printf("Resolving %s '%s' did not work\n", ipv_inuse, host);
return !!rc;
}
long testno; /* test number found in the request */
long partno; /* part number found in the request */
bool open; /* keep connection open info, as found in the request */
- bool auth_req; /* authentication required, don't wait for body unless
- there's an Authorization header */
+ bool auth_req; /* authentication required, do not wait for body unless
+ there is an Authorization header */
bool auth; /* Authorization header present in the incoming request */
size_t cl; /* Content-Length of the incoming request */
bool digest; /* Authorization digest header found */
int error = errno;
logmsg("fopen() failed with error (%d) %s",
error, curlx_strerror(error, errbuf, sizeof(errbuf)));
- logmsg("Couldn't open test file %ld", req->testno);
+ logmsg("Could not open test file %ld", req->testno);
req->open = FALSE; /* closes connection */
return 1; /* done */
}
if(num < 0)
logmsg("negative pipe size ignored");
else if(num > 0)
- req->pipe = num-1; /* decrease by one since we don't count the
+ req->pipe = num-1; /* decrease by one since we do not count the
first request in this number */
}
else if(sscanf(ptr, "skip: %d", &num) == 1) {
req->open = FALSE; /* HTTP 1.0 closes connection by default */
if(!strncmp(doc, "bad", 3))
- /* if the host name starts with bad, we fake an error here */
+ /* if the hostname starts with bad, we fake an error here */
req->testno = DOCNUMBER_BADCONNECT;
else if(!strncmp(doc, "test", 4)) {
- /* if the host name starts with test, the port number used in the
+ /* if the hostname starts with test, the port number used in the
CONNECT line will be used as test number! */
char *portp = strchr(doc, ':');
if(portp && (*(portp + 1) != '\0') && ISDIGIT(*(portp + 1))) {
}
if(!end) {
- /* we don't have a complete request yet! */
+ /* we do not have a complete request yet! */
logmsg("rtspd_ProcessRequest returned without a complete request");
return 0; /* not complete yet */
}
/* **** Persistence ****
*
* If the request is an HTTP/1.0 one, we close the connection unconditionally
- * when we're done.
+ * when we are done.
*
* If the request is an HTTP/1.1 one, we MUST check for a "Connection:"
* header that might say "close". If it does, we close a connection when
return 1; /* done */
if((req->cl == 0) && !CURL_STRNICMP("Content-Length:", line, 15)) {
- /* If we don't ignore content-length, we read it and we read the whole
- request including the body before we return. If we've been told to
+ /* If we do not ignore content-length, we read it and we read the whole
+ request including the body before we return. If we have been told to
ignore the content-length, we will return as soon as all headers
have been received */
curl_off_t clen;
else {
if(req->skip)
/* we are instructed to not read the entire thing, so we make sure to
- only read what we're supposed to and NOT read the entire thing the
+ only read what we are supposed to and NOT read the entire thing the
client wants to send! */
got = sread(sock, reqbuf + req->offset, req->cl);
else
error = errno;
logmsg("fopen() failed with error (%d) %s",
error, curlx_strerror(error, errbuf, sizeof(errbuf)));
- logmsg("Couldn't open test file");
+ logmsg("Could not open test file");
return 0;
}
else {
error = errno;
logmsg("fopen() failed with error (%d) %s",
error, curlx_strerror(error, errbuf, sizeof(errbuf)));
- logmsg("Couldn't open test file");
+ logmsg("Could not open test file");
free(ptr);
return 0;
}
logmsg("fopen() failed with error (%d) %s",
error, curlx_strerror(error, errbuf, sizeof(errbuf)));
logmsg("Error opening file '%s'", responsedump);
- logmsg("couldn't create logfile '%s'", responsedump);
+ logmsg("could not create logfile '%s'", responsedump);
free(ptr);
free(cmd);
return -1;
* The only other messages that could occur here are PING and PORT,
* and both of them occur at the start of a test when nothing should be
* trying to DISC. Therefore, we should not ever get here, but if we
- * do, it's probably due to some kind of unclean shutdown situation so
+ * do, it is probably due to some kind of unclean shutdown situation so
* us shutting down is what we probably ought to be doing, anyway.
*/
return FALSE;
/* server mode */
sockfd = listenfd;
- /* there's always a socket to wait for */
+ /* there is always a socket to wait for */
#ifdef __DJGPP__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warith-conversion"
maxfd = 0; /* stdin */
}
else {
- /* there's always a socket to wait for */
+ /* there is always a socket to wait for */
#ifdef __DJGPP__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warith-conversion"
if((sockfd != CURL_SOCKET_BAD) && (FD_ISSET(sockfd, &fds_read)) ) {
ssize_t nread_socket;
if(*mode == PASSIVE_LISTEN) {
- /* there's no stream set up yet, this is an indication that there's a
+ /* there is no stream set up yet, this is an indication that there is a
client connecting. */
curl_socket_t newfd = accept(sockfd, NULL, NULL);
if(CURL_SOCKET_BAD == newfd) {
* state
- * "nmethods_max [number: 3]" - the minimum numberf NMETHODS the client must
+ * "nmethods_max [number: 3]" - the minimum number of NMETHODS the client must
* state
- * "user [string]" - the user name that must match (if method is 2)
+ * "user [string]" - the username that must match (if method is 2)
* "password [string]" - the password that must match (if method is 2)
* "backend [IPv4]" - numerical IPv4 address of backend to connect to
* "backendport [number:0]" - TCP port of backend to connect to. 0 means use
FD_ZERO(&fds_write);
FD_ZERO(&fds_err);
- /* there's always a socket to wait for */
+ /* there is always a socket to wait for */
#ifdef __DJGPP__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warith-conversion"
long testno; /* test number found in the request */
long partno; /* part number found in the request */
bool open; /* keep connection open info, as found in the request */
- bool auth_req; /* authentication required, don't wait for body unless
- there's an Authorization header */
+ bool auth_req; /* authentication required, do not wait for body unless
+ there is an Authorization header */
bool auth; /* Authorization header present in the incoming request */
size_t cl; /* Content-Length of the incoming request */
bool digest; /* Authorization digest header found */
int prot_version; /* HTTP version * 10 */
int callcount; /* times sws_ProcessRequest() gets called */
bool skipall; /* skip all incoming data */
- bool noexpect; /* refuse Expect: (don't read the body) */
+ bool noexpect; /* refuse Expect: (do not read the body) */
bool connmon; /* monitor the state of the connection, log disconnects */
bool upgrade; /* test case allows upgrade */
bool upgrade_request; /* upgrade request found and allowed */
error = errno;
logmsg("fopen() failed with error (%d) %s",
error, curlx_strerror(error, errbuf, sizeof(errbuf)));
- logmsg(" Couldn't open test file %ld", req->testno);
+ logmsg(" Could not open test file %ld", req->testno);
req->open = FALSE; /* closes connection */
return 1; /* done */
}
if(http && sscanf(http, "HTTP/%d.%d",
&prot_major,
&prot_minor) == 2) {
- /* between the request keyword and HTTP/ there's a path */
+ /* between the request keyword and HTTP/ there is a path */
httppath = line + strlen(request);
npath = http - httppath;
}
if(req->testno == DOCNUMBER_NOTHING) {
- /* didn't find any in the first scan, try alternative test case
+ /* did not find any in the first scan, try alternative test case
number placements */
static char doc[MAXDOCNAMELEN];
if(sscanf(req->reqbuf, "CONNECT %" MAXDOCNAMELEN_TXT "s HTTP/%d.%d",
}
if(!end) {
- /* we don't have a complete request yet! */
+ /* we do not have a complete request yet! */
logmsg("request not complete yet");
return 0; /* not complete yet */
}
/* **** Persistence ****
*
* If the request is an HTTP/1.0 one, we close the connection unconditionally
- * when we're done.
+ * when we are done.
*
* If the request is an HTTP/1.1 one, we MUST check for a "Connection:"
* header that might say "close". If it does, we close a connection when
return 1; /* done */
if((req->cl == 0) && !CURL_STRNICMP("Content-Length:", line, 15)) {
- /* If we don't ignore content-length, we read it and we read the whole
- request including the body before we return. If we've been told to
+ /* If we do not ignore content-length, we read it and we read the whole
+ request including the body before we return. If we have been told to
ignore the content-length, we will return as soon as all headers
have been received */
curl_off_t clen;
else {
if(req->skip)
/* we are instructed to not read the entire thing, so we make sure to
- only read what we're supposed to and NOT read the entire thing the
+ only read what we are supposed to and NOT read the entire thing the
client wants to send! */
got = sread(sock, reqbuf + req->offset, req->cl);
else
if(req->connect_request) {
/* a CONNECT request, setup and talk the tunnel */
if(!is_proxy) {
- logmsg("received CONNECT but isn't running as proxy!");
+ logmsg("received CONNECT but not running as proxy!");
return 1;
}
else {
if(!req->open)
/* When instructed to close connection after server-reply we
- wait a very small amount of time before doing so. If this
+ wait a small amount of time before doing so. If this
is not done client might get an ECONNRESET before reading
a single byte of server-reply. */
curlx_wait_ms(50);
goto sws_cleanup;
}
- /* Reset the request, unless we're still in the middle of reading */
+ /* Reset the request, unless we are still in the middle of reading */
if(rc && !req->upgrade_request)
/* Note: resetting the HTTP request here can cause problems if:
* 1) req->skipall is TRUE,
* data (in service_connection()) as the first data received on
* this new HTTP request and report "** Unusual request" (skipall
* would have otherwise caused that data to be ignored). Normally,
- * that socket will be closed by the client and there won't be any
- * stale data to cause this, but stranger things have happened (see
- * issue #11678).
+ * that socket will be closed by the client and there will not be
+ * any stale data to cause this, but stranger things have happened
+ * (see issue #11678).
*/
init_httprequest(req);
} while(rc > 0);
{
newline = 0; /* init crlf flag */
prevchar = -1;
- bfs[0].counter = BF_ALLOC; /* pass out the first buffer */
+ bfs[0].counter = BF_ALLOC; /* pass out the first buffer */
current = 0;
bfs[1].counter = BF_FREE;
nextone = x; /* ahead or behind? */
current = !current; /* "incr" current */
b = &bfs[current]; /* look at new buffer */
- if(b->counter == BF_FREE) /* if it's empty */
+ if(b->counter == BF_FREE) /* if empty */
read_ahead(test, convert); /* fill it */
*dpp = &b->buf.hdr; /* set caller's ptr */
struct tftphdr *dp;
b = &bfs[nextone]; /* look at "next" buffer */
- if(b->counter != BF_FREE) /* nop if not free */
+ if(b->counter != BF_FREE) /* nop if not free */
return;
nextone = !nextone; /* "incr" next buffer ptr */
snprintf(outfile, sizeof(outfile), "%s/upload.%ld", logdir, test->testno);
test->ofile = open(outfile, O_CREAT|O_RDWR|CURL_O_BINARY, 0777);
if(test->ofile == -1) {
- logmsg("Couldn't create and/or open file %s for upload!", outfile);
+ logmsg("Could not create and/or open file %s for upload!", outfile);
return -1; /* failure! */
}
}
error = errno;
logmsg("fopen() failed with error (%d) %s",
error, curlx_strerror(error, errbuf, sizeof(errbuf)));
- logmsg(" Couldn't open test file %ld", req->testno);
+ logmsg(" Could not open test file %ld", req->testno);
return 1; /* done */
}
else {
int error = errno;
logmsg("fopen() failed with error (%d) %s",
error, curlx_strerror(error, errbuf, sizeof(errbuf)));
- logmsg("Couldn't open test file for test: %ld", testno);
+ logmsg("Could not open test file for test: %ld", testno);
return TFTP_EACCESS;
}
else {
}
sec = epoch_offset + tv.tv_sec;
/* !checksrc! disable BANNEDFUNC 1 */
- now = localtime(&sec); /* not thread safe but we don't care */
+ now = localtime(&sec); /* not thread safe but we do not care */
snprintf(timebuf, sizeof(timebuf), "%02d:%02d:%02d.%06ld",
(int)now->tm_hour, (int)now->tm_min, (int)now->tm_sec,
pidfile = fopen(filename, "wb");
if(!pidfile) {
char errbuf[STRERROR_LEN];
- logmsg("Couldn't write pid file: %s (%d) %s", filename,
+ logmsg("Could not write pid file: %s (%d) %s", filename,
errno, curlx_strerror(errno, errbuf, sizeof(errbuf)));
return 0; /* fail */
}
FILE *portfile = fopen(filename, "wb");
if(!portfile) {
char errbuf[STRERROR_LEN];
- logmsg("Couldn't write port file: %s (%d) %s", filename,
+ logmsg("Could not write port file: %s (%d) %s", filename,
errno, curlx_strerror(errno, errbuf, sizeof(errbuf)));
return 0; /* fail */
}
exerunner
);
-our $logfile; # server log file name, for logmsg
+our $logfile; # server log filename, for logmsg
#***************************************************************************
# Just for convenience, test harness uses 'https' and 'httptls' literals as
#***************************************************************************
-# Return server name string formatted for file name purposes
+# Return server name string formatted for filename purposes
#
sub servername_canon {
my ($proto, $ipver, $idnum) = @_;
#***************************************************************************
-# Return file name for server pid file.
+# Return filename for server pid file.
#
sub server_pidfilename {
my ($piddir, $proto, $ipver, $idnum) = @_;
}
#***************************************************************************
-# Return file name for server port file.
+# Return filename for server port file.
#
sub server_portfilename {
my ($piddir, $proto, $ipver, $idnum) = @_;
#***************************************************************************
-# Return file name for server log file.
+# Return filename for server log file.
#
sub server_logfilename {
my ($logdir, $proto, $ipver, $idnum) = @_;
#***************************************************************************
-# Return file name for server commands file.
+# Return filename for server commands file.
#
sub server_cmdfilename {
my ($logdir, $proto, $ipver, $idnum) = @_;
#***************************************************************************
-# Return file name for server input file.
+# Return filename for server input file.
#
sub server_inputfilename {
my ($logdir, $proto, $ipver, $idnum) = @_;
#***************************************************************************
-# Return file name for server output file.
+# Return filename for server output file.
#
sub server_outputfilename {
my ($logdir, $proto, $ipver, $idnum) = @_;
#***************************************************************************
-# Return file name for main or primary sockfilter pid file.
+# Return filename for main or primary sockfilter pid file.
#
sub mainsockf_pidfilename {
my ($piddir, $proto, $ipver, $idnum) = @_;
#***************************************************************************
-# Return file name for main or primary sockfilter log file.
+# Return filename for main or primary sockfilter log file.
#
sub mainsockf_logfilename {
my ($logdir, $proto, $ipver, $idnum) = @_;
#***************************************************************************
-# Return file name for data or secondary sockfilter pid file.
+# Return filename for data or secondary sockfilter pid file.
#
sub datasockf_pidfilename {
my ($piddir, $proto, $ipver, $idnum) = @_;
#***************************************************************************
-# Return file name for data or secondary sockfilter log file.
+# Return filename for data or secondary sockfilter log file.
#
sub datasockf_logfilename {
my ($logdir, $proto, $ipver, $idnum) = @_;
);
-my %serverpidfile; # all server pid file names, identified by server id
-my %serverportfile;# all server port file names, identified by server id
+my %serverpidfile; # all server pid filenames, identified by server id
+my %serverportfile;# all server port filenames, identified by server id
my $sshdvernum; # for socks server, ssh daemon version number
my $sshdverstr; # for socks server, ssh daemon version string
my $sshderror; # for socks server, ssh daemon version error
-my %doesntrun; # servers that don't work, identified by pidfile
+my %doesntrun; # servers that do not work, identified by pidfile
my %PORT = (nolisten => 47); # port we use for a local non-listening service
my $server_response_maxtime=13;
my $httptlssrv = find_httptlssrv();
Type => SOCK_STREAM,
Reuse => 1,
Listen => 10 )
- or die "Couldn't create tcp server socket: $@\n";
+ or die "Could not create tcp server socket: $@\n";
return $server->sockport();
}
}
#######################################################################
-# Load serverpidfile and serverportfile hashes with file names for all
+# Load serverpidfile and serverportfile hashes with filenames for all
# possible servers.
#
sub init_serverpidfile_hash {
# Put an "exec" in front of the command so that the child process
# keeps this child's process ID.
- exec("exec $cmd") || die "Can't exec() $cmd: $!";
+ exec("exec $cmd") || die "Cannot exec() $cmd: $!";
# exec() should never return back here to this process. We protect
# ourselves by calling die() just in case something goes really bad.
die "error: exec() has returned";
}
- # Ugly hack but ssh client and gnutls-serv don't support pid files
+ # Ugly hack but ssh client and gnutls-serv do not support pid files
if($fakepidfile) {
if(open(my $out, ">", "$pidfile")) {
print $out $child . "\n";
if(checkdied($child)) {
logmsg "startnew: child process has died, server might start up\n"
if($verbose);
- # We can't just abort waiting for the server with a
+ # We cannot just abort waiting for the server with a
# return (-1,-1);
# because the server might have forked and could still start
# up normally. Instead, just reduce the amount of time we remain
#######################################################################
-# Stop a test server along with pids which aren't in the %run hash yet.
+# Stop a test server along with pids which are not in the %run hash yet.
# This also stops all servers which are relative to the given one.
#
sub stopserver {
#######################################################################
# Verify that the server that runs on $ip, $port is our server. This also
# implies that we can speak with it, as there might be occasions when the
-# server runs fine but we cannot talk to it ("Failed to connect to ::1: Can't
+# server runs fine but we cannot talk to it ("Failed to connect to ::1: Cannot
# assign requested address")
#
sub verifyhttp {
$pid = 0+$1;
}
elsif($res == 6) {
- # curl: (6) Couldn't resolve host '::1'
+    # curl: (6) Could not resolve host '::1'
logmsg "RUN: failed to resolve host ($proto://$ip:$port/verifiedserver)\n";
return -1;
}
#######################################################################
# Verify that the server that runs on $ip, $port is our server. This also
# implies that we can speak with it, as there might be occasions when the
-# server runs fine but we cannot talk to it ("Failed to connect to ::1: Can't
+# server runs fine but we cannot talk to it ("Failed to connect to ::1: Cannot
# assign requested address")
#
sub verifyftp {
#######################################################################
# Verify that the server that runs on $ip, $port is our server. This also
# implies that we can speak with it, as there might be occasions when the
-# server runs fine but we cannot talk to it ("Failed to connect to ::1: Can't
+# server runs fine but we cannot talk to it ("Failed to connect to ::1: Cannot
# assign requested address")
#
sub verifyrtsp {
$pid = 0+$1;
}
elsif($res == 6) {
- # curl: (6) Couldn't resolve host '::1'
+    # curl: (6) Could not resolve host '::1'
logmsg "RUN: failed to resolve host ($proto://$ip:$port/verifiedserver)\n";
return -1;
}
my ($proto, $ipvnum, $idnum, $ip, $port) = @_;
my $server = servername_id($proto, $ipvnum, $idnum);
my $verified = 0;
- # Find out sftp client canonical file name
+ # Find out sftp client canonical filename
my $sftp = find_sftp();
if(!$sftp) {
logmsg "RUN: SFTP server cannot find $sftpexe\n";
return -1;
}
- # Find out ssh client canonical file name
+ # Find out ssh client canonical filename
my $ssh = find_ssh();
if(!$ssh) {
logmsg "RUN: SFTP server cannot find $sshexe\n";
# Verify that the non-stunnel HTTP TLS extensions capable server that runs
# on $ip, $port is our server. This also implies that we can speak with it,
# as there might be occasions when the server runs fine but we cannot talk
-# to it ("Failed to connect to ::1: Can't assign requested address")
+# to it ("Failed to connect to ::1: Cannot assign requested address")
#
sub verifyhttptls {
my ($proto, $ipvnum, $idnum, $ip, $port) = @_;
return $pid;
}
elsif($res == 6) {
- # curl: (6) Couldn't resolve host '::1'
+    # curl: (6) Could not resolve host '::1'
logmsg "RUN: failed to resolve host (https://$ip:$port/verifiedserver)\n";
return -1;
}
#######################################################################
# Verify that the server that runs on $ip, $port is our server. This also
# implies that we can speak with it, as there might be occasions when the
-# server runs fine but we cannot talk to it ("Failed to connect to ::1: Can't
+# server runs fine but we cannot talk to it ("Failed to connect to ::1: Cannot
# assign requested address")
#
sub verifysmb {
#######################################################################
# Verify that the server that runs on $ip, $port is our server. This also
# implies that we can speak with it, as there might be occasions when the
-# server runs fine but we cannot talk to it ("Failed to connect to ::1: Can't
+# server runs fine but we cannot talk to it ("Failed to connect to ::1: Cannot
# assign requested address")
#
sub verifytelnet {
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0, 0);
}
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
if($httpspid <= 0 || !pidexists($httpspid)) {
# it is NOT alive
- # don't call stopserver since that will also kill the dependent
+ # do not call stopserver since that will also kill the dependent
# server that has already been started properly
$doesntrun{$pidfile} = 1;
$httpspid = $pid2 = 0;
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
my $pidfile = $serverpidfile{$server};
my $portfile = $serverportfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0);
}
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
if($protospid <= 0 || !pidexists($protospid)) {
# it is NOT alive
- # don't call stopserver since that will also kill the dependent
+ # do not call stopserver since that will also kill the dependent
# server that has already been started properly
$doesntrun{$pidfile} = 1;
$protospid = $pid2 = 0;
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
my $pidfile = $serverpidfile{$server};
my $portfile = $serverportfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
my $idnum = ($id && ($id =~ /^(\d+)$/) && ($id > 1)) ? $id : 1;
if(!$USER) {
- logmsg "Can't start ssh server due to lack of USER name\n";
+ logmsg "Cannot start ssh server due to lack of username\n";
return (4, 0, 0, 0);
}
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
# once it is known that the ssh server is alive, sftp server
# verification is performed actually connecting to it, authenticating
- # and performing a very simple remote command. This verification is
- # tried only one time.
+ # and performing a simple remote command. This verification is tried
+ # only one time.
$sshdlog = server_logfilename($LOGDIR, 'ssh', $ipvnum, $idnum);
$sftplog = server_logfilename($LOGDIR, 'sftp', $ipvnum, $idnum);
logmsg "RUN: failed to verify the $srvrname server on $port\n";
return (5, 0, 0, 0);
}
- # we're happy, no need to loop anymore!
+ # we are happy, no need to loop anymore!
$doesntrun{$pidfile} = 0;
my $hostfile;
my $pidfile = $serverpidfile{$server};
my $portfile = $serverportfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0);
}
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
my $pidfile = $serverpidfile{$server};
- # don't retry if the server doesn't work
+ # do not retry if the server does not work
if($doesntrun{$pidfile}) {
return (2, 0, 0, 0);
}
elsif($what =~ /^(ftp|imap|pop3|smtp)s$/) {
my $cproto = $1;
if(!$stunnel) {
- # we can't run ftps tests without stunnel
+ # we cannot run ftps tests without stunnel
return ("no stunnel", 4);
}
if($runcert{$what} && ($runcert{$what} ne $certfile)) {
}
elsif($what eq "https" || $what eq "https-mtls") {
if(!$stunnel) {
- # we can't run https tests without stunnel
+ # we cannot run https tests without stunnel
return ("no stunnel", 4);
}
if($runcert{$what} && ($runcert{$what} ne $certfile)) {
}
}
elsif($what eq "http/2") {
- # http/2 server proxies to a http server
+ # http/2 server proxies to an HTTP server
if($run{'http/2'} &&
!responsive_http_server("https", $verbose, 0, protoport('http2tls'))) {
logmsg "* restarting unresponsive HTTP/2 server\n";
}
}
elsif($what eq "http/3") {
- # http/3 server proxies to a http server
+ # http/3 server proxies to an HTTP server
if($run{'http/3'} &&
!responsive_http_server("https", $verbose, 0, protoport('http3'), 1)) {
logmsg "* restarting unresponsive HTTP/3 server\n";
}
elsif($what eq "gophers") {
if(!$stunnel) {
- # we can't run TLS tests without stunnel
+ # we cannot run TLS tests without stunnel
return ("no stunnel", 4);
}
if($runcert{'gophers'} && ($runcert{'gophers'} ne $certfile)) {
}
elsif($what eq "https-proxy") {
if(!$stunnel) {
- # we can't run https-proxy tests without stunnel
+ # we cannot run https-proxy tests without stunnel
return ("no stunnel", 4);
}
if($runcert{'https-proxy'} &&
}
elsif($what eq "httptls") {
if(!$httptlssrv) {
- # for now, we can't run http TLS-EXT tests without gnutls-serv
+ # for now, we cannot run http TLS-EXT tests without gnutls-serv
return ("no gnutls-serv (with SRP support)", 4);
}
if($run{'httptls'} &&
}
elsif($what eq "httptls-ipv6") {
if(!$httptlssrv) {
- # for now, we can't run http TLS-EXT tests without gnutls-serv
+ # for now, we cannot run http TLS-EXT tests without gnutls-serv
return ("no gnutls-serv", 4);
}
if($run{'httptls-ipv6'} &&
}
}
else {
- warn "we don't support a server for $what";
+ warn "we do not support a server for $what";
return ("no server for $what", 4);
}
}
# The purpose of FTPTIME2 is to provide times that can be
# used for time-out tests and that would work on most hosts as these
# adjust for the startup/check time for this particular host. We needed to
- # do this to make the test suite run better on very slow hosts.
+ # do this to make the test suite run better on slow hosts.
my $ftp2 = $ftpchecktime * 8;
$$thing =~ s/${prefix}FTPTIME2/$ftp2/g;
self.ctd = TestData(test_data_directory)
# Override smbComNtCreateAndX so we can pretend to have files which
- # don't exist.
+ # do not exist.
self.hookSmbCommand(imp_smb.SMB.SMB_COM_NT_CREATE_ANDX,
self.create_and_x)
log.debug("[SMB] Get server path '%s'", requested_filename)
if requested_filename not in [VERIFIED_REQ]:
- raise SmbError(STATUS_NO_SUCH_FILE, "Couldn't find the file")
+ raise SmbError(STATUS_NO_SUCH_FILE, "Could not find the file")
fid, filename = tempfile.mkstemp()
log.debug("[SMB] Created %s (%d) for storing '%s'",
parser.add_argument("--verbose", action="store", type=int, default=0,
help="verbose output")
parser.add_argument("--pidfile", action="store",
- help="file name for the PID")
+ help="filename for the PID")
parser.add_argument("--logfile", action="store",
- help="file name for the log")
+ help="filename for the log")
parser.add_argument("--srcdir", action="store", help="test directory")
parser.add_argument("--id", action="store", help="server ID")
parser.add_argument("--ipv4", action="store_true", default=0,
my @cfgarr;
#***************************************************************************
-# Returns a path of the given file name in the log directory (PiddirPath)
+# Returns a path of the given filename in the log directory (PiddirPath)
#
sub pp {
my $file = $_[0];
#
#***************************************************************************
-# Default ssh daemon pid file name & directory
+# Default ssh daemon pid filename & directory
#
if($pidfile) {
# Use our pidfile directory to store server config files
}
#***************************************************************************
-# ssh and sftp server log file names
+# ssh and sftp server log filenames
#
$sshdlog = server_logfilename($logdir, 'ssh', $ipvnum, $idnum);
$sftplog = server_logfilename($logdir, 'sftp', $ipvnum, $idnum);
# Validate username
#
if(!$username) {
- $error = 'Will not run ssh server without a user name';
+ $error = 'Will not run ssh server without a username';
}
elsif($username eq 'root') {
$error = 'Will not run ssh server as root to mitigate security risks';
#***************************************************************************
-# Find out ssh daemon canonical file name
+# Find out ssh daemon canonical filename
#
my $sshd = find_sshd();
if(!$sshd) {
#***************************************************************************
-# Find out sftp server plugin canonical file name
+# Find out sftp server plugin canonical filename
#
my $sftpsrv = find_sftpsrv();
if(!$sftpsrv) {
#***************************************************************************
-# Find out sftp client canonical file name
+# Find out sftp client canonical filename
#
my $sftp = find_sftp();
if(!$sftp) {
#***************************************************************************
-# Find out ssh keygen canonical file name
+# Find out ssh keygen canonical filename
#
my $sshkeygen = find_sshkeygen();
if(!$sshkeygen) {
#***************************************************************************
-# Find out ssh client canonical file name
+# Find out ssh client canonical filename
#
my $ssh = find_ssh();
if(!$ssh) {
(! -e pp($hstpubsha256f)) || (! -s pp($hstpubsha256f)) ||
(! -e pp($cliprvkeyf)) || (! -s pp($cliprvkeyf)) ||
(! -e pp($clipubkeyf)) || (! -s pp($clipubkeyf))) {
- # Make sure all files are gone so ssh-keygen doesn't complain
+ # Make sure all files are gone so ssh-keygen does not complain
unlink(pp($hstprvkeyf), pp($hstpubkeyf), pp($hstpubmd5f),
pp($hstpubsha256f), pp($cliprvkeyf), pp($clipubkeyf));
exit 1;
}
display_file_top(pp($cliprvkeyf)) if($verbose);
- # Make sure that permissions are restricted so openssh doesn't complain
+ # Make sure that permissions are restricted so openssh does not complain
chmod 0600, pp($hstprvkeyf);
chmod 0600, pp($cliprvkeyf);
if(($^O eq 'cygwin' || $^O eq 'msys') && -e "/bin/setfacl") {
# Put an "exec" in front of the command so that the child process
# keeps this child's process ID by being tied to the spawned shell.
- exec("exec $cmd") || die "Can't exec() $cmd: $!";
+ exec("exec $cmd") || die "Cannot exec() $cmd: $!";
# exec() will create a new process, but ties the existence of the
# new process to the parent waiting perl.exe and sh.exe processes.
$Cpreprocessor = 'cpp';
}
-# we may get the dir root pointed out
+# we may get the directory root pointed out
my $root=$ARGV[0] || ".";
# need an include directory when building out-of-tree
sub scanallheaders {
my $d = "$root/include/curl";
opendir(my $dh, $d) ||
- die "Can't opendir: $!";
+ die "Cannot opendir: $!";
my @headers = grep { /.h\z/ } readdir($dh);
closedir $dh;
foreach my $h (@headers) {
sub scanman_md_dir {
my ($d) = @_;
opendir(my $dh, $d) ||
- die "Can't opendir: $!";
+ die "Cannot opendir: $!";
my @mans = grep { /.md\z/ } readdir($dh);
closedir $dh;
for my $m (@mans) {
# now scan through all symbols that were present in the symbols-in-versions
# but not in the headers
#
-# If the symbols were marked 'removed' in symbols-in-versions we don't output
+# If the symbols were marked 'removed' in symbols-in-versions we do not output
# anything about it since that is perfectly fine.
#
}
else {
if(!$memdebug) {
- print STDERR "$file doesn't include \"memdebug.h\"!\n";
+ print STDERR "$file does not include \"memdebug.h\"!\n";
}
if(!$curlmem) {
- print STDERR "$file doesn't include \"curl_memory.h\"!\n";
+ print STDERR "$file does not include \"curl_memory.h\"!\n";
}
return 1;
}
return 0;
}
-opendir(my $dh, $dir) || die "can't opendir $dir: $!";
+opendir(my $dh, $dir) || die "cannot opendir $dir: $!";
my @cfiles = grep { /\.c\z/ && -f "$dir/$_" } readdir($dh);
closedir $dh;
my $sort = 0;
-# we may get the dir root pointed out
+# we may get the directory root pointed out
my $root = shift @ARGV;
while(defined $root) {
# src/tool_getparam.c lists all options curl can parse
# docs/curl.1 documents all command line options
# src/tool_listhelp.c outputs all options with curl -h
-# - make sure they're all in sync
+# - make sure they are all in sync
#
# Output all deviances to stderr.
use strict;
use warnings;
-# we may get the dir roots pointed out
+# we may get the directory roots pointed out
my $root=$ARGV[0] || ".";
my $buildroot=$ARGV[1] || ".";
my $syms = "$root/docs/libcurl/symbols-in-versions";
}
elsif($rem) {
# $opt was removed in $rem
- # so don't check for that
+ # so do not check for that
}
else {
if($type eq "OPT") {
# the DISABLE options that are documented
my %docs;
-# we may get the dir root pointed out
+# we may get the directory root pointed out
my $root=$ARGV[0] || ".";
my $DOCS="CURL-DISABLE.md";
}
sub scan_configure {
- opendir(my $m, "$root/m4") || die "Can't opendir $root/m4: $!";
+ opendir(my $m, "$root/m4") || die "Cannot opendir $root/m4: $!";
my @m4 = grep { /\.m4$/ } readdir($m);
closedir $m;
scanconf("$root/configure.ac");
sub scan_dir {
my ($dir)=@_;
- opendir(my $dh, $dir) || die "Can't opendir $dir: $!";
+ opendir(my $dh, $dir) || die "Cannot opendir $dir: $!";
my @cfiles = grep { /\.[ch]\z/ && -f "$dir/$_" } readdir($dh);
closedir $dh;
for my $f (sort @cfiles) {
shift;
}
-# we may get the dir root pointed out
+# we may get the directory root pointed out
my $root=$ARGV[0] || ".";
# need an include directory when building out-of-tree
while(<H_IN>) {
my ($line, $linenum) = ($_, $.);
if(/^#(line|) (\d+) \"(.*)\"/) {
- # if the included file isn't in our incdir, then we skip this section
+ # if the included file is not in our incdir, then we skip this section
# until next #line
#
if($3 !~ /^$incdir/) {
}
-opendir(my $dh, $incdir) || die "Can't opendir $incdir: $!";
+opendir(my $dh, $incdir) || die "Cannot opendir $incdir: $!";
my @hfiles = grep { /\.h$/ } readdir($dh);
closedir $dh;
use warnings;
use File::Basename;
-# get the file name first
+# get the filename first
my $symbolsinversions=shift @ARGV;
-# we may get the dir roots pointed out
+# we may get the directory roots pointed out
my @manpages=@ARGV;
my $errors = 0;
}
}
-# option-looking words that aren't options
+# option-looking words that are not options
my %allownonref = (
'CURLINFO_TEXT' => 1,
'CURLINFO_HEADER_IN' => 1,
open(my $m, "<", "$file") ||
die "test1173.pl could not open $file";
if($file =~ /[\/\\](CURL|curl_)([^\/\\]*).3/) {
- # This is a man page for libcurl. It requires an example unless it's
+ # This is a man page for libcurl. It requires an example unless it is
# considered deprecated.
$reqex = 1 unless defined $deprecated{'CURL'.$2};
if($1 eq "CURL") {
allsymbols();
if(!$symbol{'CURLALTSVC_H1'}) {
- print STDERR "didn't get the symbols-in-version!\n";
+ print STDERR "did not get the symbols-in-version!\n";
exit;
}
use strict;
use warnings;
-# we may get the dir root pointed out
+# we may get the directory root pointed out
my $root = $ARGV[0] || ".";
my %error; # from the include file
exit 0;
}
-# Get header file names,
-opendir(my $dh, $incdir) || die "Can't opendir $incdir";
+# Get header filenames.
+opendir(my $dh, $incdir) || die "Cannot opendir $incdir";
my @hfiles = grep { /\.h$/ } readdir($dh);
closedir $dh;
use strict;
use warnings;
-# we may get the dir roots pointed out
+# we may get the directory roots pointed out
my $root=$ARGV[0] || ".";
my $buildroot=$ARGV[1] || ".";
my $manpge = "$buildroot/docs/libcurl/libcurl-errors.3";
}
-opendir(my $dh, $curlh) || die "Can't opendir $curlh: $!";
+opendir(my $dh, $curlh) || die "Cannot opendir $curlh: $!";
my @hfiles = grep { /\.h$/ } readdir($dh);
closedir $dh;
use strict;
use warnings;
-# we may get the dir root pointed out
+# we may get the directory root pointed out
my $root=$ARGV[0] || ".";
my %insrc; # variable set in source
$Cpreprocessor = 'cpp';
}
-# we may get the dir root pointed out
+# we may get the directory root pointed out
my $root=$ARGV[0] || ".";
# need an include directory when building out-of-tree
sub scanman_md_dir {
my ($d) = @_;
opendir(my $dh, $d) ||
- die "Can't opendir: $!";
+ die "Cannot opendir: $!";
my @mans = grep { /.md\z/ } readdir($dh);
closedir $dh;
for my $m (@mans) {
use strict;
use warnings;
-# we may get the dir root pointed out
+# we may get the directory root pointed out
my $root=$ARGV[0] || ".";
my %typecheck; # from the include file
###########################################################################
#
# - Get all options mentioned in the $cmddir.
-# - Make sure they're all mentioned in the $opts document
+# - Make sure they are all mentioned in the $opts document
# - Make sure that the version in $opts matches the version in the file in
# $cmddir
#
sub cmdfiles {
my ($dir)=@_;
- opendir(my $dh, $dir) || die "Can't opendir $dir: $!";
+ opendir(my $dh, $dir) || die "Cannot opendir $dir: $!";
my @opts = grep { /[a-z0-9].*\.md$/ && -f "$dir/$_" } readdir($dh);
closedir $dh;
# at a regular interval. The output is suitable to be mailed to
# curl-autocompile@haxx.se to be dealt with automatically (make sure the
# subject includes the word "autobuild" as the mail gets silently discarded
-# otherwise). The most current build status (with a reasonable backlog) will
-# be published on the curl site, at https://curl.se/auto/
+# otherwise). The most current build status (with a reasonable backlog) is
+# published on the curl site, at https://curl.se/auto/
# USAGE:
# testcurl.pl [options] [curl-daily-name] > output
# --mktarball=[command] Command to run after completed test
# --name=[name] Set name to report as
# --notes=[notes] More human-readable information about this configuration
-# --nocvsup Don't pull from git even though it is a git tree
-# --nogitpull Don't pull from git even though it is a git tree
-# --nobuildconf Don't run autoreconf -fi
-# --noconfigure Don't run configure
+# --nocvsup Do not pull from git even though it is a git tree
+# --nogitpull Do not pull from git even though it is a git tree
+# --nobuildconf Do not run autoreconf -fi
+# --noconfigure Do not run configure
# --runtestopts=[options] Options to pass to runtests.pl
-# --setup=[file name] File name to read setup from (deprecated)
+# --setup=[filename] Filename to read setup from (deprecated)
# --target=[your os] Specify your target environment.
#
# if [curl-daily-name] is omitted, a 'curl' git directory is assumed.
use Cwd;
use File::Spec;
-# Turn on warnings (equivalent to -w, which can't be used with /usr/bin/env)
+# Turn on warnings (equivalent to -w, which cannot be used with /usr/bin/env)
#BEGIN { $^W = 1; }
use vars qw($version $fixed $infixed $CURLDIR $git $pwd $build $buildlog
$version='2024-11-28';
$fixed=0;
-# Determine if we're running from git or a canned copy of curl,
+# Determine if we are running from git or a canned copy of curl,
# or if we got a specific target option or setup file option.
$CURLDIR="curl";
if(-f ".git/config") {
$str1066os = undef;
-# Make $pwd to become the path without newline. We'll use that in order to cut
+# Make $pwd become the path without newline. We use that in order to cut
# off that path from all possible logs and error messages etc.
$pwd = getcwd();
# remove the generated sources to force them to be re-generated each
# time we run this test
unlink "$CURLDIR/src/tool_hugehelp.c";
- # find out if curl source dir has an in-tree c-ares repo
+ # find out if curl source directory has an in-tree c-ares repo
$have_embedded_ares = 1 if(-f "$CURLDIR/ares/GIT-INFO");
} elsif(!$git && -f "$CURLDIR/tests/testcurl.pl") {
logit "$CURLDIR is verified to be a fine daily source dir";
- # find out if curl source dir has an in-tree c-ares extracted tarball
+ # find out if curl source directory has an in-tree c-ares extracted tarball
$have_embedded_ares = 1 if(-f "$CURLDIR/ares/ares_build.h");
} else {
- mydie "$CURLDIR is not a daily source dir or checked out from git!"
+ mydie "$CURLDIR is not a daily source directory or checked out from git!"
}
}
# this is to remove old build logs that ended up in the wrong dir
foreach(glob("$CURLDIR/buildlog-*")) { unlink $_; }
-# create a dir to build in
+# create a directory to build in
mkdir $build, 0777;
if(-d $build) {
- logit "build dir $build was created fine";
+ logit "build directory $build was created fine";
} else {
- mydie "failed to create dir $build";
+ mydie "failed to create directory $build";
}
# get in the curl source tree root
}
}
-# Set timestamp to the one in curlver.h if this isn't a git test build.
+# Set timestamp to the one in curlver.h if this is not a git test build.
if((-f "include/curl/curlver.h") &&
(open(my $f, "<", "include/curl/curlver.h"))) {
while(<$f>) {
my $make = findinpath("gmake", "make", "nmake");
if(!$make) {
- mydie "Couldn't find make in the PATH";
+ mydie "Could not find make in the PATH";
}
# force to 'nmake' for VC builds
$make = "nmake" if($targetos =~ /vc/);
if(-f "lib/Makefile") {
logit "configure seems to have finished fine";
} else {
- mydie "configure didn't work";
+ mydie "configure did not work";
}
} else {
- logit "copying files to build dir ...";
+ logit "copying files to build directory ...";
if($^O eq 'MSWin32') {
system("xcopy /s /q \"$CURLDIR\" .");
}
close($log);
chdir "$pwd/$build";
}
- logit_spaced "cross-compiling, can't run tests";
+ logit_spaced "cross-compiling, cannot run tests";
}
# dummy message to feign success
print "TESTDONE: 1 tests out of 0 (dummy message)\n";
}
else {
if(($$thing =~ /^\n\z/) && $prevupdate) {
- # if there's a blank link after a line we update, we hope it is
+            # if there is a blank line after a line we update, we hope it is
# the empty line following headers
$$thing =~ s/\x0a/\x0d\x0a/;
}
~~~c
#include "unitcheck.h"
- #include "a libcurl header.h" /* from the lib dir */
+ #include "a libcurl header.h" /* from the lib directory */
static CURLcode test_unit9998(const char *arg)
{
~~~c
#include "unitcheck.h"
- #include "a libcurl header.h" /* from the lib dir */
+ #include "a libcurl header.h" /* from the lib directory */
static CURLcode t9999_setup(void)
{
fail_unless(Curl_node_elem(Curl_node_next(Curl_llist_head(&llist))) ==
&unusedData_case2,
"the node next to head is not getting set correctly");
- /* better safe than sorry, check that the tail isn't corrupted */
+ /* better safe than sorry, check that the tail is not corrupted */
fail_unless(Curl_node_elem(Curl_llist_tail(&llist)) != &unusedData_case2,
"the list tail is not getting set correctly");
}
/* BASE is just a define to make us fool around with decently large number so
- that we aren't zero-based */
+ that we are not zero-based */
#define BASE 1000000
/* macro to set the pretended current time */
{ "[[:foo:]]", "bar", NOMATCH|MAC_FAIL},
{ "[[:foo:]]", "f]", MATCH|LINUX_NOMATCH|MAC_FAIL},
- { "Curl[[:blank:]];-)", "Curl ;-)", MATCH },
+ { "curl[[:blank:]];-)", "curl ;-)", MATCH },
{ "*[[:blank:]]*", " ", MATCH },
{ "*[[:blank:]]*", "", NOMATCH },
{ "*[[:blank:]]*", "hi, im_Pavel", MATCH },
/* common using */
- { "filename.dat", "filename.dat", MATCH },
+ { "Filename.dat", "Filename.dat", MATCH },
{ "*curl*", "lets use curl!!", MATCH },
{ "filename.txt", "filename.dat", NOMATCH },
{ "*.txt", "text.txt", MATCH },
char *esc;
esc = curl_easy_escape(easy, "", -1);
- fail_unless(esc == NULL, "negative string length can't work");
+ fail_unless(esc == NULL, "negative string length cannot work");
esc = curl_easy_unescape(easy, "%41%41%41%41", -1, &len);
- fail_unless(esc == NULL, "negative string length can't work");
+ fail_unless(esc == NULL, "negative string length cannot work");
UNITTEST_END(t1605_stop(easy))
}
/* CURLOPT_RESOLVE address parsing tests */
static const struct testcase tests[] = {
- /* spaces aren't allowed, for now */
+ /* spaces are not allowed, for now */
{ "test.com:80:127.0.0.1, 127.0.0.2",
"test.com", 80, TRUE, { NULL, }
},
};
static const struct testcase tests[] = {
- /* spaces aren't allowed, for now */
+ /* spaces are not allowed, for now */
{ "test.com:80:127.0.0.1",
"test.com", 80, { "127.0.0.1", }
},
/*
* This debugf callback is simply dumping the string into the static buffer
- * for the unit test to inspect. Since we know that we're only dealing with
+ * for the unit test to inspect. Since we know that we are only dealing with
* text we can afford the luxury of skipping the type check here.
*/
static int debugf_cb(CURL *handle, curl_infotype type, char *buf, size_t size,
free_and_clear(ipv6port);
curl_url_cleanup(u);
- /* Incorrect zone index syntax, but the port extractor doesn't care */
+ /* Incorrect zone index syntax, but the port extractor does not care */
u = curl_url();
if(!u)
goto fail;
ALPN_h2, "6.example.net", 80);
fail_if(res, "Curl_altsvc_parse(9) failed!");
- /* missing port in host name */
+ /* missing port in hostname */
res = Curl_altsvc_parse(curl, asi,
"h2=\"example.net\"; ma=\"180\";\r\n",
ALPN_h2, "7.example.net", 80);
fail_if(res, "Curl_altsvc_parse(10) failed!");
- /* illegal port in host name */
+ /* illegal port in hostname */
res = Curl_altsvc_parse(curl, asi,
"h2=\"example.net:70000\"; ma=\"180\";\r\n",
ALPN_h2, "8.example.net", 80);
***************************************************************************/
#include "unitcheck.h"
-#include "doh.h" /* from the lib dir */
+#include "doh.h"
static CURLcode test_unit1655(const char *arg)
{
***************************************************************************/
#include "unitcheck.h"
-#include "doh.h" /* from the lib dir */
+#include "doh.h"
/* DoH + HTTPSRR are required */
#if !defined(CURL_DISABLE_DOH) && defined(USE_HTTPSRR)
self.data_folder = data_folder
def get_test_data(self, test_number):
- # Create the test file name
+ # Create the test filename
filename = os.path.join(self.data_folder,
"test{0}".format(test_number))
m = REPLY_DATA.search(contents)
if not m:
- raise Exception("Couldn't find a <reply><data> section")
+ raise Exception("Could not find a <reply><data> section")
- # Left-strip the data so we don't get a newline before our data.
+ # Left-strip the data so we do not get a newline before our data.
return m.group(1).lstrip()